pax_global_header00006660000000000000000000000064151252470470014521gustar00rootroot0000000000000052 comment=ebc3a0f4a56be1c9424a89fbec09962ac34fde85 ggml-org-ggml-3678254/000077500000000000000000000000001512524704700143645ustar00rootroot00000000000000ggml-org-ggml-3678254/.editorconfig000066400000000000000000000005671512524704700170510ustar00rootroot00000000000000# https://EditorConfig.org # Top-most EditorConfig file root = true # Unix-style newlines with a newline ending every file, utf-8 charset [*] end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true charset = utf-8 indent_style = space indent_size = 4 [*.md] indent_size = 2 [Makefile] indent_style = tab [prompts/*.txt] insert_final_newline = unset ggml-org-ggml-3678254/.github/000077500000000000000000000000001512524704700157245ustar00rootroot00000000000000ggml-org-ggml-3678254/.github/pull_request_template.md000066400000000000000000000003421512524704700226640ustar00rootroot00000000000000*For changes to the core `ggml` library (including to the CMake build system), please open a PR in https://github.com/ggml-org/llama.cpp. Doing so will make your PR more visible, better tested and more likely to be reviewed.* ggml-org-ggml-3678254/.github/workflows/000077500000000000000000000000001512524704700177615ustar00rootroot00000000000000ggml-org-ggml-3678254/.github/workflows/ci.yml000066400000000000000000000156101512524704700211020ustar00rootroot00000000000000name: CI on: push: branches: [ master ] pull_request: branches: [ master ] concurrency: group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true jobs: build: strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] libraries: [shared, static] runs-on: ${{ matrix.os }} steps: - name: Clone uses: actions/checkout@v4 - name: Dependencies for Ubuntu if: matrix.os == 'ubuntu-latest' run: | sudo apt-get update sudo apt-get install llvm - name: Add msbuild to PATH if: matrix.os == 'windows-latest' uses: microsoft/setup-msbuild@v2 - name: Create Build Environment run: mkdir build - name: Configure CMake working-directory: ./build run: cmake .. ${{ contains(matrix.os, 'windows') && '-A x64' || '-G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++' }} ${{ matrix.libraries == 'static' && '-DBUILD_SHARED_LIBS=OFF' || '-DBUILD_SHARED_LIBS=ON' }} -DCMAKE_INSTALL_PREFIX=${{ github.workspace }}/installed -DGGML_METAL=OFF - name: Build working-directory: ./build run: cmake --build . ${{ contains(matrix.os, 'windows') && '--config Release' || '' }} - name: Test working-directory: ./build run: ctest --verbose --timeout 900 ${{ contains(matrix.os, 'windows') && '--build-config Release' || '' }} - name: Install working-directory: ./build run: cmake --build . 
--target install ${{ contains(matrix.os, 'windows') && '--config Release' || '' }} - name: Test CMake config run: | mkdir test-cmake cmake -S examples/test-cmake -B test-cmake -DCMAKE_PREFIX_PATH=${{ github.workspace }}/installed ${{ contains(matrix.os, 'windows') && '-A x64' || '-G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++' }} cmake --build test-cmake ${{ contains(matrix.os, 'windows') && '--config Release' || '' }} # TODO: simplify the following workflows using a matrix ggml-ci-x64-cpu-low-perf: runs-on: ubuntu-22.04 steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: ccache uses: ggml-org/ccache-action@v1.2.16 with: key: ggml-ci-x64-cpu-low-perf evict-old-files: 1d - name: Dependencies id: depends run: | sudo apt-get update sudo apt-get install build-essential libcurl4-openssl-dev - name: Test id: ggml-ci run: | LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt ggml-ci-arm64-cpu-low-perf: runs-on: ubuntu-22.04-arm steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: ccache uses: ggml-org/ccache-action@v1.2.16 with: key: ggml-ci-arm64-cpu-low-perf evict-old-files: 1d - name: Dependencies id: depends run: | sudo apt-get update sudo apt-get install build-essential libcurl4-openssl-dev - name: Test id: ggml-ci run: | LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt ggml-ci-x64-cpu-high-perf: runs-on: ubuntu-22.04 steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: ccache uses: ggml-org/ccache-action@v1.2.16 with: key: ggml-ci-x64-cpu-high-perf evict-old-files: 1d - name: Dependencies id: depends run: | sudo apt-get update sudo apt-get install build-essential libcurl4-openssl-dev - name: Test id: ggml-ci run: | LLAMA_ARG_THREADS=$(nproc) bash ./ci/run.sh ./tmp/results ./tmp/mnt ggml-ci-arm64-cpu-high-perf: runs-on: ubuntu-22.04-arm steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: ccache uses: ggml-org/ccache-action@v1.2.16 with: key: ggml-ci-arm64-cpu-high-perf evict-old-files: 1d - name: Dependencies id: depends run: | sudo apt-get update sudo apt-get install build-essential libcurl4-openssl-dev - name: Test id: ggml-ci run: | LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_SVE=1 GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt ggml-ci-arm64-cpu-high-perf-sve: runs-on: ubuntu-22.04-arm steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: ccache uses: ggml-org/ccache-action@v1.2.16 with: key: ggml-ci-arm64-cpu-high-perf-sve evict-old-files: 1d - name: Dependencies id: depends run: | sudo apt-get update sudo apt-get install build-essential libcurl4-openssl-dev - name: Test id: ggml-ci run: | LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt ggml-ci-x64-nvidia-cuda: runs-on: [self-hosted, Linux, X64, NVIDIA] steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: Test id: ggml-ci run: | nvidia-smi GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/ggml /mnt/ggml ggml-ci-x64-nvidia-vulkan-cm: runs-on: [self-hosted, Linux, X64, NVIDIA] steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: Test id: ggml-ci run: | vulkaninfo --summary GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/ggml /mnt/ggml ggml-ci-x64-nvidia-vulkan-cm2: runs-on: [self-hosted, Linux, X64, NVIDIA, COOPMAT2] steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: Test id: ggml-ci run: | vulkaninfo --summary 
GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/ggml /mnt/ggml ggml-ci-x64-cpu-amx: runs-on: [self-hosted, Linux, X64, CPU, AMX] steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: Test id: ggml-ci run: | bash ./ci/run.sh ~/results/ggml /mnt/ggml ggml-ci-mac-metal: runs-on: [self-hosted, macOS, ARM64] steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: Test id: ggml-ci run: | GG_BUILD_METAL=1 bash ./ci/run.sh ~/results/ggml ~/mnt/ggml ggml-ci-mac-vulkan: runs-on: [self-hosted, macOS, ARM64] steps: - name: Clone id: checkout uses: actions/checkout@v4 - name: Test id: ggml-ci run: | vulkaninfo --summary GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/ggml ~/mnt/ggml ggml-org-ggml-3678254/.github/workflows/release.yml000066400000000000000000000010101512524704700221140ustar00rootroot00000000000000name: Release on: push: tags: - 'v*' jobs: release: runs-on: ubuntu-latest permissions: contents: write steps: - name: Checkout code uses: actions/checkout@v4 - name: Create Release id: create_release uses: ggml-org/action-create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref_name }} release_name: Release ${{ github.ref }} draft: false prerelease: false ggml-org-ggml-3678254/.gitignore000066400000000000000000000004601512524704700163540ustar00rootroot00000000000000build/ build-*/ out/ tmp/ models/ models-mnt compile_commands.json CMakeSettings.json .vs/ .vscode/ .idea/ .clangd .venv/ ggml_env/ .exrc .cache .DS_Store .stablelm .gpt-2 src/arm_neon.h tests/arm_neon.h zig-out/ zig-cache/ *.o *.d *.dot *.sw? __pycache__/ # Model files ggml-model-f16.bin *.bat ggml-org-ggml-3678254/.gitmodules000066400000000000000000000000001512524704700165270ustar00rootroot00000000000000ggml-org-ggml-3678254/AUTHORS000066400000000000000000000340271512524704700154420ustar00rootroot00000000000000# date: Tue Feb 4 13:03:51 EET 2025 # this file is auto-generated by scripts/gen-authors.sh 0cc4m 65a <10104049+65a@users.noreply.github.com> AT Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com> Adam Tazi <52357206+ad1tazi@users.noreply.github.com> Adrien Gallouët Adrien Gallouët Ahmad Tameem <113388789+Tameem-10xE@users.noreply.github.com> AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com> AidanBeltonS Akarshan Biswas Akarshan Biswas Albert Jin Alberto Cabrera Pérez Alberto Cabrera Pérez Alex Azarov Alex O'Connell <35843486+acon96@users.noreply.github.com> Alex von Gluck IV AmbientL <107641468+AmbientL@users.noreply.github.com> AmirAli Mirian <37371367+amiralimi@users.noreply.github.com> Ananta Bastola Andreas (Andi) Kunar Andreas Kieslinger <47689530+aendk@users.noreply.github.com> Andrei Andrew Minh Nguyen <40281306+amqdn@users.noreply.github.com> Andrii Ryzhkov Arjun Ashraful Islam Astariul <43774355+astariul@users.noreply.github.com> AsukaMinato Avi Lumelsky Bart Pelle <3662930+Velocity-@users.noreply.github.com> Ben Ashbaugh Bernhard M. 
Wiedemann Borislav Stanimirov Brad Ito Brad Murray <59848399+bradmurray-dt@users.noreply.github.com> Brian Bryan Lozano Carolinabanana <140120812+Carolinabanana@users.noreply.github.com> CarterLi999 <664681047@qq.com> Cebtenzzre Changyeon Kim Charles Xu <63788048+chaxu01@users.noreply.github.com> Charles Xu Chen Xi Chen Xi Chenguang Li <87689256+noemotiovon@users.noreply.github.com> Chris Elrod Christian Kastner Clint Herron Conrad Kramer Cordeiro <1471463+ocordeiro@users.noreply.github.com> Cristiano Calcagno DAN™ Dan Forbes Dan Johansson <164997844+eddnjjn@users.noreply.github.com> Dan Johansson Daniel Bevenius Daniel Ziegenberg Daniele <57776841+daniandtheweb@users.noreply.github.com> Daulet Zhanguzin Dave Dave Airlie Dave Airlie David Miller DavidKorczynski Davidson Francis Dibakar Gope Didzis Gosko Diego Devesa Diogo Djip007 <3705339+Djip007@users.noreply.github.com> Djip007 Dou Xinpeng <15529241576@163.com> Dou Xinpeng <81913537+Dou-Git@users.noreply.github.com> Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com> Ebey Abraham Eldar Yusupov Emmanuel Durand Engininja2 <139037756+Engininja2@users.noreply.github.com> Eric Zhang <34133756+EZForever@users.noreply.github.com> Erik Scholz Ettore Di Giacinto Eve <139727413+netrunnereve@users.noreply.github.com> F1L1P <78918286+F1L1Pv2@users.noreply.github.com> Faisal Zaghloul FantasyGmm <16450052+FantasyGmm@users.noreply.github.com> Felix Finn Voorhees FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com> Frankie Robertson GainLee George Hindle Georgi Gerganov Gilad S <7817232+giladgd@users.noreply.github.com> Gilad S Gilad S. <7817232+giladgd@users.noreply.github.com> Guillaume Wenzek Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com> Haus1 Herman Semenov HimariO Hirochika Matsumoto Hong Bo PENG Hugo Rosenkranz-Costa Hyunsung Lee IGUILIZ Salah-Eddine <76955987+salahiguiliz@users.noreply.github.com> Ian Bull Ihar Hrachyshka Ikko Eltociear Ashimine Ivan Ivan Filipov <159561759+vanaka11@users.noreply.github.com> Ivan Stepanov Ivan Zdane Jack Mousseau Jack Vial JacobLinCool Jakob Frick Jan Ploski Jared Van Bortel Jeff Bolz Jeffrey Quesnelle Jeroen Mostert Jiahao Li JidongZhang-THU <1119708529@qq.com> Jiří Podivín <66251151+jpodivin@users.noreply.github.com> Jo Liss Joe Todd Johannes Gäßler John Balis Josh Bleecher Snyder Judd Jun Hee Yoo Junil Kim Justina Cho Justine Tunney Justine Tunney Karol Kontny <82021046+kkontny@users.noreply.github.com> Kawrakow <48489457+ikawrakow@users.noreply.github.com> Kevin Gibbons Konstantin Zhuravlyov Kylin <56434533+KyL0N@users.noreply.github.com> LoganDark LoganDark LostRuins <39025047+LostRuins@users.noreply.github.com> Lukas Möller M Refi D.A <24388107+refinism@users.noreply.github.com> M. 
Yusuf Sarıgöz Ma Mingfei Mahesh Madhav <67384846+heshpdx@users.noreply.github.com> MaiHD Mark Zhuang Markus Tavenrath Masaya, Kato <62578291+msy-kato@users.noreply.github.com> Mathieu Baudier Mathijs de Bruin Matt Stephenson Max Krasnyansky Max Krasnyansky Mayank Kumar Pal Meng, Hengyu Mengqing Cao Metal Whale <45712559+metalwhale@users.noreply.github.com> Michael Klimenko Michael Podvitskiy Michael Verrilli Molly Sophia Natsu Neo Zhang <14088817+arthw@users.noreply.github.com> Neo Zhang Jianyu Neuman Vong Nevin Nicholai Tukanov Nico Bosshard Nicolò Scipione Nikita Sarychev <42014488+sARY77@users.noreply.github.com> Nouamane Tazi Olivier Chafik Olivier Chafik Ondřej Čertík Ouadie EL FAROUKI PAB Paul Tsochantaris Peter Philpax Pierre Alexandre SCHEMBRI Plamen Minev Playdev Prashant Vithule <119530321+Vithulep@users.noreply.github.com> Przemysław Pawełczyk R0CKSTAR R0CKSTAR Radoslav Gerganov Radosław Gryta Ravindra Marella Ray Cromwell Reinforce-II Rémy Oudompheng Reza Rezvan Rick G <26732651+TheFlipbook@users.noreply.github.com> RiverZhou Robert Ormandi <52251610+ormandi@users.noreply.github.com> Romain Biessy Ronsor Rotem Dan Ryan Hitchman SRHMorris <69468379+SRHMorris@users.noreply.github.com> SXX Salvatore Mesoraca Sam Spilsbury Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Santtu Keskinen Sergio López Sergio López Shanshan Shen <467638484@qq.com> Shijie <821898965@qq.com> Shupei Fan Siddharth Ramakrishnan Sigbjørn Skjæret Skyler Celestinian-Sterling <80314197+Celestinian@users.noreply.github.com> Slava Primenko Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com> Steward Garcia <57494570+FSSRepo@users.noreply.github.com> Supreet Sethi Takuya Takeuchi Tamotsu Takahashi Tanmay Tanmay Sachan Timothy Cronin <40186632+4imothy@users.noreply.github.com> Tom Bailey Tom Jobbins <784313+TheBloke@users.noreply.github.com> Tony Wasserka <4840017+neobrain@users.noreply.github.com> Tristan Druyen Tyé singwa <92231658+tye-singwa@users.noreply.github.com> UEXTM.com <84163508+uextm@users.noreply.github.com> WillCorticesAI <150854901+WillCorticesAI@users.noreply.github.com> William Tambellini William Tambellini XiaotaoChen Xinpeng Dou <81913537+Dou-Git@users.noreply.github.com> Xuan Son Nguyen Yavor Ivanov YavorGIvanov Yilong Guo Yilong Guo Yuri Khrustalev Zhenwei Jin <109658203+kylo5aby@users.noreply.github.com> Zhiyuan Li Zhiyuan Li a3sh <38979186+A3shTnT@users.noreply.github.com> ag2s20150909 <19373730+ag2s20150909@users.noreply.github.com> agray3 amd-dwang amritahs-ibm apcameron <37645737+apcameron@users.noreply.github.com> appvoid <78444142+appvoid@users.noreply.github.com> ariez-xyz <41232910+ariez-xyz@users.noreply.github.com> automaticcat bandoti <141645996+bandoti@users.noreply.github.com> bmwl bobqianic <129547291+bobqianic@users.noreply.github.com> bssrdf chengchi compilade <113953597+compilade@users.noreply.github.com> compilade ddpasa <112642920+ddpasa@users.noreply.github.com> denersc dscripka fitzsim fj-y-saito <85871716+fj-y-saito@users.noreply.github.com> fraxy-v <65565042+fraxy-v@users.noreply.github.com> gn64 goerch goldwaving <77494627+goldwaving@users.noreply.github.com> haopeng <657407891@qq.com> hidenorly hipudding hydai issixx <46835150+issixx@users.noreply.github.com> jaeminSon jdomke <28772296+jdomke@users.noreply.github.com> jiez <373447296@qq.com> johnson442 <56517414+johnson442@users.noreply.github.com> junchao-loongson <68935141+junchao-loongson@users.noreply.github.com> k.h.lai katsu560 <118887472+katsu560@users.noreply.github.com> klosax 
<131523366+klosax@users.noreply.github.com> kunnis l3utterfly le.chang leejet <31925346+leejet@users.noreply.github.com> leejet leo-pony lhez liuwei-git <14815172+liuwei-git@users.noreply.github.com> luoyu-intel magicse mahorozte <41834471+mahorozte@users.noreply.github.com> mashizora <30516315+mashizora@users.noreply.github.com> matt23654 matteo ochafik otaGran pengxin99 pikalover6 <49179590+pikalover6@users.noreply.github.com> postmasters sjinzh skirodev <57715494+skirodev@users.noreply.github.com> slaren snadampal <87143774+snadampal@users.noreply.github.com> someone13574 <81528246+someone13574@users.noreply.github.com> stduhpf taher <8665427+nullhook@users.noreply.github.com> texmex76 <40733439+texmex76@users.noreply.github.com> the-crypt-keeper <84680712+the-crypt-keeper@users.noreply.github.com> thewh1teagle <61390950+thewh1teagle@users.noreply.github.com> ucag.li ulatekh uvos uvos wangshuai09 <391746016@qq.com> woachk <24752637+woachk@users.noreply.github.com> xctan yangyaofei yuri@FreeBSD zhentaoyu zhouwg <6889919+zhouwg@users.noreply.github.com> zhouwg 谢乃闻 布客飞龙 <562826179@qq.com> 旺旺碎冰冰 <38837039+Cyberhan123@users.noreply.github.com> ggml-org-ggml-3678254/CMakeLists.txt000066400000000000000000000502041512524704700171250ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories. project("ggml" C CXX ASM) ### GGML Version set(GGML_VERSION_MAJOR 0) set(GGML_VERSION_MINOR 9) set(GGML_VERSION_PATCH 5) set(GGML_VERSION_BASE "${GGML_VERSION_MAJOR}.${GGML_VERSION_MINOR}.${GGML_VERSION_PATCH}") find_program(GIT_EXE NAMES git git.exe NO_CMAKE_FIND_ROOT_PATH) if(GIT_EXE) # Get current git commit hash execute_process(COMMAND ${GIT_EXE} rev-parse --short HEAD WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE GGML_BUILD_COMMIT OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET ) # Check if the working directory is dirty (i.e., has uncommitted changes) execute_process(COMMAND ${GIT_EXE} diff-index --quiet HEAD -- . 
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} RESULT_VARIABLE GGML_GIT_DIRTY ERROR_QUIET ) endif() set(GGML_VERSION "${GGML_VERSION_BASE}") if(NOT GGML_BUILD_COMMIT) set(GGML_BUILD_COMMIT "unknown") endif() # Build the commit string with optional dirty flag if(DEFINED GGML_GIT_DIRTY AND GGML_GIT_DIRTY EQUAL 1) set(GGML_BUILD_COMMIT "${GGML_BUILD_COMMIT}-dirty") endif() include(CheckIncludeFileCXX) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) set(GGML_STANDALONE ON) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) # configure project version # TODO else() set(GGML_STANDALONE OFF) if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) endif() endif() if (EMSCRIPTEN) set(BUILD_SHARED_LIBS_DEFAULT OFF) option(GGML_WASM_SINGLE_FILE "ggml: embed WASM inside the generated ggml.js" ON) else() if (MINGW) set(BUILD_SHARED_LIBS_DEFAULT OFF) else() set(BUILD_SHARED_LIBS_DEFAULT ON) endif() endif() # remove the lib prefix on win32 mingw if (WIN32) set(CMAKE_STATIC_LIBRARY_PREFIX "") set(CMAKE_SHARED_LIBRARY_PREFIX "") set(CMAKE_SHARED_MODULE_PREFIX "") endif() option(BUILD_SHARED_LIBS "ggml: build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT}) option(GGML_BACKEND_DL "ggml: build backends as dynamic libraries (requires BUILD_SHARED_LIBS)" OFF) set(GGML_BACKEND_DIR "" CACHE PATH "ggml: directory to load dynamic backends from (requires GGML_BACKEND_DL") # # option list # # TODO: mark all options as advanced when not GGML_STANDALONE if (APPLE) set(GGML_METAL_DEFAULT ON) set(GGML_BLAS_DEFAULT ON) set(GGML_BLAS_VENDOR_DEFAULT "Apple") else() set(GGML_METAL_DEFAULT OFF) set(GGML_BLAS_DEFAULT OFF) set(GGML_BLAS_VENDOR_DEFAULT "Generic") endif() if (CMAKE_CROSSCOMPILING OR DEFINED ENV{SOURCE_DATE_EPOCH}) message(STATUS "Setting GGML_NATIVE_DEFAULT to OFF") set(GGML_NATIVE_DEFAULT OFF) else() set(GGML_NATIVE_DEFAULT ON) endif() # defaults if (NOT GGML_LLAMAFILE_DEFAULT) set(GGML_LLAMAFILE_DEFAULT OFF) endif() if (NOT GGML_CUDA_GRAPHS_DEFAULT) set(GGML_CUDA_GRAPHS_DEFAULT OFF) endif() # general option(GGML_STATIC "ggml: static link libraries" OFF) option(GGML_NATIVE "ggml: optimize the build for the current system" ${GGML_NATIVE_DEFAULT}) option(GGML_LTO "ggml: enable link time optimization" OFF) option(GGML_CCACHE "ggml: use ccache if available" ON) # debug option(GGML_ALL_WARNINGS "ggml: enable all compiler warnings" ON) option(GGML_ALL_WARNINGS_3RD_PARTY "ggml: enable all compiler warnings in 3rd party libs" OFF) option(GGML_GPROF "ggml: enable gprof" OFF) # build option(GGML_FATAL_WARNINGS "ggml: enable -Werror flag" OFF) # sanitizers option(GGML_SANITIZE_THREAD "ggml: enable thread sanitizer" OFF) option(GGML_SANITIZE_ADDRESS "ggml: enable address sanitizer" OFF) option(GGML_SANITIZE_UNDEFINED "ggml: enable undefined sanitizer" OFF) # instruction set specific if (GGML_NATIVE OR NOT GGML_NATIVE_DEFAULT) set(INS_ENB OFF) else() set(INS_ENB ON) endif() message(DEBUG "GGML_NATIVE : ${GGML_NATIVE}") message(DEBUG "GGML_NATIVE_DEFAULT : ${GGML_NATIVE_DEFAULT}") message(DEBUG "INS_ENB : ${INS_ENB}") option(GGML_CPU_HBM "ggml: use memkind for CPU HBM" OFF) option(GGML_CPU_REPACK "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON) option(GGML_CPU_KLEIDIAI "ggml: use KleidiAI optimized 
kernels if applicable" OFF) option(GGML_SSE42 "ggml: enable SSE 4.2" ${INS_ENB}) option(GGML_AVX "ggml: enable AVX" ${INS_ENB}) option(GGML_AVX_VNNI "ggml: enable AVX-VNNI" OFF) option(GGML_AVX2 "ggml: enable AVX2" ${INS_ENB}) option(GGML_BMI2 "ggml: enable BMI2" ${INS_ENB}) option(GGML_AVX512 "ggml: enable AVX512F" OFF) option(GGML_AVX512_VBMI "ggml: enable AVX512-VBMI" OFF) option(GGML_AVX512_VNNI "ggml: enable AVX512-VNNI" OFF) option(GGML_AVX512_BF16 "ggml: enable AVX512-BF16" OFF) if (NOT MSVC) # in MSVC F16C and FMA is implied with AVX2/AVX512 option(GGML_FMA "ggml: enable FMA" ${INS_ENB}) option(GGML_F16C "ggml: enable F16C" ${INS_ENB}) # MSVC does not seem to support AMX option(GGML_AMX_TILE "ggml: enable AMX-TILE" OFF) option(GGML_AMX_INT8 "ggml: enable AMX-INT8" OFF) option(GGML_AMX_BF16 "ggml: enable AMX-BF16" OFF) endif() option(GGML_LASX "ggml: enable lasx" ON) option(GGML_LSX "ggml: enable lsx" ON) option(GGML_RVV "ggml: enable rvv" ON) option(GGML_RV_ZFH "ggml: enable riscv zfh" ON) option(GGML_RV_ZVFH "ggml: enable riscv zvfh" ON) option(GGML_RV_ZICBOP "ggml: enable riscv zicbop" ON) option(GGML_RV_ZIHINTPAUSE "ggml: enable riscv zihintpause " ON) option(GGML_XTHEADVECTOR "ggml: enable xtheadvector" OFF) option(GGML_VXE "ggml: enable vxe" ${GGML_NATIVE}) option(GGML_CPU_ALL_VARIANTS "ggml: build all variants of the CPU backend (requires GGML_BACKEND_DL)" OFF) set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM") set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC") # ggml core set(GGML_SCHED_MAX_COPIES "4" CACHE STRING "ggml: max input copies for pipeline parallelism") option(GGML_CPU "ggml: enable CPU backend" ON) option(GGML_SCHED_NO_REALLOC "ggml: disallow reallocations in ggml-alloc (for debugging)" OFF) # 3rd party libs / backends option(GGML_ACCELERATE "ggml: enable Accelerate framework" ON) option(GGML_BLAS "ggml: use BLAS" ${GGML_BLAS_DEFAULT}) set(GGML_BLAS_VENDOR ${GGML_BLAS_VENDOR_DEFAULT} CACHE STRING "ggml: BLAS library vendor") option(GGML_LLAMAFILE "ggml: use LLAMAFILE" ${GGML_LLAMAFILE_DEFAULT}) option(GGML_CUDA "ggml: use CUDA" OFF) option(GGML_MUSA "ggml: use MUSA" OFF) option(GGML_CUDA_FORCE_MMQ "ggml: use mmq kernels instead of cuBLAS" OFF) option(GGML_CUDA_FORCE_CUBLAS "ggml: always use cuBLAS instead of mmq kernels" OFF) set (GGML_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING "ggml: max. 
batch size for using peer access") option(GGML_CUDA_NO_PEER_COPY "ggml: do not use peer to peer copies" OFF) option(GGML_CUDA_NO_VMM "ggml: do not try to use CUDA VMM" OFF) option(GGML_CUDA_FA "ggml: compile ggml FlashAttention CUDA kernels" ON) option(GGML_CUDA_FA_ALL_QUANTS "ggml: compile all quants for FlashAttention" OFF) option(GGML_CUDA_GRAPHS "ggml: use CUDA graphs (llama.cpp only)" ${GGML_CUDA_GRAPHS_DEFAULT}) set (GGML_CUDA_COMPRESSION_MODE "size" CACHE STRING "ggml: cuda link binary compression mode; requires cuda 12.8+") set_property(CACHE GGML_CUDA_COMPRESSION_MODE PROPERTY STRINGS "none;speed;balance;size") option(GGML_HIP "ggml: use HIP" OFF) option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON) option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF) option(GGML_HIP_MMQ_MFMA "ggml: enable MFMA MMA for CDNA in MMQ" ON) option(GGML_HIP_EXPORT_METRICS "ggml: enable kernel perf metrics output" OFF) option(GGML_MUSA_GRAPHS "ggml: use MUSA graph, experimental, unstable" OFF) option(GGML_MUSA_MUDNN_COPY "ggml: enable muDNN for accelerated copy" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF) option(GGML_VULKAN_MEMORY_DEBUG "ggml: enable Vulkan memory debug output" OFF) option(GGML_VULKAN_SHADER_DEBUG_INFO "ggml: enable Vulkan shader debug info" OFF) option(GGML_VULKAN_VALIDATE "ggml: enable Vulkan validation" OFF) option(GGML_VULKAN_RUN_TESTS "ggml: run Vulkan tests" OFF) option(GGML_WEBGPU "ggml: use WebGPU" OFF) option(GGML_WEBGPU_DEBUG "ggml: enable WebGPU debug output" OFF) option(GGML_WEBGPU_CPU_PROFILE "ggml: enable WebGPU profiling (CPU)" OFF) option(GGML_WEBGPU_GPU_PROFILE "ggml: enable WebGPU profiling (GPU)" OFF) option(GGML_WEBGPU_JSPI "ggml: use JSPI for WebGPU" ON) option(GGML_ZDNN "ggml: use zDNN" OFF) option(GGML_METAL "ggml: use Metal" ${GGML_METAL_DEFAULT}) option(GGML_METAL_NDEBUG "ggml: disable Metal debugging" OFF) option(GGML_METAL_SHADER_DEBUG "ggml: compile Metal with -fno-fast-math" OFF) option(GGML_METAL_EMBED_LIBRARY "ggml: embed Metal library" ${GGML_METAL}) set (GGML_METAL_MACOSX_VERSION_MIN "" CACHE STRING "ggml: metal minimum macOS version") set (GGML_METAL_STD "" CACHE STRING "ggml: metal standard version (-std flag)") option(GGML_OPENMP "ggml: use OpenMP" ON) option(GGML_RPC "ggml: use RPC" OFF) option(GGML_SYCL "ggml: use SYCL" OFF) option(GGML_SYCL_F16 "ggml: use 16 bit floats for sycl calculations" OFF) option(GGML_SYCL_GRAPH "ggml: enable graphs in the SYCL backend" ON) option(GGML_SYCL_DNN "ggml: enable oneDNN in the SYCL backend" ON) set (GGML_SYCL_TARGET "INTEL" CACHE STRING "ggml: sycl target device") set (GGML_SYCL_DEVICE_ARCH "" CACHE STRING "ggml: sycl device architecture") option(GGML_OPENCL "ggml: use OpenCL" OFF) option(GGML_OPENCL_PROFILING "ggml: use OpenCL profiling (increases overhead)" OFF) option(GGML_OPENCL_EMBED_KERNELS "ggml: embed kernels" ON) option(GGML_OPENCL_USE_ADRENO_KERNELS "ggml: use optimized kernels for Adreno" ON) set (GGML_OPENCL_TARGET_VERSION "300" CACHE STRING "gmml: OpenCL API version to target") option(GGML_HEXAGON "ggml: enable Hexagon backend" OFF) set(GGML_HEXAGON_FP32_QUANTIZE_GROUP_SIZE 128 CACHE STRING "ggml: quantize group size (32, 64, or 128)") # toolchain for vulkan-shaders-gen set (GGML_VULKAN_SHADERS_GEN_TOOLCHAIN "" CACHE FILEPATH "ggml: toolchain file for 
vulkan-shaders-gen") option(GGML_ZENDNN "ggml: use ZenDNN" OFF) option(ZENDNN_ROOT "ggml: path to ZenDNN installation" "") # extra artifacts option(GGML_BUILD_TESTS "ggml: build tests" ${GGML_STANDALONE}) option(GGML_BUILD_EXAMPLES "ggml: build examples" ${GGML_STANDALONE}) # # dependencies # set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED true) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED true) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) include(GNUInstallDirs) # # build the library # add_subdirectory(src) # # tests and examples # if (GGML_BUILD_TESTS) enable_testing() add_subdirectory(tests) endif () if (GGML_BUILD_EXAMPLES) add_subdirectory(examples) endif () # # install # include(CMakePackageConfigHelpers) # all public headers set(GGML_PUBLIC_HEADERS include/ggml.h include/ggml-cpu.h include/ggml-alloc.h include/ggml-backend.h include/ggml-blas.h include/ggml-cann.h include/ggml-cpp.h include/ggml-cuda.h include/ggml-opt.h include/ggml-metal.h include/ggml-rpc.h include/ggml-sycl.h include/ggml-vulkan.h include/ggml-webgpu.h include/ggml-zendnn.h include/gguf.h) set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}") #if (GGML_METAL) # set_target_properties(ggml PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/src/ggml-metal.metal") #endif() install(TARGETS ggml LIBRARY PUBLIC_HEADER) install(TARGETS ggml-base LIBRARY) if (GGML_STANDALONE) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ggml.pc.in ${CMAKE_CURRENT_BINARY_DIR}/ggml.pc @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ggml.pc DESTINATION share/pkgconfig) endif() # # Create CMake package # # Capture variables prefixed with GGML_. set(variable_set_statements " ####### Expanded from @GGML_VARIABLES_EXPANED@ by configure_package_config_file() ####### ####### Any changes to this file will be overwritten by the next CMake run ####### ") set(GGML_SHARED_LIB ${BUILD_SHARED_LIBS}) get_cmake_property(all_variables VARIABLES) foreach(variable_name IN LISTS all_variables) if(variable_name MATCHES "^GGML_") string(REPLACE ";" "\\;" variable_value "${${variable_name}}") set(variable_set_statements "${variable_set_statements}set(${variable_name} \"${variable_value}\")\n") endif() endforeach() set(GGML_VARIABLES_EXPANDED ${variable_set_statements}) # Create the CMake package and set install location. 
set(GGML_INSTALL_VERSION ${GGML_VERSION}) set(GGML_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") set(GGML_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") set(GGML_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") configure_package_config_file( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ggml-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/ggml-config.cmake INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/ggml PATH_VARS GGML_INCLUDE_INSTALL_DIR GGML_LIB_INSTALL_DIR GGML_BIN_INSTALL_DIR) write_basic_package_version_file( ${CMAKE_CURRENT_BINARY_DIR}/ggml-version.cmake VERSION ${GGML_INSTALL_VERSION} COMPATIBILITY SameMajorVersion) target_compile_definitions(ggml-base PRIVATE GGML_VERSION="${GGML_INSTALL_VERSION}" GGML_COMMIT="${GGML_BUILD_COMMIT}" ) message(STATUS "ggml version: ${GGML_INSTALL_VERSION}") message(STATUS "ggml commit: ${GGML_BUILD_COMMIT}") install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ggml-config.cmake ${CMAKE_CURRENT_BINARY_DIR}/ggml-version.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/ggml) if (MSVC) set(MSVC_WARNING_FLAGS /wd4005 # Macro redefinition /wd4244 # Conversion from one type to another type, possible loss of data /wd4267 # Conversion from 'size_t' to a smaller type, possible loss of data /wd4305 # Conversion from 'type1' to 'type2', possible loss of data /wd4566 # Conversion from 'char' to 'wchar_t', possible loss of data /wd4996 # Disable POSIX deprecation warnings /wd4702 # Unreachable code warnings ) set(MSVC_COMPILE_OPTIONS "$<$:/utf-8>" "$<$:/utf-8>" ) function(configure_msvc_target target_name) if(TARGET ${target_name}) target_compile_options(${target_name} PRIVATE ${MSVC_WARNING_FLAGS}) target_compile_options(${target_name} PRIVATE ${MSVC_COMPILE_OPTIONS}) endif() endfunction() configure_msvc_target(ggml-base) configure_msvc_target(ggml) configure_msvc_target(ggml-cpu) configure_msvc_target(ggml-cpu-x64) configure_msvc_target(ggml-cpu-sse42) configure_msvc_target(ggml-cpu-sandybridge) # __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 # skipping ggml-cpu-ivybridge # skipping ggml-cpu-piledriver configure_msvc_target(ggml-cpu-haswell) configure_msvc_target(ggml-cpu-skylakex) configure_msvc_target(ggml-cpu-cannonlake) configure_msvc_target(ggml-cpu-cascadelake) configure_msvc_target(ggml-cpu-icelake) # MSVC 2022 doesn't support BF16 intrinsics without `/arch:AVX10.1` ?! 
# https://learn.microsoft.com/en-us/cpp/intrinsics/x64-amd64-intrinsics-list?view=msvc-170 # https://learn.microsoft.com/en-us/cpp/build/reference/arch-x64?view=msvc-170 # skipping ggml-cpu-cooperlake # skipping ggml-cpu-zen4 configure_msvc_target(ggml-cpu-alderlake) # MSVC doesn't support AMX # skipping ggml-cpu-sapphirerapids if (GGML_BUILD_EXAMPLES) configure_msvc_target(common-ggml) configure_msvc_target(common) configure_msvc_target(mnist-common) configure_msvc_target(mnist-eval) configure_msvc_target(mnist-train) configure_msvc_target(gpt-2-ctx) configure_msvc_target(gpt-2-alloc) configure_msvc_target(gpt-2-backend) configure_msvc_target(gpt-2-sched) configure_msvc_target(gpt-2-quantize) configure_msvc_target(gpt-2-batched) configure_msvc_target(gpt-j) configure_msvc_target(gpt-j-quantize) configure_msvc_target(magika) configure_msvc_target(yolov3-tiny) configure_msvc_target(sam) configure_msvc_target(simple-ctx) configure_msvc_target(simple-backend) endif() if (GGML_BUILD_TESTS) configure_msvc_target(test-mul-mat) configure_msvc_target(test-arange) configure_msvc_target(test-backend-ops) configure_msvc_target(test-cont) configure_msvc_target(test-conv-transpose) configure_msvc_target(test-conv-transpose-1d) configure_msvc_target(test-conv1d) configure_msvc_target(test-conv2d) configure_msvc_target(test-conv2d-dw) configure_msvc_target(test-customop) configure_msvc_target(test-dup) configure_msvc_target(test-opt) configure_msvc_target(test-pool) endif () endif() ggml-org-ggml-3678254/CONTRIBUTING.md000066400000000000000000000005511512524704700166160ustar00rootroot00000000000000Please use [llama.cpp's contribution guidelines](https://github.com/ggml-org/llama.cpp/blob/master/CONTRIBUTING.md) for this project. *For changes to the core `ggml` library (including to the CMake build system), please open a PR in https://github.com/ggml-org/llama.cpp. Doing so will make your PR more visible, better tested and more likely to be reviewed.* ggml-org-ggml-3678254/LICENSE000066400000000000000000000020661512524704700153750ustar00rootroot00000000000000MIT License Copyright (c) 2023-2024 The ggml authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ggml-org-ggml-3678254/README.md000066400000000000000000000056001512524704700156440ustar00rootroot00000000000000# ggml [Roadmap](https://github.com/users/ggerganov/projects/7) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) Tensor library for machine learning ***Note that this project is under active development. 
\ Some of the development is currently happening in the [llama.cpp](https://github.com/ggerganov/llama.cpp) and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) repos*** ## Features - Low-level cross-platform implementation - Integer quantization support - Broad hardware support - Automatic differentiation - ADAM and L-BFGS optimizers - No third-party dependencies - Zero memory allocations during runtime ## Build ```bash git clone https://github.com/ggml-org/ggml cd ggml # install python dependencies in a virtual environment python3.10 -m venv .venv source .venv/bin/activate pip install -r requirements.txt # build the examples mkdir build && cd build cmake .. cmake --build . --config Release -j 8 ``` ## GPT inference (example) ```bash # run the GPT-2 small 117M model ../examples/gpt-2/download-ggml-model.sh 117M ./bin/gpt-2-backend -m models/gpt-2-117M/ggml-model.bin -p "This is an example" ``` For more information, checkout the corresponding programs in the [examples](examples) folder. ## Using CUDA ```bash # fix the path to point to your CUDA compiler cmake -DGGML_CUDA=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda-12.1/bin/nvcc .. ``` ## Using hipBLAS ```bash cmake -DCMAKE_C_COMPILER="$(hipconfig -l)/clang" -DCMAKE_CXX_COMPILER="$(hipconfig -l)/clang++" -DGGML_HIP=ON ``` ## Using SYCL ```bash # linux source /opt/intel/oneapi/setvars.sh cmake -G "Ninja" -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON .. # windows "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" cmake -G "Ninja" -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DGGML_SYCL=ON .. ``` ## Compiling for Android Download and unzip the NDK from this download [page](https://developer.android.com/ndk/downloads). Set the NDK_ROOT_PATH environment variable or provide the absolute path to the CMAKE_ANDROID_NDK in the command below. ```bash cmake .. \ -DCMAKE_SYSTEM_NAME=Android \ -DCMAKE_SYSTEM_VERSION=33 \ -DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \ -DCMAKE_ANDROID_NDK=$NDK_ROOT_PATH -DCMAKE_ANDROID_STL_TYPE=c++_shared ``` ```bash # create directories adb shell 'mkdir /data/local/tmp/bin' adb shell 'mkdir /data/local/tmp/models' # push the compiled binaries to the folder adb push bin/* /data/local/tmp/bin/ # push the ggml library adb push src/libggml.so /data/local/tmp/ # push model files adb push models/gpt-2-117M/ggml-model.bin /data/local/tmp/models/ adb shell cd /data/local/tmp export LD_LIBRARY_PATH=/data/local/tmp ./bin/gpt-2-backend -m models/ggml-model.bin -p "this is an example" ``` ## Resources - [Introduction to ggml](https://huggingface.co/blog/introduction-to-ggml) - [The GGUF file format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) ggml-org-ggml-3678254/ci/000077500000000000000000000000001512524704700147575ustar00rootroot00000000000000ggml-org-ggml-3678254/ci/run.sh000066400000000000000000000245331512524704700161260ustar00rootroot00000000000000#/bin/bash # # sample usage: # # mkdir tmp # # # CPU-only build # bash ./ci/run.sh ./tmp/results ./tmp/mnt # # # with CUDA support # GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt # # # With SYCL support # GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt # if [ -z "$2" ]; then echo "usage: $0 " exit 1 fi mkdir -p "$1" mkdir -p "$2" OUT=$(realpath "$1") MNT=$(realpath "$2") rm -v $OUT/*.log rm -v $OUT/*.exit rm -v $OUT/*.md sd=`dirname $0` cd $sd/../ SRC=`pwd` CMAKE_EXTRA="" CTEST_EXTRA="" if [ ! -z ${GG_BUILD_METAL} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON" fi if [ ! 
-z ${GG_BUILD_CUDA} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_CUDA=ON" if command -v nvidia-smi >/dev/null 2>&1; then CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader,nounits 2>/dev/null | head -1 | tr -d '.') if [[ -n "$CUDA_ARCH" && "$CUDA_ARCH" =~ ^[0-9]+$ ]]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH}" else echo "Warning: Using fallback CUDA architectures" CMAKE_EXTRA="${CMAKE_EXTRA} -DCMAKE_CUDA_ARCHITECTURES=61;70;75;80;86;89" fi else echo "Error: nvidia-smi not found, cannot build with CUDA" exit 1 fi fi if [ ! -z ${GG_BUILD_ROCM} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_HIP=ON" if [ -z ${GG_BUILD_AMDGPU_TARGETS} ]; then echo "Missing GG_BUILD_AMDGPU_TARGETS, please set it to your GPU architecture (e.g. gfx90a, gfx1100, etc.)" exit 1 fi CMAKE_EXTRA="${CMAKE_EXTRA} -DAMDGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}" fi if [ ! -z ${GG_BUILD_SYCL} ]; then if [ -z ${ONEAPI_ROOT} ]; then echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:" echo "source /opt/intel/oneapi/setvars.sh" exit 1 fi # Use only main GPU export ONEAPI_DEVICE_SELECTOR="level_zero:0" # Enable sysman for correct memory reporting export ZES_ENABLE_SYSMAN=1 # to circumvent precision issues on CPY operations export SYCL_PROGRAM_COMPILE_OPTIONS="-cl-fp32-correctly-rounded-divide-sqrt" CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON" fi if [ ! -z ${GG_BUILD_VULKAN} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_VULKAN=1" # if on Mac, disable METAL if [[ "$OSTYPE" == "darwin"* ]]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=OFF -DGGML_BLAS=OFF" fi fi if [ ! -z ${GG_BUILD_WEBGPU} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_WEBGPU=1" fi if [ ! -z ${GG_BUILD_MUSA} ]; then # Use qy1 by default (MTT S80) MUSA_ARCH=${MUSA_ARCH:-21} CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_MUSA=ON -DMUSA_ARCHITECTURES=${MUSA_ARCH}" fi if [ ! -z ${GG_BUILD_NO_SVE} ]; then # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm" fi ## helpers # download a file if it does not exist or if it is outdated function gg_wget { local out=$1 local url=$2 local cwd=`pwd` mkdir -p $out cd $out # should not re-download if file is the same wget -nv -N $url cd $cwd } function gg_printf { printf -- "$@" >> $OUT/README.md } function gg_run { ci=$1 set -o pipefail set -x gg_run_$ci | tee $OUT/$ci.log cur=$? echo "$cur" > $OUT/$ci.exit set +x set +o pipefail gg_sum_$ci ret=$((ret | cur)) } ## ci # ctest_debug function gg_run_ctest_debug { cd ${SRC} rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug set -e (time cmake -DCMAKE_BUILD_TYPE=Debug ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log (time ctest ${CTEST_EXTRA} --output-on-failure -E "test-opt|test-backend-ops" ) 2>&1 | tee -a $OUT/${ci}-ctest.log set +e } function gg_sum_ctest_debug { gg_printf '### %s\n\n' "${ci}" gg_printf 'Runs ctest in debug mode\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '```\n' gg_printf '%s\n' "$(cat $OUT/${ci}-ctest.log)" gg_printf '```\n' gg_printf '\n' } # ctest_release function gg_run_ctest_release { cd ${SRC} rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release set -e (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. 
) 2>&1 | tee -a $OUT/${ci}-cmake.log (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log if [ -z $GG_BUILD_LOW_PERF ]; then (time ctest ${CTEST_EXTRA} --output-on-failure ) 2>&1 | tee -a $OUT/${ci}-ctest.log else (time ctest ${CTEST_EXTRA} --output-on-failure -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log fi set +e } function gg_sum_ctest_release { gg_printf '### %s\n\n' "${ci}" gg_printf 'Runs ctest in release mode\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '```\n' gg_printf '%s\n' "$(cat $OUT/${ci}-ctest.log)" gg_printf '```\n' } # gpt_2 function gg_run_gpt_2 { cd ${SRC} gg_wget models-mnt/gpt-2 https://huggingface.co/ggerganov/ggml/resolve/main/ggml-model-gpt-2-117M.bin cd build-ci-release set -e model="../models-mnt/gpt-2/ggml-model-gpt-2-117M.bin" prompts="../examples/prompts/gpt-2.txt" (time ./bin/gpt-2-backend --model ${model} -s 1234 -n 64 -tt ${prompts} ) 2>&1 | tee -a $OUT/${ci}-tg.log (time ./bin/gpt-2-backend --model ${model} -s 1234 -n 64 -p "I believe the meaning of life is") 2>&1 | tee -a $OUT/${ci}-tg.log (time ./bin/gpt-2-sched --model ${model} -s 1234 -n 64 -p "I believe the meaning of life is") 2>&1 | tee -a $OUT/${ci}-tg.log (time ./bin/gpt-2-batched --model ${model} -s 1234 -n 64 -np 8 -p "I believe the meaning of life is") 2>&1 | tee -a $OUT/${ci}-tg.log set +e } function gg_sum_gpt_2 { gg_printf '### %s\n\n' "${ci}" gg_printf 'Runs short GPT-2 text generation\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '```\n' gg_printf '%s\n' "$(cat $OUT/${ci}-tg.log)" gg_printf '```\n' } # TODO: update ## mnist # #function gg_run_mnist { # cd ${SRC} # # cd build-ci-release # # set -e # # mkdir -p models/mnist # python3 ../examples/mnist/convert-h5-to-ggml.py ../examples/mnist/models/mnist/mnist_model.state_dict # # model_f32="./models/mnist/ggml-model-f32.bin" # samples="../examples/mnist/models/mnist/t10k-images.idx3-ubyte" # # # first command runs and exports "mnist.ggml", the second command runs the exported model # # (time ./bin/mnist ${model_f32} ${samples} ) 2>&1 | tee -a $OUT/${ci}-mnist.log # (time ./bin/mnist-cpu ./mnist.ggml ${samples} ) 2>&1 | tee -a $OUT/${ci}-mnist.log # # set +e #} # #function gg_sum_mnist { # gg_printf '### %s\n\n' "${ci}" # # gg_printf 'MNIST\n' # gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" # gg_printf '```\n' # gg_printf '%s\n' "$(cat $OUT/${ci}-mnist.log)" # gg_printf '```\n' #} # sam function gg_run_sam { cd ${SRC} gg_wget models-mnt/sam/ https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth gg_wget models-mnt/sam/ https://raw.githubusercontent.com/YavorGIvanov/sam.cpp/ceafb7467bff7ec98e0c4f952e58a9eb8fd0238b/img.jpg cd build-ci-release set -e path_models="../models-mnt/sam/" model_f16="${path_models}/ggml-model-f16.bin" img_0="${path_models}/img.jpg" python3 ../examples/sam/convert-pth-to-ggml.py ${path_models}/sam_vit_b_01ec64.pth ${path_models}/ 1 # Test default parameters (time ./bin/sam -m ${model_f16} -i ${img_0} -st 0.925 ) 2>&1 | tee -a $OUT/${ci}-main.log grep -q "point prompt" $OUT/${ci}-main.log grep -q "bbox (371, 436), (144, 168)" $OUT/${ci}-main.log || grep -q "bbox (370, 439), (144, 168)" $OUT/${ci}-main.log # Test box prompt and single mask output (time ./bin/sam -m ${model_f16} -i ${img_0} -st 0.925 -b 368,144,441,173 -sm) 2>&1 | tee -a $OUT/${ci}-main.log grep -q "box prompt" $OUT/${ci}-main.log grep -q "bbox (370, 439), (144, 169)" $OUT/${ci}-main.log || grep -q "bbox (370, 439), (144, 168)" $OUT/${ci}-main.log set +e } function gg_sum_sam { gg_printf 
'### %s\n\n' "${ci}" gg_printf 'Run SAM\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '```\n' gg_printf '%s\n' "$(cat $OUT/${ci}-main.log)" gg_printf '```\n' } # yolo function gg_run_yolo { cd ${SRC} gg_wget models-mnt/yolo/ https://huggingface.co/ggml-org/models/resolve/main/yolo/yolov3-tiny.weights gg_wget models-mnt/yolo/ https://huggingface.co/ggml-org/models/resolve/main/yolo/dog.jpg cd build-ci-release cp -r ../examples/yolo/data . set -e path_models="../models-mnt/yolo/" python3 ../examples/yolo/convert-yolov3-tiny.py ${path_models}/yolov3-tiny.weights (time ./bin/yolov3-tiny -m yolov3-tiny.gguf -i ${path_models}/dog.jpg ) 2>&1 | tee -a $OUT/${ci}-main.log grep -qE "dog: (55|56|57|58|59)%" $OUT/${ci}-main.log grep -qE "car: (50|51|52|53|54)%" $OUT/${ci}-main.log grep -qE "truck: (54|55|56|57|58)%" $OUT/${ci}-main.log grep -qE "bicycle: (57|58|59|60|61)%" $OUT/${ci}-main.log set +e } function gg_sum_yolo { gg_printf '### %s\n\n' "${ci}" gg_printf 'Run YOLO\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '```\n' gg_printf '%s\n' "$(cat $OUT/${ci}-main.log)" gg_printf '```\n' } ## main if true ; then # Create symlink: ./ggml/models-mnt -> $MNT/models/models-mnt rm -rf ${SRC}/models-mnt mnt_models=${MNT}/models mkdir -p ${mnt_models} ln -sfn ${mnt_models} ${SRC}/models-mnt # Create a fresh python3 venv and enter it if ! python3 -m venv "$MNT/venv"; then echo "Error: Failed to create Python virtual environment at $MNT/venv." exit 1 fi source "$MNT/venv/bin/activate" pip install -r ${SRC}/requirements.txt --disable-pip-version-check fi ret=0 test $ret -eq 0 && gg_run ctest_debug test $ret -eq 0 && gg_run ctest_release test $ret -eq 0 && gg_run gpt_2 #test $ret -eq 0 && gg_run mnist test $ret -eq 0 && gg_run sam test $ret -eq 0 && gg_run yolo if [ -z $GG_BUILD_LOW_PERF ]; then # run tests meant for low-perf runners date fi cat $OUT/README.md exit $ret ggml-org-ggml-3678254/cmake/000077500000000000000000000000001512524704700154445ustar00rootroot00000000000000ggml-org-ggml-3678254/cmake/BuildTypes.cmake000066400000000000000000000037651512524704700205450ustar00rootroot00000000000000# Add new build types # ReleaseGG - Release with enabled asserts SET(CMAKE_CXX_FLAGS_RELEASEGG "-O3" CACHE STRING "Flags used by the c++ compiler during release builds with enabled asserts." FORCE ) SET(CMAKE_C_FLAGS_RELEASEGG "-O3" CACHE STRING "Flags used by the compiler during release builds with enabled asserts." FORCE ) SET(CMAKE_EXE_LINKER_FLAGS_RELEASEGG "" CACHE STRING "Flags used for linking binaries during release builds with enabled asserts." FORCE ) SET(CMAKE_SHARED_LINKER_FLAGS_RELEASEGG "" CACHE STRING "Flags used by the shared libraries linker during release builds with enabled asserts." FORCE ) MARK_AS_ADVANCED( CMAKE_CXX_FLAGS_RELEASEGG CMAKE_C_FLAGS_RELEASEGG CMAKE_EXE_LINKER_FLAGS_RELEASEGG CMAKE_SHARED_LINKER_FLAGS_RELEASEGG ) # RelWithDebInfoGG - RelWithDebInfo with enabled asserts SET(CMAKE_CXX_FLAGS_RELWITHDEBINFOGG "-O2 -g" CACHE STRING "Flags used by the c++ compiler during release builds with debug symbols and enabled asserts." FORCE ) SET(CMAKE_C_FLAGS_RELWITHDEBINFOGG "-O2 -g" CACHE STRING "Flags used by the compiler during release builds with debug symbols and enabled asserts." FORCE ) SET(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFOGG "" CACHE STRING "Flags used for linking binaries during release builds with debug symbols and enabled asserts." 
FORCE ) SET(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFOGG "" CACHE STRING "Flags used by the shared libraries linker during release builds with debug symbols and enabled asserts." FORCE ) MARK_AS_ADVANCED( CMAKE_CXX_FLAGS_RELWITHDEBINFOGG CMAKE_C_FLAGS_RELWITHDEBINFOGG CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFOGG CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFOGG ) if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo" "ReleaseGG" "RelWithDebInfoGG") endif() ggml-org-ggml-3678254/cmake/GitVars.cmake000066400000000000000000000013151512524704700200250ustar00rootroot00000000000000find_package(Git) # the commit's SHA1 execute_process(COMMAND "${GIT_EXECUTABLE}" describe --match=NeVeRmAtCh --always --abbrev=8 WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" OUTPUT_VARIABLE GIT_SHA1 ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) # the date of the commit execute_process(COMMAND "${GIT_EXECUTABLE}" log -1 --format=%ad --date=local WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" OUTPUT_VARIABLE GIT_DATE ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) # the subject of the commit execute_process(COMMAND "${GIT_EXECUTABLE}" log -1 --format=%s WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" OUTPUT_VARIABLE GIT_COMMIT_SUBJECT ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) ggml-org-ggml-3678254/cmake/common.cmake000066400000000000000000000041151512524704700177370ustar00rootroot00000000000000function(ggml_get_flags CCID CCVER) set(C_FLAGS "") set(CXX_FLAGS "") if (CCID MATCHES "Clang") set(C_FLAGS -Wunreachable-code-break -Wunreachable-code-return) set(CXX_FLAGS -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi) if ( (CCID STREQUAL "Clang" AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR (CCID STREQUAL "AppleClang" AND CCVER VERSION_GREATER_EQUAL 7.3.0) ) list(APPEND C_FLAGS -Wdouble-promotion) endif() elseif (CCID STREQUAL "GNU") set(C_FLAGS -Wdouble-promotion) set(CXX_FLAGS -Wno-array-bounds) if (CCVER VERSION_GREATER_EQUAL 8.1.0) list(APPEND CXX_FLAGS -Wextra-semi) endif() endif() set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) endfunction() function(ggml_get_system_arch) if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) set(GGML_SYSTEM_ARCH "ARM" PARENT_SCOPE) elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) set(GGML_SYSTEM_ARCH "x86" PARENT_SCOPE) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc|power") set(GGML_SYSTEM_ARCH "PowerPC" PARENT_SCOPE) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") set(GGML_SYSTEM_ARCH "loongarch64" PARENT_SCOPE) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") set(GGML_SYSTEM_ARCH "riscv64" PARENT_SCOPE) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") set(GGML_SYSTEM_ARCH "s390x" PARENT_SCOPE) else() set(GGML_SYSTEM_ARCH "UNKNOWN" PARENT_SCOPE) endif() endfunction() ggml-org-ggml-3678254/cmake/ggml-config.cmake.in000066400000000000000000000154021512524704700212460ustar00rootroot00000000000000@PACKAGE_INIT@ @GGML_VARIABLES_EXPANDED@ # Find all dependencies before creating any target. 
include(CMakeFindDependencyMacro) find_dependency(Threads) if (NOT GGML_SHARED_LIB) set(GGML_CPU_INTERFACE_LINK_LIBRARIES "") set(GGML_CPU_INTERFACE_LINK_OPTIONS "") if (APPLE AND GGML_ACCELERATE) find_library(ACCELERATE_FRAMEWORK Accelerate) if(NOT ACCELERATE_FRAMEWORK) set(${CMAKE_FIND_PACKAGE_NAME}_FOUND 0) return() endif() list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES ${ACCELERATE_FRAMEWORK}) endif() if (GGML_OPENMP_ENABLED) find_dependency(OpenMP) list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES OpenMP::OpenMP_C OpenMP::OpenMP_CXX) endif() if (GGML_CPU_HBM) find_library(memkind memkind) if(NOT memkind) set(${CMAKE_FIND_PACKAGE_NAME}_FOUND 0) return() endif() list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES memkind) endif() if (GGML_BLAS) find_dependency(BLAS) list(APPEND GGML_BLAS_INTERFACE_LINK_LIBRARIES ${BLAS_LIBRARIES}) list(APPEND GGML_BLAS_INTERFACE_LINK_OPTIONS ${BLAS_LINKER_FLAGS}) endif() if (GGML_CUDA) set(GGML_CUDA_INTERFACE_LINK_LIBRARIES "") find_dependency(CUDAToolkit) if (GGML_STATIC) list(APPEND GGML_CUDA_INTERFACE_LINK_LIBRARIES $) if (WIN32) list(APPEND GGML_CUDA_INTERFACE_LINK_LIBRARIES $ $) else() list(APPEND GGML_CUDA_INTERFACE_LINK_LIBRARIES $ $) endif() endif() if (NOT GGML_CUDA_NO_VMM) list(APPEND GGML_CUDA_INTERFACE_LINK_LIBRARIES $) endif() endif() if (GGML_METAL) find_library(FOUNDATION_LIBRARY Foundation) find_library(METAL_FRAMEWORK Metal) find_library(METALKIT_FRAMEWORK MetalKit) if(NOT FOUNDATION_LIBRARY OR NOT METAL_FRAMEWORK OR NOT METALKIT_FRAMEWORK) set(${CMAKE_FIND_PACKAGE_NAME}_FOUND 0) return() endif() set(GGML_METAL_INTERFACE_LINK_LIBRARIES ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK}) endif() if (GGML_OPENCL) find_dependency(OpenCL) set(GGML_OPENCL_INTERFACE_LINK_LIBRARIES $) endif() if (GGML_VULKAN) find_dependency(Vulkan) set(GGML_VULKAN_INTERFACE_LINK_LIBRARIES $) endif() if (GGML_HIP) find_dependency(hip) find_dependency(hipblas) find_dependency(rocblas) set(GGML_HIP_INTERFACE_LINK_LIBRARIES hip::host roc::rocblas roc::hipblas) endif() if (GGML_SYCL) set(GGML_SYCL_INTERFACE_LINK_LIBRARIES "") find_package(DNNL) if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL") list(APPEND GGML_SYCL_INTERFACE_LINK_LIBRARIES DNNL::dnnl) endif() if (WIN32) find_dependency(IntelSYCL) find_dependency(MKL) list(APPEND GGML_SYCL_INTERFACE_LINK_LIBRARIES IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL) endif() endif() endif() set_and_check(GGML_INCLUDE_DIR "@PACKAGE_GGML_INCLUDE_INSTALL_DIR@") set_and_check(GGML_LIB_DIR "@PACKAGE_GGML_LIB_INSTALL_DIR@") #set_and_check(GGML_BIN_DIR "@PACKAGE_GGML_BIN_INSTALL_DIR@") if(NOT TARGET ggml::ggml) find_package(Threads REQUIRED) find_library(GGML_LIBRARY ggml REQUIRED HINTS ${GGML_LIB_DIR} NO_CMAKE_FIND_ROOT_PATH) add_library(ggml::ggml UNKNOWN IMPORTED) set_target_properties(ggml::ggml PROPERTIES IMPORTED_LOCATION "${GGML_LIBRARY}") find_library(GGML_BASE_LIBRARY ggml-base REQUIRED HINTS ${GGML_LIB_DIR} NO_CMAKE_FIND_ROOT_PATH) add_library(ggml::ggml-base UNKNOWN IMPORTED) set_target_properties(ggml::ggml-base PROPERTIES IMPORTED_LOCATION "${GGML_BASE_LIBRARY}") set(_ggml_all_targets "") if (NOT GGML_BACKEND_DL) foreach(_ggml_backend ${GGML_AVAILABLE_BACKENDS}) string(REPLACE "-" "_" _ggml_backend_pfx "${_ggml_backend}") string(TOUPPER "${_ggml_backend_pfx}" _ggml_backend_pfx) find_library(${_ggml_backend_pfx}_LIBRARY ${_ggml_backend} REQUIRED HINTS ${GGML_LIB_DIR} NO_CMAKE_FIND_ROOT_PATH) message(STATUS "Found ${${_ggml_backend_pfx}_LIBRARY}") add_library(ggml::${_ggml_backend} UNKNOWN IMPORTED) 
set_target_properties(ggml::${_ggml_backend} PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${GGML_INCLUDE_DIR}" IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" IMPORTED_LOCATION "${${_ggml_backend_pfx}_LIBRARY}" INTERFACE_COMPILE_FEATURES c_std_90 POSITION_INDEPENDENT_CODE ON) string(REGEX MATCH "^ggml-cpu" is_cpu_variant "${_ggml_backend}") if(is_cpu_variant) list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES "ggml::ggml-base") set_target_properties(ggml::${_ggml_backend} PROPERTIES INTERFACE_LINK_LIBRARIES "${GGML_CPU_INTERFACE_LINK_LIBRARIES}") if(GGML_CPU_INTERFACE_LINK_OPTIONS) set_target_properties(ggml::${_ggml_backend} PROPERTIES INTERFACE_LINK_OPTIONS "${GGML_CPU_INTERFACE_LINK_OPTIONS}") endif() else() list(APPEND ${_ggml_backend_pfx}_INTERFACE_LINK_LIBRARIES "ggml::ggml-base") set_target_properties(ggml::${_ggml_backend} PROPERTIES INTERFACE_LINK_LIBRARIES "${${_ggml_backend_pfx}_INTERFACE_LINK_LIBRARIES}") if(${_ggml_backend_pfx}_INTERFACE_LINK_OPTIONS) set_target_properties(ggml::${_ggml_backend} PROPERTIES INTERFACE_LINK_OPTIONS "${${_ggml_backend_pfx}_INTERFACE_LINK_OPTIONS}") endif() endif() list(APPEND _ggml_all_targets ggml::${_ggml_backend}) endforeach() endif() list(APPEND GGML_INTERFACE_LINK_LIBRARIES ggml::ggml-base "${_ggml_all_targets}") set_target_properties(ggml::ggml PROPERTIES INTERFACE_LINK_LIBRARIES "${GGML_INTERFACE_LINK_LIBRARIES}") add_library(ggml::all INTERFACE IMPORTED) set_target_properties(ggml::all PROPERTIES INTERFACE_LINK_LIBRARIES "${_ggml_all_targets}") endif() check_required_components(ggml) ggml-org-ggml-3678254/docs/000077500000000000000000000000001512524704700153145ustar00rootroot00000000000000ggml-org-ggml-3678254/docs/gguf.md000066400000000000000000001205451512524704700165750ustar00rootroot00000000000000# GGUF GGUF is a file format for storing models for inference with GGML and executors based on GGML. GGUF is a binary format that is designed for fast loading and saving of models, and for ease of reading. Models are traditionally developed using PyTorch or another framework, and then converted to GGUF for use in GGML. It is a successor file format to GGML, GGMF and GGJT, and is designed to be unambiguous by containing all the information needed to load a model. It is also designed to be extensible, so that new information can be added to models without breaking compatibility. For more information about the motivation behind GGUF, see [Historical State of Affairs](#historical-state-of-affairs). ## Specification GGUF is a format based on the existing GGJT, but makes a few changes to the format to make it more extensible and easier to use. The following features are desired: - Single-file deployment: they can be easily distributed and loaded, and do not require any external files for additional information. - Extensible: new features can be added to GGML-based executors/new information can be added to GGUF models without breaking compatibility with existing models. - `mmap` compatibility: models can be loaded using `mmap` for fast loading and saving. - Easy to use: models can be easily loaded and saved using a small amount of code, with no need for external libraries, regardless of the language used. - Full information: all information needed to load a model is contained in the model file, and no additional information needs to be provided by the user. The key difference between GGJT and GGUF is the use of a key-value structure for the hyperparameters (now referred to as metadata), rather than a list of untyped values. 
This allows for new metadata to be added without breaking compatibility with existing models, and to annotate the model with additional information that may be useful for inference or for identifying the model.

### GGUF Naming Convention

GGUF follows a naming convention of `<BaseName><SizeLabel><FineTune><Version><Encoding><Type><Shard>.gguf`, where each component is delimited by a `-` if present. Ultimately this is intended to make it easier for humans to get the most important details of a model at a glance. It is not intended to be perfectly parsable in the field due to the diversity of existing gguf filenames.

The components are:
1. **BaseName**: A descriptive name for the model base type or architecture.
    - This can be derived from gguf metadata `general.basename` substituting spaces for dashes.
1. **SizeLabel**: Parameter weight class (useful for leader boards) represented as `<expertCount>x<count><scale-prefix>`
    - This can be derived from gguf metadata `general.size_label` if available or calculated if missing.
    - Rounded decimal point is supported in count with a single letter scale prefix to assist in floating point exponent shown below
      - `Q`: Quadrillion parameters.
      - `T`: Trillion parameters.
      - `B`: Billion parameters.
      - `M`: Million parameters.
      - `K`: Thousand parameters.
    - Additional `-<attributes><count><scale-prefix>` can be appended as needed to indicate other attributes of interest
1. **FineTune**: A descriptive name for the model fine tuning goal (e.g. Chat, Instruct, etc...)
    - This can be derived from gguf metadata `general.finetune` substituting spaces for dashes.
1. **Version**: (Optional) Denotes the model version number, formatted as `v<Major>.<Minor>`
    - If the model is missing a version number then assume `v1.0` (First Public Release)
    - This can be derived from gguf metadata `general.version`
1. **Encoding**: Indicates the weights encoding scheme that was applied to the model. Content, type mixture and arrangement, however, are determined by user code and can vary depending on project needs.
1. **Type**: Indicates the kind of gguf file and the intended purpose for it
    - If missing, then the file is by default a typical gguf tensor model file
    - `LoRA` : GGUF file is a LoRA adapter
    - `vocab` : GGUF file with only vocab data and metadata
1. **Shard**: (Optional) Indicates that the model has been split into multiple shards, formatted as `<ShardNum>-of-<ShardTotal>`.
    - *ShardNum* : Shard position in this model. Must be 5 digits padded by zeros.
      - Shard number always starts from `00001` onwards (e.g. the first shard is always `00001-of-XXXXX` rather than `00000-of-XXXXX`).
    - *ShardTotal* : Total number of shards in this model. Must be 5 digits padded by zeros.

#### Validating Above Naming Convention

At a minimum, all model files should have at least BaseName, SizeLabel and Version, in order to be easily validated as a file that is keeping with the GGUF Naming Convention. For example, if Version is omitted it is easy to mistake the Encoding for a FineTune.

To validate a filename you can use this regular expression `^(?<BaseName>[A-Za-z0-9\s]*(?:(?:-(?:(?:[A-Za-z\s][A-Za-z0-9\s]*)|(?:[0-9\s]*)))*))-(?:(?<SizeLabel>(?:\d+x)?(?:\d+\.)?\d+[A-Za-z](?:-[A-Za-z]+(\d+\.)?\d+[A-Za-z]+)?)(?:-(?<FineTune>[A-Za-z0-9\s-]+))?)?-(?:(?<Version>v\d+(?:\.\d+)*))(?:-(?<Encoding>(?!LoRA|vocab)[\w_]+))?(?:-(?<Type>LoRA|vocab))?(?:-(?<Shard>\d{5}-of-\d{5}))?\.gguf$`, which will check that you got the minimum BaseName, SizeLabel and Version present in the correct order.
For example: * `Mixtral-8x7B-v0.1-KQ2.gguf`: - Model Name: Mixtral - Expert Count: 8 - Parameter Count: 7B - Version Number: v0.1 - Weight Encoding Scheme: KQ2 * `Hermes-2-Pro-Llama-3-8B-F16.gguf`: - Model Name: Hermes 2 Pro Llama 3 - Expert Count: 0 - Parameter Count: 8B - Version Number: v1.0 - Weight Encoding Scheme: F16 - Shard: N/A * `Grok-100B-v1.0-Q4_0-00003-of-00009.gguf` - Model Name: Grok - Expert Count: 0 - Parameter Count: 100B - Version Number: v1.0 - Weight Encoding Scheme: Q4_0 - Shard: 3 out of 9 total shards
Example Node.js Regex Function

```js
#!/usr/bin/env node
const ggufRegex = /^(?<BaseName>[A-Za-z0-9\s]*(?:(?:-(?:(?:[A-Za-z\s][A-Za-z0-9\s]*)|(?:[0-9\s]*)))*))-(?:(?<SizeLabel>(?:\d+x)?(?:\d+\.)?\d+[A-Za-z](?:-[A-Za-z]+(\d+\.)?\d+[A-Za-z]+)?)(?:-(?<FineTune>[A-Za-z0-9\s-]+))?)?-(?:(?<Version>v\d+(?:\.\d+)*))(?:-(?<Encoding>(?!LoRA|vocab)[\w_]+))?(?:-(?<Type>LoRA|vocab))?(?:-(?<Shard>\d{5}-of-\d{5}))?\.gguf$/;

function parseGGUFFilename(filename) {
  const match = ggufRegex.exec(filename);
  if (!match)
    return null;
  const {BaseName = null, SizeLabel = null, FineTune = null, Version = "v1.0", Encoding = null, Type = null, Shard = null} = match.groups;
  return {BaseName: BaseName, SizeLabel: SizeLabel, FineTune: FineTune, Version: Version, Encoding: Encoding, Type: Type, Shard: Shard};
}

const testCases = [
  {filename: 'Mixtral-8x7B-v0.1-KQ2.gguf',                         expected: { BaseName: 'Mixtral',              SizeLabel: '8x7B',                 FineTune: null,       Version: 'v0.1', Encoding: 'KQ2',  Type: null, Shard: null}},
  {filename: 'Grok-100B-v1.0-Q4_0-00003-of-00009.gguf',            expected: { BaseName: 'Grok',                 SizeLabel: '100B',                 FineTune: null,       Version: 'v1.0', Encoding: 'Q4_0', Type: null, Shard: "00003-of-00009"}},
  {filename: 'Hermes-2-Pro-Llama-3-8B-v1.0-F16.gguf',              expected: { BaseName: 'Hermes-2-Pro-Llama-3', SizeLabel: '8B',                   FineTune: null,       Version: 'v1.0', Encoding: 'F16',  Type: null, Shard: null}},
  {filename: 'Phi-3-mini-3.8B-ContextLength4k-instruct-v1.0.gguf', expected: { BaseName: 'Phi-3-mini',           SizeLabel: '3.8B-ContextLength4k', FineTune: 'instruct', Version: 'v1.0', Encoding: null,   Type: null, Shard: null}},
  {filename: 'not-a-known-arrangement.gguf',                       expected: null},
];

testCases.forEach(({ filename, expected }) => {
  const result = parseGGUFFilename(filename);
  const passed = JSON.stringify(result) === JSON.stringify(expected);
  console.log(`${filename}: ${passed ? "PASS" : "FAIL"}`);
  if (!passed) {
    console.log(result);
    console.log(expected);
  }
});
```
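A writer can go the other way and compose a conforming filename from the metadata fields above. The helper below is a minimal sketch, not part of any GGUF tooling (the `compose_gguf_filename` name and its defaults are hypothetical); it simply substitutes dashes for spaces and joins the non-empty components with `-`:

```python
def compose_gguf_filename(basename: str, size_label: str, finetune: str = "",
                          version: str = "v1.0", encoding: str = "",
                          type_: str = "", shard: str = "") -> str:
    """Assemble a filename following the GGUF naming convention (illustrative helper)."""
    components = [basename.replace(" ", "-"), size_label,
                  finetune.replace(" ", "-"), version, encoding, type_, shard]
    # drop empty components and join the rest with '-'
    return "-".join(c for c in components if c) + ".gguf"

# compose_gguf_filename("Mixtral", "8x7B", version="v0.1", encoding="Q2_K")
# -> 'Mixtral-8x7B-v0.1-Q2_K.gguf'
```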
### File Structure ![image](https://github.com/ggerganov/ggml/assets/1991296/c3623641-3a1d-408e-bfaf-1b7c4e16aa63) *diagram by [@mishig25](https://github.com/mishig25) (GGUF v3)* GGUF files are structured as follows. They use a global alignment specified in the `general.alignment` metadata field, referred to as `ALIGNMENT` below. Where required, the file is padded with `0x00` bytes to the next multiple of `general.alignment`. Fields, including arrays, are written sequentially without alignment unless otherwise specified. Models are little-endian by default. They can also come in big-endian for use with big-endian computers; in this case, all values (including metadata values and tensors) will also be big-endian. At the time of writing, there is no way to determine if a model is big-endian; this may be rectified in future versions. If no additional information is provided, assume the model is little-endian. ```c enum ggml_type: uint32_t { GGML_TYPE_F32 = 0, GGML_TYPE_F16 = 1, GGML_TYPE_Q4_0 = 2, GGML_TYPE_Q4_1 = 3, // GGML_TYPE_Q4_2 = 4, support has been removed // GGML_TYPE_Q4_3 = 5, support has been removed GGML_TYPE_Q5_0 = 6, GGML_TYPE_Q5_1 = 7, GGML_TYPE_Q8_0 = 8, GGML_TYPE_Q8_1 = 9, GGML_TYPE_Q2_K = 10, GGML_TYPE_Q3_K = 11, GGML_TYPE_Q4_K = 12, GGML_TYPE_Q5_K = 13, GGML_TYPE_Q6_K = 14, GGML_TYPE_Q8_K = 15, GGML_TYPE_IQ2_XXS = 16, GGML_TYPE_IQ2_XS = 17, GGML_TYPE_IQ3_XXS = 18, GGML_TYPE_IQ1_S = 19, GGML_TYPE_IQ4_NL = 20, GGML_TYPE_IQ3_S = 21, GGML_TYPE_IQ2_S = 22, GGML_TYPE_IQ4_XS = 23, GGML_TYPE_I8 = 24, GGML_TYPE_I16 = 25, GGML_TYPE_I32 = 26, GGML_TYPE_I64 = 27, GGML_TYPE_F64 = 28, GGML_TYPE_IQ1_M = 29, GGML_TYPE_BF16 = 30, // GGML_TYPE_Q4_0_4_4 = 31, support has been removed from gguf files // GGML_TYPE_Q4_0_4_8 = 32, // GGML_TYPE_Q4_0_8_8 = 33, GGML_TYPE_TQ1_0 = 34, GGML_TYPE_TQ2_0 = 35, // GGML_TYPE_IQ4_NL_4_4 = 36, // GGML_TYPE_IQ4_NL_4_8 = 37, // GGML_TYPE_IQ4_NL_8_8 = 38, GGML_TYPE_MXFP4 = 39, // MXFP4 (1 block) GGML_TYPE_COUNT = 40, }; enum gguf_metadata_value_type: uint32_t { // The value is a 8-bit unsigned integer. GGUF_METADATA_VALUE_TYPE_UINT8 = 0, // The value is a 8-bit signed integer. GGUF_METADATA_VALUE_TYPE_INT8 = 1, // The value is a 16-bit unsigned little-endian integer. GGUF_METADATA_VALUE_TYPE_UINT16 = 2, // The value is a 16-bit signed little-endian integer. GGUF_METADATA_VALUE_TYPE_INT16 = 3, // The value is a 32-bit unsigned little-endian integer. GGUF_METADATA_VALUE_TYPE_UINT32 = 4, // The value is a 32-bit signed little-endian integer. GGUF_METADATA_VALUE_TYPE_INT32 = 5, // The value is a 32-bit IEEE754 floating point number. GGUF_METADATA_VALUE_TYPE_FLOAT32 = 6, // The value is a boolean. // 1-byte value where 0 is false and 1 is true. // Anything else is invalid, and should be treated as either the model being invalid or the reader being buggy. GGUF_METADATA_VALUE_TYPE_BOOL = 7, // The value is a UTF-8 non-null-terminated string, with length prepended. GGUF_METADATA_VALUE_TYPE_STRING = 8, // The value is an array of other values, with the length and type prepended. /// // Arrays can be nested, and the length of the array is the number of elements in the array, not the number of bytes. GGUF_METADATA_VALUE_TYPE_ARRAY = 9, // The value is a 64-bit unsigned little-endian integer. GGUF_METADATA_VALUE_TYPE_UINT64 = 10, // The value is a 64-bit signed little-endian integer. GGUF_METADATA_VALUE_TYPE_INT64 = 11, // The value is a 64-bit IEEE754 floating point number. GGUF_METADATA_VALUE_TYPE_FLOAT64 = 12, }; // A string in GGUF. 
struct gguf_string_t { // The length of the string, in bytes. uint64_t len; // The string as a UTF-8 non-null-terminated string. char string[len]; }; union gguf_metadata_value_t { uint8_t uint8; int8_t int8; uint16_t uint16; int16_t int16; uint32_t uint32; int32_t int32; float float32; uint64_t uint64; int64_t int64; double float64; bool bool_; gguf_string_t string; struct { // Any value type is valid, including arrays. gguf_metadata_value_type type; // Number of elements, not bytes uint64_t len; // The array of values. gguf_metadata_value_t array[len]; } array; }; struct gguf_metadata_kv_t { // The key of the metadata. It is a standard GGUF string, with the following caveats: // - It must be a valid ASCII string. // - It must be a hierarchical key, where each segment is `lower_snake_case` and separated by a `.`. // - It must be at most 2^16-1/65535 bytes long. // Any keys that do not follow these rules are invalid. gguf_string_t key; // The type of the value. // Must be one of the `gguf_metadata_value_type` values. gguf_metadata_value_type value_type; // The value. gguf_metadata_value_t value; }; struct gguf_header_t { // Magic number to announce that this is a GGUF file. // Must be `GGUF` at the byte level: `0x47` `0x47` `0x55` `0x46`. // Your executor might do little-endian byte order, so it might be // check for 0x46554747 and letting the endianness cancel out. // Consider being *very* explicit about the byte order here. uint32_t magic; // The version of the format implemented. // Must be `3` for version described in this spec, which introduces big-endian support. // // This version should only be increased for structural changes to the format. // Changes that do not affect the structure of the file should instead update the metadata // to signify the change. uint32_t version; // The number of tensors in the file. // This is explicit, instead of being included in the metadata, to ensure it is always present // for loading the tensors. uint64_t tensor_count; // The number of metadata key-value pairs. uint64_t metadata_kv_count; // The metadata key-value pairs. gguf_metadata_kv_t metadata_kv[metadata_kv_count]; }; uint64_t align_offset(uint64_t offset) { return offset + (ALIGNMENT - (offset % ALIGNMENT)) % ALIGNMENT; } struct gguf_tensor_info_t { // The name of the tensor. It is a standard GGUF string, with the caveat that // it must be at most 64 bytes long. gguf_string_t name; // The number of dimensions in the tensor. // Currently at most 4, but this may change in the future. uint32_t n_dimensions; // The dimensions of the tensor. uint64_t dimensions[n_dimensions]; // The type of the tensor. ggml_type type; // The offset of the tensor's data in this file in bytes. // // This offset is relative to `tensor_data`, not to the start // of the file, to make it easier for writers to write the file. // Readers should consider exposing this offset relative to the // file to make it easier to read the data. // // Must be a multiple of `ALIGNMENT`. That is, `align_offset(offset) == offset`. uint64_t offset; }; struct gguf_file_t { // The header of the file. gguf_header_t header; // Tensor infos, which can be used to locate the tensor data. gguf_tensor_info_t tensor_infos[header.tensor_count]; // Padding to the nearest multiple of `ALIGNMENT`. // // That is, if `sizeof(header) + sizeof(tensor_infos)` is not a multiple of `ALIGNMENT`, // this padding is added to make it so. 
// // This can be calculated as `align_offset(position) - position`, where `position` is // the position of the end of `tensor_infos` (i.e. `sizeof(header) + sizeof(tensor_infos)`). uint8_t _padding[]; // Tensor data. // // This is arbitrary binary data corresponding to the weights of the model. This data should be close // or identical to the data in the original model file, but may be different due to quantization or // other optimizations for inference. Any such deviations should be recorded in the metadata or as // part of the architecture definition. // // Each tensor's data must be stored within this array, and located through its `tensor_infos` entry. // The offset of each tensor's data must be a multiple of `ALIGNMENT`, and the space between tensors // should be padded to `ALIGNMENT` bytes. uint8_t tensor_data[]; }; ``` ## Standardized key-value pairs The following key-value pairs are standardized. This list may grow in the future as more use cases are discovered. Where possible, names are shared with the original model definitions to make it easier to map between the two. Not all of these are required, but they are all recommended. Keys that are required are bolded. For omitted pairs, the reader should assume that the value is unknown and either default or error as appropriate. The community can develop their own key-value pairs to carry additional data. However, these should be namespaced with the relevant community name to avoid collisions. For example, the `rustformers` community might use `rustformers.` as a prefix for all of their keys. If a particular community key is widely used, it may be promoted to a standardized key. By convention, most counts/lengths/etc are `uint64` unless otherwise specified. This is to allow for larger models to be supported in the future. Some models may use `uint32` for their values; it is recommended that readers support both. ### General #### Required - **`general.architecture: string`**: describes what architecture this model implements. All lowercase ASCII, with only `[a-z0-9]+` characters allowed. Known values include: - `llama` - `mpt` - `gptneox` - `gptj` - `gpt2` - `bloom` - `falcon` - `mamba` - `rwkv` - **`general.quantization_version: uint32`**: The version of the quantization format. Not required if the model is not quantized (i.e. no tensors are quantized). If any tensors are quantized, this _must_ be present. This is separate to the quantization scheme of the tensors itself; the quantization version may change without changing the scheme's name (e.g. the quantization scheme is Q5_K, and the quantization version is 4). - **`general.alignment: uint32`**: the global alignment to use, as described above. This can vary to allow for different alignment schemes, but it must be a multiple of 8. Some writers may not write the alignment. If the alignment is **not** specified, assume it is `32`. #### General metadata - `general.name: string`: The name of the model. This should be a human-readable name that can be used to identify the model. It should be unique within the community that the model is defined in. - `general.author: string`: The author of the model. - `general.version: string`: The version of the model. - `general.organization: string`: The organization of the model. - `general.basename: string`: The base model name / architecture of the model - `general.finetune: string`: What has the base model been optimized toward. 
- `general.description: string`: free-form description of the model including anything that isn't covered by the other fields - `general.quantized_by: string`: The name of the individual who quantized the model - `general.size_label: string`: Size class of the model, such as number of weights and experts. (Useful for leader boards) - `general.license: string`: License of the model, expressed as a [SPDX license expression](https://spdx.github.io/spdx-spec/v2-draft/SPDX-license-expressions/) (e.g. `"MIT OR Apache-2.0`). Do not include any other information, such as the license text or the URL to the license. - `general.license.name: string`: Human friendly license name - `general.license.link: string`: URL to the license. - `general.url: string`: URL to the model's homepage. This can be a GitHub repo, a paper, etc. - `general.doi: string`: Digital Object Identifier (DOI) https://www.doi.org/ - `general.uuid: string`: [Universally unique identifier](https://en.wikipedia.org/wiki/Universally_unique_identifier) - `general.repo_url: string`: URL to the model's repository such as a GitHub repo or HuggingFace repo - `general.tags: string[]`: List of tags that can be used as search terms for a search engine or social media - `general.languages: string[]`: What languages can the model speak. Encoded as [ISO 639](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) two letter codes - `general.datasets: string[]`: Links or references to datasets that the model was trained upon - `general.file_type: uint32`: An enumerated value describing the type of the majority of the tensors in the file. Optional; can be inferred from the tensor types. - `ALL_F32 = 0` - `MOSTLY_F16 = 1` - `MOSTLY_Q4_0 = 2` - `MOSTLY_Q4_1 = 3` - `MOSTLY_Q4_1_SOME_F16 = 4` - `MOSTLY_Q4_2 = 5` (support removed) - `MOSTLY_Q4_3 = 6` (support removed) - `MOSTLY_Q8_0 = 7` - `MOSTLY_Q5_0 = 8` - `MOSTLY_Q5_1 = 9` - `MOSTLY_Q2_K = 10` - `MOSTLY_Q3_K_S = 11` - `MOSTLY_Q3_K_M = 12` - `MOSTLY_Q3_K_L = 13` - `MOSTLY_Q4_K_S = 14` - `MOSTLY_Q4_K_M = 15` - `MOSTLY_Q5_K_S = 16` - `MOSTLY_Q5_K_M = 17` - `MOSTLY_Q6_K = 18` #### Source metadata Information about where this model came from. This is useful for tracking the provenance of the model, and for finding the original source if the model is modified. For a model that was converted from GGML, for example, these keys would point to the model that was converted from. - `general.source.url: string`: URL to the source of the model's homepage. This can be a GitHub repo, a paper, etc. - `general.source.doi: string`: Source Digital Object Identifier (DOI) https://www.doi.org/ - `general.source.uuid: string`: Source [Universally unique identifier](https://en.wikipedia.org/wiki/Universally_unique_identifier) - `general.source.repo_url: string`: URL to the source of the model's repository such as a GitHub repo or HuggingFace repo - `general.base_model.count: uint32`: Number of parent models - `general.base_model.{id}.name: string`: The name of the parent model. - `general.base_model.{id}.author: string`: The author of the parent model. - `general.base_model.{id}.version: string`: The version of the parent model. - `general.base_model.{id}.organization: string`: The organization of the parent model. - `general.base_model.{id}.url: string`: URL to the source of the parent model's homepage. This can be a GitHub repo, a paper, etc. 
- `general.base_model.{id}.doi: string`: Parent Digital Object Identifier (DOI) https://www.doi.org/ - `general.base_model.{id}.uuid: string`: Parent [Universally unique identifier](https://en.wikipedia.org/wiki/Universally_unique_identifier) - `general.base_model.{id}.repo_url: string`: URL to the source of the parent model's repository such as a GitHub repo or HuggingFace repo ### LLM In the following, `[llm]` is used to fill in for the name of a specific LLM architecture. For example, `llama` for LLaMA, `mpt` for MPT, etc. If mentioned in an architecture's section, it is required for that architecture, but not all keys are required for all architectures. Consult the relevant section for more information. - `[llm].context_length: uint64`: Also known as `n_ctx`. length of the context (in tokens) that the model was trained on. For most architectures, this is the hard limit on the length of the input. Architectures, like RWKV, that are not reliant on transformer-style attention may be able to handle larger inputs, but this is not guaranteed. - `[llm].embedding_length: uint64`: Also known as `n_embd`. Embedding layer size. - `[llm].block_count: uint64`: The number of blocks of attention+feed-forward layers (i.e. the bulk of the LLM). Does not include the input or embedding layers. - `[llm].feed_forward_length: uint64`: Also known as `n_ff`. The length of the feed-forward layer. - `[llm].use_parallel_residual: bool`: Whether or not the parallel residual logic should be used. - `[llm].tensor_data_layout: string`: When a model is converted to GGUF, tensors may be rearranged to improve performance. This key describes the layout of the tensor data. This is not required; if not present, it is assumed to be `reference`. - `reference`: tensors are laid out in the same order as the original model - further options can be found for each architecture in their respective sections - `[llm].expert_count: uint32`: Number of experts in MoE models (optional for non-MoE arches). - `[llm].expert_used_count: uint32`: Number of experts used during each token token evaluation (optional for non-MoE arches). #### Attention - `[llm].attention.head_count: uint64`: Also known as `n_head`. Number of attention heads. - `[llm].attention.head_count_kv: uint64`: The number of heads per group used in Grouped-Query-Attention. If not present or if present and equal to `[llm].attention.head_count`, the model does not use GQA. - `[llm].attention.max_alibi_bias: float32`: The maximum bias to use for ALiBI. - `[llm].attention.clamp_kqv: float32`: Value (`C`) to clamp the values of the `Q`, `K`, and `V` tensors between (`[-C, C]`). - `[llm].attention.layer_norm_epsilon: float32`: Layer normalization epsilon. - `[llm].attention.layer_norm_rms_epsilon: float32`: Layer RMS normalization epsilon. - `[llm].attention.key_length: uint32`: The optional size of a key head, $d_k$. If not specified, it will be `n_embd / n_head`. - `[llm].attention.value_length: uint32`: The optional size of a value head, $d_v$. If not specified, it will be `n_embd / n_head`. #### RoPE - `[llm].rope.dimension_count: uint64`: The number of rotary dimensions for RoPE. - `[llm].rope.freq_base: float32`: The base frequency for RoPE. ##### Scaling The following keys describe RoPE scaling parameters: - `[llm].rope.scaling.type: string`: Can be `none`, `linear`, or `yarn`. - `[llm].rope.scaling.factor: float32`: A scale factor for RoPE to adjust the context length. 
- `[llm].rope.scaling.original_context_length: uint32_t`: The original context length of the base model. - `[llm].rope.scaling.finetuned: bool`: True if model has been finetuned with RoPE scaling. Note that older models may not have these keys, and may instead use the following key: - `[llm].rope.scale_linear: float32`: A linear scale factor for RoPE to adjust the context length. It is recommended that models use the newer keys if possible, as they are more flexible and allow for more complex scaling schemes. Executors will need to support both indefinitely. #### SSM - `[llm].ssm.conv_kernel: uint32`: The size of the rolling/shift state. - `[llm].ssm.inner_size: uint32`: The embedding size of the states. - `[llm].ssm.state_size: uint32`: The size of the recurrent state. - `[llm].ssm.time_step_rank: uint32`: The rank of time steps. #### Models The following sections describe the metadata for each model architecture. Each key specified _must_ be present. ##### LLaMA - `llama.context_length` - `llama.embedding_length` - `llama.block_count` - `llama.feed_forward_length` - `llama.rope.dimension_count` - `llama.attention.head_count` - `llama.attention.layer_norm_rms_epsilon` ###### Optional - `llama.rope.scale` - `llama.attention.head_count_kv` - `llama.tensor_data_layout`: - `Meta AI original pth`: ```python def permute(weights: NDArray, n_head: int) -> NDArray: return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) .swapaxes(1, 2) .reshape(weights.shape)) ``` - `llama.expert_count` - `llama.expert_used_count` ##### MPT - `mpt.context_length` - `mpt.embedding_length` - `mpt.block_count` - `mpt.attention.head_count` - `mpt.attention.alibi_bias_max` - `mpt.attention.clip_kqv` - `mpt.attention.layer_norm_epsilon` ##### GPT-NeoX - `gptneox.context_length` - `gptneox.embedding_length` - `gptneox.block_count` - `gptneox.use_parallel_residual` - `gptneox.rope.dimension_count` - `gptneox.attention.head_count` - `gptneox.attention.layer_norm_epsilon` ###### Optional - `gptneox.rope.scale` ##### GPT-J - `gptj.context_length` - `gptj.embedding_length` - `gptj.block_count` - `gptj.rope.dimension_count` - `gptj.attention.head_count` - `gptj.attention.layer_norm_epsilon` ###### Optional - `gptj.rope.scale` ##### GPT-2 - `gpt2.context_length` - `gpt2.embedding_length` - `gpt2.block_count` - `gpt2.attention.head_count` - `gpt2.attention.layer_norm_epsilon` ##### BLOOM - `bloom.context_length` - `bloom.embedding_length` - `bloom.block_count` - `bloom.feed_forward_length` - `bloom.attention.head_count` - `bloom.attention.layer_norm_epsilon` ##### Falcon - `falcon.context_length` - `falcon.embedding_length` - `falcon.block_count` - `falcon.attention.head_count` - `falcon.attention.head_count_kv` - `falcon.attention.use_norm` - `falcon.attention.layer_norm_epsilon` ###### Optional - `falcon.tensor_data_layout`: - `jploski` (author of the original GGML implementation of Falcon): ```python # The original query_key_value tensor contains n_head_kv "kv groups", # each consisting of n_head/n_head_kv query weights followed by one key # and one value weight (shared by all query heads in the kv group). # This layout makes it a big pain to work with in GGML. # So we rearrange them here,, so that we have n_head query weights # followed by n_head_kv key weights followed by n_head_kv value weights, # in contiguous fashion. 
if "query_key_value" in src: qkv = model[src].view( n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) q = qkv[:, :-2 ].reshape(n_head * head_dim, head_dim * n_head) k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) model[src] = torch.cat((q,k,v)).reshape_as(model[src]) ``` ##### Mamba - `mamba.context_length` - `mamba.embedding_length` - `mamba.block_count` - `mamba.ssm.conv_kernel` - `mamba.ssm.inner_size` - `mamba.ssm.state_size` - `mamba.ssm.time_step_rank` - `mamba.attention.layer_norm_rms_epsilon` ##### RWKV The vocabulary size is the same as the number of rows in the `head` matrix. - `rwkv.architecture_version: uint32`: The only allowed value currently is 4. Version 5 is expected to appear some time in the future. - `rwkv.context_length: uint64`: Length of the context used during training or fine-tuning. RWKV is able to handle larger context than this limit, but the output quality may suffer. - `rwkv.block_count: uint64` - `rwkv.embedding_length: uint64` - `rwkv.feed_forward_length: uint64` ##### Whisper Keys that do not have types defined should be assumed to share definitions with `llm.` keys. (For example, `whisper.context_length` is equivalent to `llm.context_length`.) This is because they are both transformer models. - `whisper.encoder.context_length` - `whisper.encoder.embedding_length` - `whisper.encoder.block_count` - `whisper.encoder.mels_count: uint64` - `whisper.encoder.attention.head_count` - `whisper.decoder.context_length` - `whisper.decoder.embedding_length` - `whisper.decoder.block_count` - `whisper.decoder.attention.head_count` #### Prompting **TODO**: Include prompt format, and/or metadata about how it should be used (instruction, conversation, autocomplete, etc). ### LoRA **TODO**: Figure out what metadata is needed for LoRA. Probably desired features: - match an existing model exactly, so that it can't be misapplied - be marked as a LoRA so executors won't try to run it by itself Should this be an architecture, or should it share the details of the original model with additional fields to mark it as a LoRA? ### Tokenizer The following keys are used to describe the tokenizer of the model. It is recommended that model authors support as many of these as possible, as it will allow for better tokenization quality with supported executors. #### GGML GGML supports an embedded vocabulary that enables inference of the model, but implementations of tokenization using this vocabulary (i.e. `llama.cpp`'s tokenizer) may have lower accuracy than the original tokenizer used for the model. When a more accurate tokenizer is available and supported, it should be used instead. It is not guaranteed to be standardized across models, and may change in the future. It is recommended that model authors use a more standardized tokenizer if possible. - `tokenizer.ggml.model: string`: The name of the tokenizer model. - `llama`: Llama style SentencePiece (tokens and scores extracted from HF `tokenizer.model`) - `replit`: Replit style SentencePiece (tokens and scores extracted from HF `spiece.model`) - `gpt2`: GPT-2 / GPT-NeoX style BPE (tokens extracted from HF `tokenizer.json`) - `rwkv`: RWKV tokenizer - `tokenizer.ggml.tokens: array[string]`: A list of tokens indexed by the token ID used by the model. - `tokenizer.ggml.scores: array[float32]`: If present, the score/probability of each token. If not present, all tokens are assumed to have equal probability. 
If present, it must have the same length and index as `tokens`. - `tokenizer.ggml.token_type: array[int32]`: The token type (1=normal, 2=unknown, 3=control, 4=user defined, 5=unused, 6=byte). If present, it must have the same length and index as `tokens`. - `tokenizer.ggml.merges: array[string]`: If present, the merges of the tokenizer. If not present, the tokens are assumed to be atomic. - `tokenizer.ggml.added_tokens: array[string]`: If present, tokens that were added after training. ##### Special tokens - `tokenizer.ggml.bos_token_id: uint32`: Beginning of sequence marker - `tokenizer.ggml.eos_token_id: uint32`: End of sequence marker - `tokenizer.ggml.unknown_token_id: uint32`: Unknown token - `tokenizer.ggml.separator_token_id: uint32`: Separator token - `tokenizer.ggml.padding_token_id: uint32`: Padding token #### Hugging Face Hugging Face maintains their own `tokenizers` library that supports a wide variety of tokenizers. If your executor uses this library, it may be able to use the model's tokenizer directly. - `tokenizer.huggingface.json: string`: the entirety of the HF `tokenizer.json` for a given model (e.g. ). Included for compatibility with executors that support HF tokenizers directly. #### Other Other tokenizers may be used, but are not necessarily standardized. They may be executor-specific. They will be documented here as they are discovered/further developed. - `tokenizer.rwkv.world: string`: a RWKV World tokenizer, like [this](https://github.com/BlinkDL/ChatRWKV/blob/main/tokenizer/rwkv_vocab_v20230424.txt). This text file should be included verbatim. - `tokenizer.chat_template : string`: a Jinja template that specifies the input format expected by the model. For more details see: ### Computation graph This is a future extension and still needs to be discussed, and may necessitate a new GGUF version. At the time of writing, the primary blocker is the stabilization of the computation graph format. A sample computation graph of GGML nodes could be included in the model itself, allowing an executor to run the model without providing its own implementation of the architecture. This would allow for a more consistent experience across executors, and would allow for more complex architectures to be supported without requiring the executor to implement them. 
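Tying the sections above together, the following is a minimal sketch of how a reader might consume the fixed-size header and the key strings described in the File Structure section. It is illustrative only: the function names are hypothetical, error handling is omitted, and a little-endian file is assumed.

```python
import struct

def read_gguf_string(f):
    # gguf_string_t: uint64 length followed by that many UTF-8 bytes (no null terminator)
    (length,) = struct.unpack("<Q", f.read(8))
    return f.read(length).decode("utf-8")

def read_gguf_header(path):
    # reads magic, version, tensor_count, metadata_kv_count and the first metadata key
    with open(path, "rb") as f:
        if f.read(4) != b"GGUF":
            raise ValueError("not a GGUF file")
        version, tensor_count, metadata_kv_count = struct.unpack("<IQQ", f.read(20))
        first_key = read_gguf_string(f) if metadata_kv_count > 0 else None
        return version, tensor_count, metadata_kv_count, first_key
```

A full reader would then loop `metadata_kv_count` times, dispatching on each `gguf_metadata_value_type`, before parsing the `gguf_tensor_info_t` entries and applying `general.alignment` to locate `tensor_data`.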
## Standardized tensor names To minimize complexity and maximize compatibility, it is recommended that models using the transformer architecture use the following naming convention for their tensors: ### Base layers `AA.weight` `AA.bias` where `AA` can be: - `token_embd`: Token embedding layer - `pos_embd`: Position embedding layer - `output_norm`: Output normalization layer - `output`: Output layer ### Attention and feed-forward layer blocks `blk.N.BB.weight` `blk.N.BB.bias` where N signifies the block number a layer belongs to, and where `BB` could be: - `attn_norm`: Attention normalization layer - `attn_norm_2`: Attention normalization layer - `attn_qkv`: Attention query-key-value layer - `attn_q`: Attention query layer - `attn_k`: Attention key layer - `attn_v`: Attention value layer - `attn_output`: Attention output layer - `ffn_norm`: Feed-forward network normalization layer - `ffn_up`: Feed-forward network "up" layer - `ffn_gate`: Feed-forward network "gate" layer - `ffn_down`: Feed-forward network "down" layer - `ffn_gate_inp`: Expert-routing layer for the Feed-forward network in MoE models - `ffn_gate_exp`: Feed-forward network "gate" layer per expert in MoE models - `ffn_down_exp`: Feed-forward network "down" layer per expert in MoE models - `ffn_up_exp`: Feed-forward network "up" layer per expert in MoE models - `ssm_in`: State space model input projections layer - `ssm_conv1d`: State space model rolling/shift layer - `ssm_x`: State space model selective parametrization layer - `ssm_a`: State space model state compression layer - `ssm_d`: State space model skip connection layer - `ssm_dt`: State space model time step layer - `ssm_out`: State space model output projection layer ## Version History This document is actively updated to describe the current state of the metadata, and these changes are not tracked outside of the commits. However, the format _itself_ has changed. The following sections describe the changes to the format itself. ### v3 Adds big-endian support. ### v2 Most countable values (lengths, etc) were changed from `uint32` to `uint64` to allow for larger models to be supported in the future. ### v1 Initial version. ## Historical State of Affairs The following information is provided for context, but is not necessary to understand the rest of this document. ### Overview At present, there are three GGML file formats floating around for LLMs: - **GGML** (unversioned): baseline format, with no versioning or alignment. - **GGMF** (versioned): the same as GGML, but with versioning. Only one version exists. - **GGJT**: Aligns the tensors to allow for use with `mmap`, which requires alignment. v1, v2 and v3 are identical, but the latter versions use a different quantization scheme that is incompatible with previous versions. GGML is primarily used by the examples in `ggml`, while GGJT is used by `llama.cpp` models. Other executors may use any of the three formats, but this is not 'officially' supported. These formats share the same fundamental structure: - a magic number with an optional version number - model-specific hyperparameters, including - metadata about the model, such as the number of layers, the number of heads, etc. - a `ftype` that describes the type of the majority of the tensors, - for GGML files, the quantization version is encoded in the `ftype` divided by 1000 - an embedded vocabulary, which is a list of strings with length prepended. The GGMF/GGJT formats embed a float32 score next to the strings. 
- finally, a list of tensors with their length-prepended name, type, and (aligned, in the case of GGJT) tensor data Notably, this structure does not identify what model architecture the model belongs to, nor does it offer any flexibility for changing the structure of the hyperparameters. This means that the only way to add new hyperparameters is to add them to the end of the list, which is a breaking change for existing models. ### Drawbacks Unfortunately, over the last few months, there are a few issues that have become apparent with the existing models: - There's no way to identify which model architecture a given model is for, because that information isn't present - Similarly, existing programs cannot intelligently fail upon encountering new architectures - Adding or removing any new hyperparameters is a breaking change, which is impossible for a reader to detect without using heuristics - Each model architecture requires its own conversion script to their architecture's variant of GGML - Maintaining backwards compatibility without breaking the structure of the format requires clever tricks, like packing the quantization version into the ftype, which are not guaranteed to be picked up by readers/writers, and are not consistent between the two formats ### Why not other formats? There are a few other formats that could be used, but issues include: - requiring additional dependencies to load or save the model, which is complicated in a C environment - limited or no support for 4-bit quantization - existing cultural expectations (e.g. whether or not the model is a directory or a file) - lack of support for embedded vocabularies - lack of control over direction of future development Ultimately, it is likely that GGUF will remain necessary for the foreseeable future, and it is better to have a single format that is well-documented and supported by all executors than to contort an existing format to fit the needs of GGML. ggml-org-ggml-3678254/examples/000077500000000000000000000000001512524704700162025ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/CMakeLists.txt000066400000000000000000000015471512524704700207510ustar00rootroot00000000000000if (GGML_ALL_WARNINGS) if (NOT MSVC) set(cxx_flags # TODO(marella): Add other warnings. 
-Wpedantic -Wunused-variable -Wno-unused-function -Wno-multichar ) add_compile_options("$<$:${cxx_flags}>") endif() endif() add_library(common STATIC common.cpp) target_include_directories(common PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) add_library(common-ggml STATIC common-ggml.cpp) target_link_libraries(common-ggml PRIVATE ggml) target_include_directories(common-ggml PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) add_subdirectory(yolo) if (NOT GGML_BACKEND_DL) add_subdirectory(gpt-2) add_subdirectory(gpt-j) add_subdirectory(mnist) add_subdirectory(sam) add_subdirectory(simple) add_subdirectory(magika) endif() if (GGML_METAL) add_subdirectory(perf-metal) endif() ggml-org-ggml-3678254/examples/common-ggml.cpp000066400000000000000000000206531512524704700211300ustar00rootroot00000000000000#include "common-ggml.h" #include #include static const std::map GGML_FTYPE_MAP = { {"q4_0", GGML_FTYPE_MOSTLY_Q4_0}, {"q4_1", GGML_FTYPE_MOSTLY_Q4_1}, {"q5_0", GGML_FTYPE_MOSTLY_Q5_0}, {"q5_1", GGML_FTYPE_MOSTLY_Q5_1}, {"q8_0", GGML_FTYPE_MOSTLY_Q8_0}, {"q2_k", GGML_FTYPE_MOSTLY_Q2_K}, {"q3_k", GGML_FTYPE_MOSTLY_Q3_K}, {"q4_k", GGML_FTYPE_MOSTLY_Q4_K}, {"q5_k", GGML_FTYPE_MOSTLY_Q5_K}, {"q6_k", GGML_FTYPE_MOSTLY_Q6_K}, }; void ggml_print_ftypes(FILE * fp) { for (auto it = GGML_FTYPE_MAP.begin(); it != GGML_FTYPE_MAP.end(); it++) { fprintf(fp, " type = \"%s\" or %d\n", it->first.c_str(), it->second); } } enum ggml_ftype ggml_parse_ftype(const char * str) { enum ggml_ftype ftype; if (str[0] == 'q') { const auto it = GGML_FTYPE_MAP.find(str); if (it == GGML_FTYPE_MAP.end()) { fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, str); return GGML_FTYPE_UNKNOWN; } ftype = it->second; } else { ftype = (enum ggml_ftype) atoi(str); } return ftype; } bool ggml_common_quantize_0( std::ifstream & finp, std::ofstream & fout, const ggml_ftype ftype, const std::vector & to_quant, const std::vector & to_skip) { ggml_type qtype = GGML_TYPE_F32; switch (ftype) { case GGML_FTYPE_MOSTLY_Q4_0: qtype = GGML_TYPE_Q4_0; break; case GGML_FTYPE_MOSTLY_Q4_1: qtype = GGML_TYPE_Q4_1; break; case GGML_FTYPE_MOSTLY_Q5_0: qtype = GGML_TYPE_Q5_0; break; case GGML_FTYPE_MOSTLY_Q5_1: qtype = GGML_TYPE_Q5_1; break; case GGML_FTYPE_MOSTLY_Q8_0: qtype = GGML_TYPE_Q8_0; break; case GGML_FTYPE_MOSTLY_Q2_K: qtype = GGML_TYPE_Q2_K; break; case GGML_FTYPE_MOSTLY_Q3_K: qtype = GGML_TYPE_Q3_K; break; case GGML_FTYPE_MOSTLY_Q4_K: qtype = GGML_TYPE_Q4_K; break; case GGML_FTYPE_MOSTLY_Q5_K: qtype = GGML_TYPE_Q5_K; break; case GGML_FTYPE_MOSTLY_Q6_K: qtype = GGML_TYPE_Q6_K; break; case GGML_FTYPE_UNKNOWN: case GGML_FTYPE_ALL_F32: case GGML_FTYPE_MOSTLY_F16: case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: case GGML_FTYPE_MOSTLY_IQ2_XXS: case GGML_FTYPE_MOSTLY_IQ2_XS: case GGML_FTYPE_MOSTLY_IQ2_S: case GGML_FTYPE_MOSTLY_IQ3_XXS: case GGML_FTYPE_MOSTLY_IQ3_S: case GGML_FTYPE_MOSTLY_IQ1_S: case GGML_FTYPE_MOSTLY_IQ4_NL: case GGML_FTYPE_MOSTLY_IQ4_XS: case GGML_FTYPE_MOSTLY_IQ1_M: case GGML_FTYPE_MOSTLY_BF16: case GGML_FTYPE_MOSTLY_MXFP4: { fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype); return false; } }; if (!ggml_is_quantized(qtype)) { fprintf(stderr, "%s: invalid quantization type %d (%s)\n", __func__, qtype, ggml_type_name(qtype)); return false; } size_t total_size_org = 0; size_t total_size_new = 0; std::vector work; std::vector data_u8; std::vector data_f16; std::vector data_f32; while (true) { int32_t n_dims; int32_t length; int32_t ttype; finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); finp.read(reinterpret_cast(&length), sizeof(length)); 
finp.read(reinterpret_cast(&ttype), sizeof(ttype)); if (finp.eof()) { break; } int32_t nelements = 1; int32_t ne[4] = { 1, 1, 1, 1 }; for (int i = 0; i < n_dims; ++i) { finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); finp.read (&name[0], length); printf("%64s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], ggml_type_name((ggml_type) ttype)); bool quantize = false; // check if we should quantize this tensor for (const auto & s : to_quant) { if (std::regex_match(name, std::regex(s))) { quantize = true; break; } } // check if we should skip this tensor for (const auto & s : to_skip) { if (std::regex_match(name, std::regex(s))) { quantize = false; break; } } // quantize only 2D tensors quantize &= (n_dims == 2); if (quantize) { if (ttype != GGML_TYPE_F32 && ttype != GGML_TYPE_F16) { fprintf(stderr, "%s: unsupported ttype %d (%s) for integer quantization\n", __func__, ttype, ggml_type_name((ggml_type) ttype)); return false; } if (ttype == GGML_TYPE_F16) { data_f16.resize(nelements); finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); data_f32.resize(nelements); for (int i = 0; i < nelements; ++i) { data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); } } else { data_f32.resize(nelements); finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); } ttype = qtype; } else { const int bpe = (ttype == 0) ? sizeof(float) : sizeof(uint16_t); data_u8.resize(nelements*bpe); finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); } fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); fout.write(reinterpret_cast(&length), sizeof(length)); fout.write(reinterpret_cast(&ttype), sizeof(ttype)); for (int i = 0; i < n_dims; ++i) { fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); } fout.write(&name[0], length); if (quantize) { work.resize(nelements); // for quantization size_t cur_size = 0; switch ((ggml_type) ttype) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: { cur_size = ggml_quantize_chunk((ggml_type) ttype, data_f32.data(), work.data(), 0, nelements/ne[0], ne[0], nullptr); } break; case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_I64: case GGML_TYPE_F64: case GGML_TYPE_Q8_1: case GGML_TYPE_Q8_K: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ1_M: case GGML_TYPE_BF16: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_MXFP4: case GGML_TYPE_COUNT: { fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype)); return false; } } fout.write(reinterpret_cast(work.data()), cur_size); total_size_new += cur_size; printf("size = %8.2f MB -> %8.2f MB\n", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0); } else { printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0); fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); total_size_new += data_u8.size(); } total_size_org += nelements * sizeof(float); } printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); printf("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new/1024.0/1024.0, ftype, ggml_type_name(qtype)); return true; } 
ggml-org-ggml-3678254/examples/common-ggml.h000066400000000000000000000006321512524704700205700ustar00rootroot00000000000000#pragma once #include "ggml.h" #include #include #include enum ggml_ftype ggml_parse_ftype(const char * str); void ggml_print_ftypes(FILE * fp = stderr); bool ggml_common_quantize_0( std::ifstream & finp, std::ofstream & fout, const ggml_ftype ftype, const std::vector & to_quant, const std::vector & to_skip); ggml-org-ggml-3678254/examples/common.cpp000066400000000000000000000553531512524704700202110ustar00rootroot00000000000000#define _USE_MATH_DEFINES // for M_PI #include "common.h" #include #include #include #include #include #include #include // Function to check if the next argument exists static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) { if (i + 1 < argc && argv[i + 1][0] != '-') { return argv[++i]; } else { fprintf(stderr, "error: %s requires one argument.\n", flag.c_str()); gpt_print_usage(argc, argv, params); exit(0); } } bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { for (int i = 1; i < argc; i++) { std::string arg = argv[i]; if (arg == "-s" || arg == "--seed") { params.seed = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-p" || arg == "--prompt") { params.prompt = get_next_arg(i, argc, argv, arg, params); } else if (arg == "-n" || arg == "--n_predict") { params.n_predict = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-np" || arg == "--n_parallel") { params.n_parallel = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "--top_k") { params.top_k = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "--top_p") { params.top_p = std::stof(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "--temp") { params.temp = std::stof(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "--repeat-last-n") { params.repeat_last_n = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "--repeat-penalty") { params.repeat_penalty = std::stof(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-b" || arg == "--batch_size") { params.n_batch= std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-c" || arg == "--context") { params.n_ctx= std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") { params.n_gpu_layers = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "--ignore-eos") { params.ignore_eos = true; } else if (arg == "-m" || arg == "--model") { params.model = get_next_arg(i, argc, argv, arg, params); } else if (arg == "-i" || arg == "--interactive") { params.interactive = true; } else if (arg == "-ip" || arg == "--interactive-port") { params.interactive = true; params.interactive_port = std::stoi(get_next_arg(i, argc, argv, arg, params)); } else if (arg == "-h" || arg == "--help") { gpt_print_usage(argc, argv, params); exit(0); } else if (arg == "-f" || arg == "--file") { get_next_arg(i, argc, argv, arg, params); std::ifstream file(argv[i]); if (!file) { fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); break; } std::copy(std::istreambuf_iterator(file), std::istreambuf_iterator(), back_inserter(params.prompt)); if (params.prompt.back() == '\n') { params.prompt.pop_back(); } } else if (arg == "-tt" || arg == 
"--token_test") { params.token_test = get_next_arg(i, argc, argv, arg, params); } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); gpt_print_usage(argc, argv, params); exit(0); } } return true; } void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n"); fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); fprintf(stderr, " -p PROMPT, --prompt PROMPT\n"); fprintf(stderr, " prompt to start generation with (default: random)\n"); fprintf(stderr, " -f FNAME, --file FNAME\n"); fprintf(stderr, " load prompt from a file\n"); fprintf(stderr, " -tt TOKEN_TEST, --token_test TOKEN_TEST\n"); fprintf(stderr, " test tokenization\n"); fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d)\n", params.n_predict); fprintf(stderr, " --top_k N top-k sampling (default: %d)\n", params.top_k); fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p); fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp); fprintf(stderr, " --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled)\n", params.repeat_last_n); fprintf(stderr, " --repeat-penalty N penalize repeat sequence of tokens (default: %.2f, 1.0 = disabled)\n", (double)params.repeat_penalty); fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch); fprintf(stderr, " -c N, --context N context / KV cache size (default: %d)\n", params.n_ctx); fprintf(stderr, " --ignore-eos ignore EOS token during generation\n"); fprintf(stderr, " -ngl N, --gpu-layers N number of layers to offload to GPU on supported models (default: %d)\n", params.n_gpu_layers); fprintf(stderr, " -m FNAME, --model FNAME\n"); fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); fprintf(stderr, "\n"); } std::string gpt_random_prompt(std::mt19937 & rng) { const int r = rng() % 10; switch (r) { case 0: return "So"; case 1: return "Once upon a time"; case 2: return "When"; case 3: return "The"; case 4: return "After"; case 5: return "If"; case 6: return "import"; case 7: return "He"; case 8: return "She"; case 9: return "They"; } return "The"; } std::string trim(const std::string & s) { std::regex e("^\\s+|\\s+$"); return std::regex_replace(s, e, ""); } std::string replace(const std::string & s, const std::string & from, const std::string & to) { std::string result = s; size_t pos = 0; while ((pos = result.find(from, pos)) != std::string::npos) { result.replace(pos, from.length(), to); pos += to.length(); } return result; } void gpt_vocab::add_special_token(const std::string & token) { special_tokens.push_back(token); } std::map json_parse(const std::string & fname) { std::map result; // read file into string std::string json; { std::ifstream ifs(fname); if (!ifs) { fprintf(stderr, "Failed to open %s\n", fname.c_str()); exit(1); } json = std::string((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); } if (json[0] != '{') { return result; } // parse json { bool has_key = false; bool in_token = false; std::string str_key = ""; std::string str_val = ""; int n = json.size(); for (int i = 1; i < n; ++i) { if (!in_token) { if (json[i] == ' ') continue; if (json[i] == '"') { in_token = true; continue; } } 
else { if (json[i] == '\\' && i+1 < n) { if (has_key == false) { str_key += json[i]; } else { str_val += json[i]; } ++i; } else if (json[i] == '"') { if (has_key == false) { has_key = true; ++i; while (json[i] == ' ') ++i; ++i; // : while (json[i] == ' ') ++i; if (json[i] != '\"') { while (json[i] != ',' && json[i] != '}') { str_val += json[i++]; } has_key = false; } else { in_token = true; continue; } } else { has_key = false; } str_key = ::replace(str_key, "\\u0120", " " ); // \u0120 -> space str_key = ::replace(str_key, "\\u010a", "\n"); // \u010a -> new line str_key = ::replace(str_key, "\\\"", "\""); // \\\" -> " try { result[str_key] = std::stoi(str_val); } catch (...) { //fprintf(stderr, "%s: ignoring key '%s' with value '%s'\n", fname.c_str(), str_key.c_str(), str_val.c_str()); } str_key = ""; str_val = ""; in_token = false; continue; } if (has_key == false) { str_key += json[i]; } else { str_val += json[i]; } } } } return result; } void gpt_split_words(std::string str, std::vector& words) { const std::string pattern = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)"; const std::regex re(pattern); std::smatch m; while (std::regex_search(str, m, re)) { for (auto x : m) { words.push_back(x); } str = m.suffix(); } } std::vector gpt_tokenize(const gpt_vocab & vocab, const std::string & text) { std::vector words; // first split the text into words { std::string str = text; // Generate the subpattern from the special_tokens vector if it's not empty if (!vocab.special_tokens.empty()) { const std::regex escape(R"([\[\\\^\$\.\|\?\*\+\(\)\{\}])"); std::string special_tokens_subpattern; for (const auto & token : vocab.special_tokens) { if (!special_tokens_subpattern.empty()) { special_tokens_subpattern += "|"; } special_tokens_subpattern += std::regex_replace(token, escape, R"(\$&)"); } std::regex re(special_tokens_subpattern); std::smatch m; // Split the text by special tokens. while (std::regex_search(str, m, re)) { // Split the substrings in-between special tokens into words. gpt_split_words(m.prefix(), words); // Add matched special tokens as words. for (auto x : m) { words.push_back(x); } str = m.suffix(); } // Remaining text without special tokens will be handled below. 
} gpt_split_words(str, words); } // find the longest token that forms each word in words: std::vector tokens; for (const auto & word : words) { for (int i = 0; i < (int) word.size(); ){ for (int j = word.size() - 1; j >= i; j--){ auto cand = word.substr(i, j-i+1); auto it = vocab.token_to_id.find(cand); if (it != vocab.token_to_id.end()){ // word.substr(i, j-i+1) in vocab tokens.push_back(it->second); i = j + 1; break; } else if (j == i){ // word.substr(i, 1) has no matching fprintf(stderr, "%s: unknown token '%s'\n", __func__, word.substr(i, 1).data()); i++; } } } } return tokens; } static std::vector parse_tokens_from_string(const std::string& input, char delimiter) { std::vector output; std::stringstream ss(input); std::string token; while (std::getline(ss, token, delimiter)) { output.push_back(std::stoi(token)); } return output; } static std::map> extract_tests_from_file(const std::string & fpath_test){ if (fpath_test.empty()){ fprintf(stderr, "%s : No test file found.\n", __func__); return std::map>(); } std::map> tests; auto fin = std::ifstream(fpath_test, std::ios_base::in); const char * delimeter = " => "; const char del_tok = ','; std::string line; while (std::getline(fin, line)) { size_t delimiterPos = line.find(delimeter); if (delimiterPos != std::string::npos) { std::string text = line.substr(0, delimiterPos); std::string s_tokens = line.substr(delimiterPos + std::strlen(delimeter)); tests[text] = parse_tokens_from_string(s_tokens, del_tok); } } return tests; } void test_gpt_tokenizer(gpt_vocab & vocab, const std::string & fpath_test){ std::map> tests = extract_tests_from_file(fpath_test); size_t n_fails = 0; for (const auto & test : tests) { std::vector tokens = gpt_tokenize(vocab, test.first); if (tokens != test.second){ n_fails++; // print out failure cases fprintf(stderr, "%s : failed test: '%s'\n", __func__, test.first.c_str()); fprintf(stderr, "%s : tokens in hf: ", __func__); for (const auto & t : test.second) { fprintf(stderr, "%s(%d), ", vocab.id_to_token[t].c_str(), t); } fprintf(stderr, "\n"); fprintf(stderr, "%s : tokens in ggml: ", __func__); for (const auto & t : tokens) { fprintf(stderr, "%s(%d), ", vocab.id_to_token[t].c_str(), t); } fprintf(stderr, "\n"); } } fprintf(stderr, "%s : %zu tests failed out of %zu tests.\n", __func__, n_fails, tests.size()); } bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab) { printf("%s: loading vocab from '%s'\n", __func__, fname.c_str()); vocab.token_to_id = ::json_parse(fname); for (const auto & kv : vocab.token_to_id) { vocab.id_to_token[kv.second] = kv.first; } printf("%s: vocab size = %d\n", __func__, (int) vocab.token_to_id.size()); // print the vocabulary //for (auto kv : vocab.token_to_id) { // printf("'%s' -> %d\n", kv.first.data(), kv.second); //} return true; } gpt_vocab::id gpt_sample_top_k_top_p( const gpt_vocab & vocab, const float * logits, int top_k, double top_p, double temp, std::mt19937 & rng) { int n_logits = vocab.id_to_token.size(); std::vector> logits_id; logits_id.reserve(n_logits); { const double scale = 1.0/temp; for (int i = 0; i < n_logits; ++i) { logits_id.push_back(std::make_pair(logits[i]*scale, i)); } } // find the top K tokens std::partial_sort( logits_id.begin(), logits_id.begin() + top_k, logits_id.end(), [](const std::pair & a, const std::pair & b) { return a.first > b.first; }); logits_id.resize(top_k); double maxl = -INFINITY; for (const auto & kv : logits_id) { maxl = std::max(maxl, kv.first); } // compute probs for the top K tokens std::vector probs; 
probs.reserve(logits_id.size()); double sum = 0.0; for (const auto & kv : logits_id) { double p = exp(kv.first - maxl); probs.push_back(p); sum += p; } // normalize the probs for (auto & p : probs) { p /= sum; } if (top_p < 1.0f) { double cumsum = 0.0f; for (int i = 0; i < top_k; i++) { cumsum += probs[i]; if (cumsum >= top_p) { top_k = i + 1; probs.resize(top_k); logits_id.resize(top_k); break; } } cumsum = 1.0/cumsum; for (int i = 0; i < (int) probs.size(); i++) { probs[i] *= cumsum; } } //printf("\n"); //for (int i = 0; i < (int) probs.size(); i++) { // printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]); //} //exit(0); std::discrete_distribution<> dist(probs.begin(), probs.end()); int idx = dist(rng); return logits_id[idx].second; } gpt_vocab::id gpt_sample_top_k_top_p_repeat( const gpt_vocab & vocab, const float * logits, const int32_t * last_n_tokens_data, size_t last_n_tokens_data_size, int top_k, double top_p, double temp, int repeat_last_n, float repeat_penalty, std::mt19937 & rng) { int n_logits = vocab.id_to_token.size(); const auto * plogits = logits; const auto last_n_tokens = std::vector(last_n_tokens_data, last_n_tokens_data + last_n_tokens_data_size); if (temp <= 0) { // select the token with the highest logit directly float max_logit = plogits[0]; gpt_vocab::id max_id = 0; for (int i = 1; i < n_logits; ++i) { if (plogits[i] > max_logit) { max_logit = plogits[i]; max_id = i; } } return max_id; } std::vector> logits_id; logits_id.reserve(n_logits); { const float scale = 1.0f/temp; for (int i = 0; i < n_logits; ++i) { // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858) // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main if (repeat_last_n > 0 && std::find(last_n_tokens.end()-repeat_last_n, last_n_tokens.end(), i) != last_n_tokens.end()) { // if score < 0 then repetition penalty has to multiplied to reduce the previous token probability if (plogits[i] < 0.0f) { logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i)); } else { logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i)); } } else { logits_id.push_back(std::make_pair(plogits[i]*scale, i)); } } } // find the top K tokens std::partial_sort( logits_id.begin(), logits_id.begin() + top_k, logits_id.end(), [](const std::pair & a, const std::pair & b) { return a.first > b.first; }); logits_id.resize(top_k); double maxl = -INFINITY; for (const auto & kv : logits_id) { maxl = std::max(maxl, kv.first); } // compute probs for the top K tokens std::vector probs; probs.reserve(logits_id.size()); double sum = 0.0; for (const auto & kv : logits_id) { double p = exp(kv.first - maxl); probs.push_back(p); sum += p; } // normalize the probs for (auto & p : probs) { p /= sum; } if (top_p < 1.0f) { double cumsum = 0.0f; for (int i = 0; i < top_k; i++) { cumsum += probs[i]; if (cumsum >= top_p) { top_k = i + 1; probs.resize(top_k); logits_id.resize(top_k); break; } } cumsum = 1.0/cumsum; for (int i = 0; i < (int) probs.size(); i++) { probs[i] *= cumsum; } } // printf("\n"); // for (int i = 0; i < (int) probs.size(); i++) { // for (int i = 0; i < 10; i++) { // printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]); // } std::discrete_distribution<> dist(probs.begin(), probs.end()); int idx = dist(rng); return logits_id[idx].second; } void high_pass_filter(std::vector & data, float cutoff, float sample_rate) { const float rc = 1.0f / (2.0f * M_PI * cutoff); const float dt = 1.0f / 
sample_rate; const float alpha = dt / (rc + dt); float y = data[0]; for (size_t i = 1; i < data.size(); i++) { y = alpha * (y + data[i] - data[i - 1]); data[i] = y; } } bool vad_simple(std::vector & pcmf32, int sample_rate, int last_ms, float vad_thold, float freq_thold, bool verbose) { const int n_samples = pcmf32.size(); const int n_samples_last = (sample_rate * last_ms) / 1000; if (n_samples_last >= n_samples) { // not enough samples - assume no speech return false; } if (freq_thold > 0.0f) { high_pass_filter(pcmf32, freq_thold, sample_rate); } float energy_all = 0.0f; float energy_last = 0.0f; for (int i = 0; i < n_samples; i++) { energy_all += fabsf(pcmf32[i]); if (i >= n_samples - n_samples_last) { energy_last += fabsf(pcmf32[i]); } } energy_all /= n_samples; energy_last /= n_samples_last; if (verbose) { fprintf(stderr, "%s: energy_all: %f, energy_last: %f, vad_thold: %f, freq_thold: %f\n", __func__, energy_all, energy_last, vad_thold, freq_thold); } if (energy_last > vad_thold*energy_all) { return false; } return true; } float similarity(const std::string & s0, const std::string & s1) { const size_t len0 = s0.size() + 1; const size_t len1 = s1.size() + 1; std::vector col(len1, 0); std::vector prevCol(len1, 0); for (size_t i = 0; i < len1; i++) { prevCol[i] = i; } for (size_t i = 0; i < len0; i++) { col[0] = i; for (size_t j = 1; j < len1; j++) { col[j] = std::min(std::min(1 + col[j - 1], 1 + prevCol[j]), prevCol[j - 1] + (i > 0 && s0[i - 1] == s1[j - 1] ? 0 : 1)); } col.swap(prevCol); } const float dist = prevCol[len1 - 1]; return 1.0f - (dist / std::max(s0.size(), s1.size())); } bool is_file_exist(const char * filename) { std::ifstream infile(filename); return infile.good(); } ggml-org-ggml-3678254/examples/common.h000066400000000000000000000226351512524704700176530ustar00rootroot00000000000000// Various helper functions and utilities #pragma once #include #include #include #include #include #include #include #include // // GPT CLI argument parsing // struct gpt_params { int32_t seed = -1; // RNG seed int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); int32_t n_predict = 200; // new tokens to predict int32_t n_parallel = 1; // number of parallel streams int32_t n_batch = 32; // batch size for prompt processing int32_t n_ctx = 2048; // context size (this is the KV cache max size) int32_t n_gpu_layers = 0; // number of layers to offlload to the GPU bool ignore_eos = false; // ignore EOS token when generating text // sampling parameters int32_t top_k = 40; float top_p = 0.9f; float temp = 0.9f; int32_t repeat_last_n = 64; float repeat_penalty = 1.00f; std::string model = "models/gpt-2-117M/ggml-model.bin"; // model path std::string prompt = ""; std::string token_test = ""; bool interactive = false; int32_t interactive_port = -1; }; bool gpt_params_parse(int argc, char ** argv, gpt_params & params); void gpt_print_usage(int argc, char ** argv, const gpt_params & params); std::string gpt_random_prompt(std::mt19937 & rng); // // Vocab utils // std::string trim(const std::string & s); std::string replace( const std::string & s, const std::string & from, const std::string & to); struct gpt_vocab { using id = int32_t; using token = std::string; std::map token_to_id; std::map id_to_token; std::vector special_tokens; void add_special_token(const std::string & token); }; // poor-man's JSON parsing std::map json_parse(const std::string & fname); std::string convert_to_utf8(const std::wstring & input); std::wstring convert_to_wstring(const std::string & input); void 
gpt_split_words(std::string str, std::vector& words); // split text into tokens // // ref: https://github.com/openai/gpt-2/blob/a74da5d99abaaba920de8131d64da2862a8f213b/src/encoder.py#L53 // // Regex (Python): // r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" // // Regex (C++): // R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)" // std::vector gpt_tokenize(const gpt_vocab & vocab, const std::string & text); // test outputs of gpt_tokenize // // - compare with tokens generated by the huggingface tokenizer // - test cases are chosen based on the model's main language (under 'prompt' directory) // - if all sentences are tokenized identically, print 'All tests passed.' // - otherwise, print sentence, huggingface tokens, ggml tokens // void test_gpt_tokenizer(gpt_vocab & vocab, const std::string & fpath_test); // load the tokens from encoder.json bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab); // sample next token given probabilities for each embedding // // - consider only the top K tokens // - from them, consider only the top tokens with cumulative probability > P // // TODO: not sure if this implementation is correct // TODO: temperature is not implemented // gpt_vocab::id gpt_sample_top_k_top_p( const gpt_vocab & vocab, const float * logits, int top_k, double top_p, double temp, std::mt19937 & rng); gpt_vocab::id gpt_sample_top_k_top_p_repeat( const gpt_vocab & vocab, const float * logits, const int32_t * last_n_tokens_data, size_t last_n_tokens_data_size, int top_k, double top_p, double temp, int repeat_last_n, float repeat_penalty, std::mt19937 & rng); // // Audio utils // // Write PCM data into WAV audio file class wav_writer { private: std::ofstream file; uint32_t dataSize = 0; std::string wav_filename; bool write_header(const uint32_t sample_rate, const uint16_t bits_per_sample, const uint16_t channels) { file.write("RIFF", 4); file.write("\0\0\0\0", 4); // Placeholder for file size file.write("WAVE", 4); file.write("fmt ", 4); const uint32_t sub_chunk_size = 16; const uint16_t audio_format = 1; // PCM format const uint32_t byte_rate = sample_rate * channels * bits_per_sample / 8; const uint16_t block_align = channels * bits_per_sample / 8; file.write(reinterpret_cast(&sub_chunk_size), 4); file.write(reinterpret_cast(&audio_format), 2); file.write(reinterpret_cast(&channels), 2); file.write(reinterpret_cast(&sample_rate), 4); file.write(reinterpret_cast(&byte_rate), 4); file.write(reinterpret_cast(&block_align), 2); file.write(reinterpret_cast(&bits_per_sample), 2); file.write("data", 4); file.write("\0\0\0\0", 4); // Placeholder for data size return true; } // It is assumed that PCM data is normalized to a range from -1 to 1 bool write_audio(const float * data, size_t length) { for (size_t i = 0; i < length; ++i) { const int16_t intSample = int16_t(data[i] * 32767); file.write(reinterpret_cast(&intSample), sizeof(int16_t)); dataSize += sizeof(int16_t); } if (file.is_open()) { file.seekp(4, std::ios::beg); uint32_t fileSize = 36 + dataSize; file.write(reinterpret_cast(&fileSize), 4); file.seekp(40, std::ios::beg); file.write(reinterpret_cast(&dataSize), 4); file.seekp(0, std::ios::end); } return true; } bool open_wav(const std::string & filename) { if (filename != wav_filename) { if (file.is_open()) { file.close(); } } if (!file.is_open()) { file.open(filename, std::ios::binary); wav_filename = filename; dataSize = 0; } return file.is_open(); } public: bool open(const std::string & 
filename, const uint32_t sample_rate, const uint16_t bits_per_sample, const uint16_t channels) { if (open_wav(filename)) { write_header(sample_rate, bits_per_sample, channels); } else { return false; } return true; } bool close() { file.close(); return true; } bool write(const float * data, size_t length) { return write_audio(data, length); } ~wav_writer() { if (file.is_open()) { file.close(); } } }; // Apply a high-pass frequency filter to PCM audio // Suppresses frequencies below cutoff Hz void high_pass_filter( std::vector & data, float cutoff, float sample_rate); // Basic voice activity detection (VAD) using audio energy adaptive threshold bool vad_simple( std::vector & pcmf32, int sample_rate, int last_ms, float vad_thold, float freq_thold, bool verbose); // compute similarity between two strings using Levenshtein distance float similarity(const std::string & s0, const std::string & s1); // // Terminal utils // #define SQR(X) ((X) * (X)) #define UNCUBE(x) x < 48 ? 0 : x < 115 ? 1 : (x - 35) / 40 /** * Quantizes 24-bit RGB to xterm256 code range [16,256). */ static int rgb2xterm256(int r, int g, int b) { unsigned char cube[] = {0, 0137, 0207, 0257, 0327, 0377}; int av, ir, ig, ib, il, qr, qg, qb, ql; av = r * .299 + g * .587 + b * .114 + .5; ql = (il = av > 238 ? 23 : (av - 3) / 10) * 10 + 8; qr = cube[(ir = UNCUBE(r))]; qg = cube[(ig = UNCUBE(g))]; qb = cube[(ib = UNCUBE(b))]; if (SQR(qr - r) + SQR(qg - g) + SQR(qb - b) <= SQR(ql - r) + SQR(ql - g) + SQR(ql - b)) return ir * 36 + ig * 6 + ib + 020; return il + 0350; } static std::string set_xterm256_foreground(int r, int g, int b) { int x = rgb2xterm256(r, g, b); std::ostringstream oss; oss << "\033[38;5;" << x << "m"; return oss.str(); } // Lowest is red, middle is yellow, highest is green. Color scheme from // Paul Tol; it is colorblind friendly https://sronpersonalpages.nl/~pault const std::vector k_colors = { set_xterm256_foreground(220, 5, 12), set_xterm256_foreground(232, 96, 28), set_xterm256_foreground(241, 147, 45), set_xterm256_foreground(246, 193, 65), set_xterm256_foreground(247, 240, 86), set_xterm256_foreground(144, 201, 135), set_xterm256_foreground( 78, 178, 101), }; // ANSI formatting codes static std::string set_inverse() { return "\033[7m"; } static std::string set_underline() { return "\033[4m"; } static std::string set_dim() { return "\033[2m"; } // Style scheme for different confidence levels const std::vector k_styles = { set_inverse(), // Low confidence - inverse (highlighted) set_underline(), // Medium confidence - underlined set_dim(), // High confidence - dim }; // // Other utils // // check if file exists using ifstream bool is_file_exist(const char * filename); ggml-org-ggml-3678254/examples/gpt-2/000077500000000000000000000000001512524704700171335ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/gpt-2/CMakeLists.txt000066400000000000000000000016421512524704700216760ustar00rootroot00000000000000# # gpt-2 set(TEST_TARGET gpt-2-ctx) add_executable(${TEST_TARGET} main-ctx.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) set(TEST_TARGET gpt-2-alloc) add_executable(${TEST_TARGET} main-alloc.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) set(TEST_TARGET gpt-2-backend) add_executable(${TEST_TARGET} main-backend.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) set(TEST_TARGET gpt-2-sched) add_executable(${TEST_TARGET} main-sched.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) # # gpt-2-quantize 
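# Like the example binaries above, the quantizer is a single-source executable
# that links against the core ggml library and the shared example helpers
# (common, common-ggml).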
set(TEST_TARGET gpt-2-quantize) add_executable(${TEST_TARGET} quantize.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) # # gpt-2-batched set(TEST_TARGET gpt-2-batched) add_executable(${TEST_TARGET} main-batched.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) ggml-org-ggml-3678254/examples/gpt-2/README.md000066400000000000000000000211331512524704700204120ustar00rootroot00000000000000# gpt-2 This is a C++ example running GPT-2 inference using the [ggml](https://github.com/ggerganov/ggml) library. The program runs on the CPU - no video card is required. The [Cerebras-GPT](https://huggingface.co/cerebras) models are also supported. The example supports the following GPT-2 models: | Model | Description | Disk Size | | --- | --- | --- | | 117M | Small model | 240 MB | | 345M | Medium model | 680 MB | | 774M | Large model | 1.5 GB | | 1558M | XL model | 3.0 GB | Sample performance on MacBook M1 Pro: | Model | Size | Time / Token | | --- | --- | --- | | GPT-2 | 117M | 5 ms | | GPT-2 | 345M | 12 ms | | GPT-2 | 774M | 23 ms | | GPT-2 | 1558M | 42 ms | *TODO: add tables for Cerebras-GPT models* Sample output: ```bash $ ./bin/gpt-2 -h usage: ./bin/gpt-2 [options] options: -h, --help show this help message and exit -s SEED, --seed SEED RNG seed (default: -1) -t N, --threads N number of threads to use during computation (default: 8) -p PROMPT, --prompt PROMPT prompt to start generation with (default: random) -n N, --n_predict N number of tokens to predict (default: 200) --top_k N top-k sampling (default: 40) --top_p N top-p sampling (default: 0.9) --temp N temperature (default: 1.0) -b N, --batch_size N batch size for prompt processing (default: 8) -m FNAME, --model FNAME model path (default: models/gpt-2-117M/ggml-model.bin) $ ./bin/gpt-2 gpt2_model_load: loading model from 'models/gpt-2-117M/ggml-model.bin' gpt2_model_load: n_vocab = 50257 gpt2_model_load: n_ctx = 1024 gpt2_model_load: n_embd = 768 gpt2_model_load: n_head = 12 gpt2_model_load: n_layer = 12 gpt2_model_load: f16 = 1 gpt2_model_load: ggml ctx size = 311.12 MB gpt2_model_load: memory size = 72.00 MB, n_mem = 12288 gpt2_model_load: model size = 239.08 MB main: number of tokens in prompt = 1 So this is going to be the end of the line for us. If the Dolphins continue to do their business, it's possible that the team could make a bid to bring in new defensive coordinator Scott Linehan. Linehan's job is a little daunting, but he's a great coach and an excellent coach. I don't believe we're going to make the playoffs. We're going to have to work hard to keep our heads down and get ready to go.<|endoftext|> main: mem per token = 2048612 bytes main: load time = 106.32 ms main: sample time = 7.10 ms main: predict time = 506.40 ms / 5.06 ms per token main: total time = 629.84 ms ``` ## Downloading and converting the original models (GPT-2) You can download the original model files using the [download-model.sh](download-model.sh) Bash script. The models are in Tensorflow format, so in order to use them with ggml, you need to convert them to appropriate format. This is done via the [convert-ckpt-to-ggml.py](convert-ckpt-to-ggml.py) python script. Here is the entire process for the GPT-2 117M model (download from official site + conversion): ```bash cd ggml/build ../examples/gpt-2/download-model.sh 117M Downloading model 117M ... 
models/gpt-2-117M/checkpoint 100%[=============================>] 77 --.-KB/s in 0s models/gpt-2-117M/encoder.json 100%[=============================>] 1018K 1.20MB/s in 0.8s models/gpt-2-117M/hparams.json 100%[=============================>] 90 --.-KB/s in 0s models/gpt-2-117M/model.ckpt.data-00000-of-00001 100%[=============================>] 474.70M 1.21MB/s in 8m 39s models/gpt-2-117M/model.ckpt.index 100%[=============================>] 5.09K --.-KB/s in 0s models/gpt-2-117M/model.ckpt.meta 100%[=============================>] 460.11K 806KB/s in 0.6s models/gpt-2-117M/vocab.bpe 100%[=============================>] 445.62K 799KB/s in 0.6s Done! Model '117M' saved in 'models/gpt-2-117M/' Run the convert-ckpt-to-ggml.py script to convert the model to ggml format. python /Users/john/ggml/examples/gpt-2/convert-ckpt-to-ggml.py models/gpt-2-117M/ 1 ``` This conversion requires that you have python and Tensorflow installed on your computer. Still, if you want to avoid this, you can download the already converted ggml models as described below. ## Downloading and converting the original models (Cerebras-GPT) Clone the respective repository from here: https://huggingface.co/cerebras Use the [convert-cerebras-to-ggml.py](convert-cerebras-to-ggml.py) script to convert the model to `ggml` format: ```bash cd ggml/build git clone https://huggingface.co/cerebras/Cerebras-GPT-111M models/ python ../examples/gpt-2/convert-cerebras-to-ggml.py models/Cerebras-GPT-111M/ ``` ## Downloading the ggml model directly (GPT-2) For convenience, I will be hosting the converted ggml model files in order to make it easier to run the examples. This way, you can directly download a single binary file and start using it. No python or Tensorflow is required. Here is how to get the 117M ggml model: ```bash cd ggml/build ../examples/gpt-2/download-ggml-model.sh 117M Downloading ggml model 117M ... models/gpt-2-117M/ggml-model.bin 100%[===============================>] 239.58M 8.52MB/s in 28s Done! Model '117M' saved in 'models/gpt-2-117M/ggml-model.bin' You can now use it like this: $ ./bin/gpt-2 -m models/gpt-2-117M/ggml-model.bin -p "This is an example" ``` At some point, I might decide to stop hosting these models. So in that case, simply revert to the manual process above. ## Quantizing the models You can also try to quantize the `ggml` models via 4-bit integer quantization. Keep in mind that for smaller models, this will render them completely useless. You generally want to quantize larger models. ```bash # quantize GPT-2 F16 to Q4_0 (faster but less precise) ./bin/gpt-2-quantize models/gpt-2-1558M/ggml-model-f16.bin models/gpt-2-1558M/ggml-model-q4_0.bin 2 ./bin/gpt-2 -m models/gpt-2-1558M/ggml-model-q4_0.bin -p "This is an example" # quantize Cerebras F16 to Q4_1 (slower but more precise) ./bin/gpt-2-quantize models/Cerebras-GPT-6.7B/ggml-model-f16.bin models/Cerebras-GPT-6.7B/ggml-model-q4_1.bin 3 ./bin/gpt-2 -m models/Cerebras-GPT-6.7B/ggml-model-q4_1.bin -p "This is an example" ``` ## Batched generation example You can try the batched generation from a given prompt using the gpt-2-batched binary. 
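In the sample run below, `-np 5` requests five sequences generated in parallel from the same prompt, `-n 50` limits each sequence to 50 new tokens, and `-m`/`-p` select the model and prompt just like in the other examples.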
Sample output: ```bash $ gpt-2-batched -np 5 -m models/gpt-2-117M/ggml-model.bin -p "Hello my name is" -n 50 main: seed = 1697037431 gpt2_model_load: loading model from 'models/gpt-2-117M/ggml-model.bin' gpt2_model_load: n_vocab = 50257 gpt2_model_load: n_ctx = 1024 gpt2_model_load: n_embd = 768 gpt2_model_load: n_head = 12 gpt2_model_load: n_layer = 12 gpt2_model_load: ftype = 1 gpt2_model_load: qntvr = 0 gpt2_model_load: ggml tensor size = 320 bytes gpt2_model_load: backend buffer size = 312.72 MB ggml_init_cublas: found 1 CUDA devices: Device 0: NVIDIA GeForce GTX 1660, compute capability 7.5 gpt2_model_load: using CPU backend gpt2_model_load: memory size = 72.00 MB, n_mem = 12288 gpt2_model_load: model size = 239.08 MB extract_tests_from_file : No test file found. test_gpt_tokenizer : 0 tests failed out of 0 tests. main: compute buffer size: 3.26 MB main: generating 5 sequences ... main: prompt: 'Hello my name is' main: number of tokens in prompt = 4, first 8 tokens: 15496 616 1438 318 sequence 0: Hello my name is John. You can call me any way you want, if you want, but for my very first date, I will be on the phone with you. We're both in our early 20s, but I feel like it's all sequence 1: Hello my name is Robert, and I want to say that we're proud to have your company here on the world's largest platform for sharing your stories with us. This is a huge opportunity for our community. We have hundreds of people on this team and sequence 2: Hello my name is Jack. I'm the one who created you. Jack is a boy with a big smile and a big heart. He is a handsome guy. He loves the outdoors and loves the people he meets. He wants to be a sequence 3: Hello my name is John. I am a Canadian citizen with a large number of family in Quebec and I am interested in studying. My aim is to take up a post in the Journal of the International Academy of Sciences of Canada which I am currently finishing. sequence 4: Hello my name is Dan. I am an entrepreneur. I am a great father. I am a great husband. I am a great husband. I am a great dad. And I am a great husband. I love my life. I love main: load time = 880.80 ms main: sample time = 91.43 ms main: predict time = 2518.29 ms main: total time = 3544.32 ms ``` ggml-org-ggml-3678254/examples/gpt-2/convert-cerebras-to-ggml.py000066400000000000000000000142571512524704700243260ustar00rootroot00000000000000# Convert Cerebras models to ggml format # # ref: https://www.cerebras.net/blog/cerebras-gpt-a-family-of-open-compute-efficient-large-language-models/ # import sys import struct import json import torch import numpy as np import re from transformers import AutoModelForCausalLM # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. 
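(In this converter the inverse mapping, byte_decoder, is used further below to
turn the keys of vocab.json back into raw bytes before they are written to the
ggml model file.)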
""" bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) if len(sys.argv) < 2: print("Usage: convert-cerebras-to-ggml.py dir-model [use-f32]\n") sys.exit(1) # output in the same directory as the model dir_model = sys.argv[1] fname_out = sys.argv[1] + "/ggml-model-f16.bin" with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f: encoder = json.load(f) with open(dir_model + "/config.json", "r", encoding="utf-8") as f: hparams = json.load(f) # use 16-bit or 32-bit floats use_f16 = True if len(sys.argv) > 2: use_f16 = False fname_out = sys.argv[1] + "/ggml-model-f32.bin" model = AutoModelForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True) #print (model) list_vars = model.state_dict() #print (list_vars) print(hparams) fout = open(fname_out, "wb") fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex fout.write(struct.pack("i", hparams["vocab_size"])) fout.write(struct.pack("i", hparams["n_positions"])) fout.write(struct.pack("i", hparams["n_embd"])) fout.write(struct.pack("i", hparams["n_head"])) fout.write(struct.pack("i", hparams["n_layer"])) fout.write(struct.pack("i", use_f16)) byte_encoder = bytes_to_unicode() byte_decoder = {v:k for k, v in byte_encoder.items()} fout.write(struct.pack("i", len(encoder))) for key in encoder: text = bytearray([byte_decoder[c] for c in key]) fout.write(struct.pack("i", len(text))) fout.write(text) for name in list_vars.keys(): data = list_vars[name].squeeze().numpy() print("Processing variable: " + name + " with shape: ", data.shape) # rename headers to keep compatibility if name == "transformer.ln_f.weight": name = "model/ln_f/g" elif name == "transformer.ln_f.bias": name = "model/ln_f/b" elif name == "transformer.wte.weight": name = "model/wte" elif name == "transformer.wpe.weight": name = "model/wpe" elif name == "lm_head.weight": name = "model/lm_head" elif re.match(r"transformer.h\.\d+\.ln_1\.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_1/g" elif re.match(r"transformer.h\.\d+\.ln_1\.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_1/b" elif re.match(r"transformer.h\.\d+\.attn\.c_attn\.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_attn/w" elif re.match(r"transformer.h\.\d+\.attn\.c_attn\.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_attn/b" elif re.match(r"transformer.h\.\d+\.attn\.c_proj\.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_proj/w" elif re.match(r"transformer.h.\d+.attn.c_proj.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_proj/b" elif re.match(r"transformer.h.\d+.ln_2.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_2/g" elif re.match(r"transformer.h.\d+.ln_2.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_2/b" elif re.match(r"transformer.h.\d+.mlp.c_fc.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_fc/w" elif re.match(r"transformer.h.\d+.mlp.c_fc.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_fc/b" elif re.match(r"transformer.h.\d+.mlp.c_proj.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_proj/w" elif re.match(r"transformer.h.\d+.mlp.c_proj.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_proj/b" else: print("Unrecognized variable 
name. %s", name) # we don't need these if name.endswith("attn.masked_bias") or name.endswith(".attn.bias"): print(" Skipping variable: " + name) continue n_dims = len(data.shape); # ftype == 0 -> float32, ftype == 1 -> float16 ftype = 0; if use_f16: if (name == "model/wte" or name == "model/lm_head" or name[-2:] == "/g" or name[-2:] == "/w") and n_dims == 2: print(" Converting to float16") data = data.astype(np.float16) ftype = 1 else: print(" Converting to float32") data = data.astype(np.float32) ftype = 0 # for efficiency - transpose the projection matrices # "model/h.*/attn/c_attn/w" # "model/h.*/attn/c_proj/w" # "model/h.*/mlp/c_fc/w" # "model/h.*/mlp/c_proj/w" if name[-14:] == "/attn/c_attn/w" or \ name[-14:] == "/attn/c_proj/w" or \ name[-11:] == "/mlp/c_fc/w" or \ name[-13:] == "/mlp/c_proj/w": print(" Transposing") data = data.transpose() # header str = name.encode('utf-8') fout.write(struct.pack("iii", n_dims, len(str), ftype)) for i in range(n_dims): fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) fout.write(str); # data data.tofile(fout) fout.close() print("Done. Output file: " + fname_out) print("") ggml-org-ggml-3678254/examples/gpt-2/convert-ckpt-to-ggml.py000066400000000000000000000114111512524704700234660ustar00rootroot00000000000000# Convert a model checkpoint to a ggml compatible file # # Load the model using TensorFlow. # Iterate over all variables and write them to a binary file. # # For each variable, write the following: # - Number of dimensions (int) # - Name length (int) # - Dimensions (int[n_dims]) # - Name (char[name_length]) # - Data (float[n_dims]) # # By default, the bigger matrices are converted to 16-bit floats. # This can be disabled by adding the "use-f32" CLI argument. # # At the start of the ggml file we write the model parameters # and vocabulary. # import sys import json import struct import numpy as np import tensorflow as tf # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. 
""" bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) # helper method to convert a numpy array to different float types def convert_to_ftype(data, ftype): # fp16 if ftype == 1: return data.astype(np.float16) assert False, "Invalid ftype: " + str(ftype) if len(sys.argv) < 3: print("Usage: convert-ckpt-to-ggml.py dir-model ftype\n") print(" ftype == 0 -> float32") print(" ftype == 1 -> float16") sys.exit(1) # output in the same directory as the model dir_model = sys.argv[1] fname_out = sys.argv[1] + "/ggml-model.bin" with open(dir_model + "/encoder.json", "r", encoding="utf-8") as f: encoder = json.load(f) with open(dir_model + "/hparams.json", "r", encoding="utf-8") as f: hparams = json.load(f) # possible data types # ftype == 0 -> float32 # ftype == 1 -> float16 # # map from ftype to string ftype_str = ["f32", "f16"] ftype = 1 if len(sys.argv) > 2: ftype = int(sys.argv[2]) if ftype < 0 or ftype > 1: print("Invalid ftype: " + str(ftype)) sys.exit(1) fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" list_vars = tf.train.list_variables(dir_model) fout = open(fname_out, "wb") fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex fout.write(struct.pack("i", hparams["n_vocab"])) fout.write(struct.pack("i", hparams["n_ctx"])) fout.write(struct.pack("i", hparams["n_embd"])) fout.write(struct.pack("i", hparams["n_head"])) fout.write(struct.pack("i", hparams["n_layer"])) fout.write(struct.pack("i", ftype)) byte_encoder = bytes_to_unicode() byte_decoder = {v:k for k, v in byte_encoder.items()} fout.write(struct.pack("i", len(encoder))) for key in encoder: text = bytearray([byte_decoder[c] for c in key]) fout.write(struct.pack("i", len(text))) fout.write(text) for name, shape in list_vars: print("Processing variable: " + name + " with shape: ", shape) data = tf.train.load_variable(dir_model, name).squeeze() n_dims = len(data.shape); # for efficiency - transpose the projection matrices # "model/h.*/attn/c_attn/w" # "model/h.*/attn/c_proj/w" # "model/h.*/mlp/c_fc/w" # "model/h.*/mlp/c_proj/w" if name[-14:] == "/attn/c_attn/w" or \ name[-14:] == "/attn/c_proj/w" or \ name[-11:] == "/mlp/c_fc/w" or \ name[-13:] == "/mlp/c_proj/w": print(" Transposing") data = data.transpose() dshape = data.shape ftype_cur = 0 if ftype != 0: # match name: # "model/wte" # "model/h.*/attn/c_attn/w" # "model/h.*/attn/c_proj/w" # "model/h.*/mlp/c_fc/w" # "model/h.*/mlp/c_proj/w" if name == "model/wte" or name[-2:] == "/w": print(" Converting to " + ftype_str[ftype]) data = convert_to_ftype(data, ftype) ftype_cur = ftype else: print(" Converting to float32") data = data.astype(np.float32) ftype_cur = 0 # header str = name.encode('utf-8') fout.write(struct.pack("iii", n_dims, len(str), ftype_cur)) for i in range(n_dims): fout.write(struct.pack("i", dshape[n_dims - 1 - i])) fout.write(str); # data data.tofile(fout) fout.close() print("Done. Output file: " + fname_out) print("") ggml-org-ggml-3678254/examples/gpt-2/convert-h5-to-ggml.py000066400000000000000000000144441512524704700230520ustar00rootroot00000000000000# Convert GPT-2 h5 transformer model to ggml format # # Load the model using GPT2Model. # Iterate over all variables and write them to a binary file. 
# # For each variable, write the following: # - Number of dimensions (int) # - Name length (int) # - Dimensions (int[n_dims]) # - Name (char[name_length]) # - Data (float[n_dims]) # # By default, the bigger matrices are converted to 16-bit floats. # This can be disabled by adding the "use-f32" CLI argument. # # At the start of the ggml file we write the model parameters # and vocabulary. # import sys import struct import json import numpy as np import re from transformers import GPT2Model # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) if len(sys.argv) < 2: print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n") sys.exit(1) # output in the same directory as the model dir_model = sys.argv[1] fname_out = sys.argv[1] + "/ggml-model.bin" with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f: encoder = json.load(f) with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f: encoder_added = json.load(f) with open(dir_model + "/config.json", "r", encoding="utf-8") as f: hparams = json.load(f) # use 16-bit or 32-bit floats use_f16 = True if len(sys.argv) > 2: use_f16 = False fname_out = sys.argv[1] + "/ggml-model-f32.bin" model = GPT2Model.from_pretrained(dir_model, low_cpu_mem_usage=True) #print (model) list_vars = model.state_dict() #print (list_vars) fout = open(fname_out, "wb") fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex fout.write(struct.pack("i", hparams["vocab_size"])) fout.write(struct.pack("i", hparams["n_positions"])) fout.write(struct.pack("i", hparams["n_embd"])) fout.write(struct.pack("i", hparams["n_head"])) fout.write(struct.pack("i", hparams["n_layer"])) #fout.write(struct.pack("i", hparams["rotary_dim"])) fout.write(struct.pack("i", use_f16)) byte_encoder = bytes_to_unicode() byte_decoder = {v:k for k, v in byte_encoder.items()} fout.write(struct.pack("i", len(encoder) + len(encoder_added))) for key in encoder: text = bytearray([byte_decoder[c] for c in key]) fout.write(struct.pack("i", len(text))) fout.write(text) for key in encoder_added: text = bytearray([byte_decoder[c] for c in key]) fout.write(struct.pack("i", len(text))) fout.write(text) for name in list_vars.keys(): data = list_vars[name].squeeze().numpy() print("Processing variable: " + name + " with shape: ", data.shape) # we don't need these if name.endswith("attn.masked_bias") or name.endswith(".attn.bias"): print(" Skipping variable: " + name) continue n_dims = len(data.shape); # ftype == 0 -> float32, ftype == 1 -> float16 ftype = 0; if use_f16: if name[-7:] == ".weight" and n_dims == 2: print(" Converting to float16") data = data.astype(np.float16) ftype = 1 else: print(" Converting to float32") data = data.astype(np.float32) ftype = 0 # for 
efficiency - transpose these matrices: # "transformer.h.*.mlp.c_proj.weight if name.endswith(".mlp.c_proj.weight"): print(" Transposing") data = data.transpose() # rename headers to keep compatibility if name == "ln_f.weight": name = "model/ln_f/g" elif name == "ln_f.bias": name = "model/ln_f/b" elif name == "wte.weight": name = "model/wte" elif name == "wpe.weight": name = "model/wpe" elif re.match(r"h\.\d+\.ln_1\.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_1/g" elif re.match(r"h\.\d+\.ln_1\.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_1/b" elif re.match(r"h\.\d+\.attn\.c_attn\.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_attn/w" elif re.match(r"h\.\d+\.attn\.c_attn\.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_attn/b" elif re.match(r"h\.\d+\.attn\.c_proj\.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_proj/w" elif re.match(r"h.\d+.attn.c_proj.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/attn/c_proj/b" elif re.match(r"h.\d+.ln_2.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_2/g" elif re.match(r"h.\d+.ln_2.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/ln_2/b" elif re.match(r"h.\d+.mlp.c_fc.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_fc/w" elif re.match(r"h.\d+.mlp.c_fc.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_fc/b" elif re.match(r"h.\d+.mlp.c_proj.weight", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_proj/w" elif re.match(r"h.\d+.mlp.c_proj.bias", name): i = re.findall("\d+", name)[0] name = f"model/h{i}/mlp/c_proj/b" else: print("Unrecognized variable name. %s", name) str = name.encode('utf-8') fout.write(struct.pack("iii", n_dims, len(str), ftype)) for i in range(n_dims): fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) fout.write(str); # data data.tofile(fout) fout.close() print("Done. Output file: " + fname_out) print("") ggml-org-ggml-3678254/examples/gpt-2/download-ggml-model.sh000077500000000000000000000033301512524704700233220ustar00rootroot00000000000000#!/bin/bash # This script downloads GPT-2 model files that have already been converted to ggml format. # This way you don't have to convert them yourself. # # If you want to download the original GPT-2 model files, use the "download-model.sh" script instead. #src="https://ggml.ggerganov.com" #pfx="ggml-model-gpt-2" src="https://huggingface.co/ggerganov/ggml" pfx="resolve/main/ggml-model-gpt-2" ggml_path=$(dirname $(realpath $0)) # GPT-2 models models=( "117M" "345M" "774M" "1558M" ) # list available models function list_models { printf "\n" printf " Available models:" for model in "${models[@]}"; do printf " $model" done printf "\n\n" } if [ "$#" -ne 1 ]; then printf "Usage: $0 \n" list_models exit 1 fi model=$1 if [[ ! " ${models[@]} " =~ " ${model} " ]]; then printf "Invalid model: $model\n" list_models exit 1 fi # download ggml model printf "Downloading ggml model $model ...\n" mkdir -p models/gpt-2-$model if [ -x "$(command -v wget)" ]; then wget --quiet --show-progress -O models/gpt-2-$model/ggml-model.bin $src/$pfx-$model.bin elif [ -x "$(command -v curl)" ]; then curl -L --output models/gpt-2-$model/ggml-model.bin $src/$pfx-$model.bin else printf "Either wget or curl is required to download models.\n" exit 1 fi if [ $? 
-ne 0 ]; then printf "Failed to download ggml model $model \n" printf "Please try again later or download the original GPT-2 model files and convert them yourself.\n" exit 1 fi printf "Done! Model '$model' saved in 'models/gpt-2-$model/ggml-model.bin'\n" printf "You can now use it like this:\n\n" printf " $ ./bin/gpt-2 -m models/gpt-2-$model/ggml-model.bin -p \"This is an example\"\n" printf "\n" ggml-org-ggml-3678254/examples/gpt-2/download-model.sh000077500000000000000000000021351512524704700224000ustar00rootroot00000000000000#!/bin/bash ggml_path=$(dirname $(realpath $0)) # GPT-2 models models=( "117M" "345M" "774M" "1558M" ) # list available models function list_models { printf "\n" printf " Available models:" for model in "${models[@]}"; do printf " $model" done printf "\n\n" } if [ "$#" -ne 1 ]; then printf "Usage: $0 \n" list_models exit 1 fi model=$1 if [[ ! " ${models[@]} " =~ " ${model} " ]]; then printf "Invalid model: $model\n" list_models exit 1 fi # download model printf "Downloading model $model ...\n" mkdir -p models/gpt-2-$model for file in checkpoint encoder.json hparams.json model.ckpt.data-00000-of-00001 model.ckpt.index model.ckpt.meta vocab.bpe; do wget --quiet --show-progress -O models/gpt-2-$model/$file https://openaipublic.blob.core.windows.net/gpt-2/models/$model/$file done printf "Done! Model '$model' saved in 'models/gpt-2-$model/'\n\n" printf "Run the convert-ckpt-to-ggml.py script to convert the model to ggml format.\n" printf "\n" printf " python $ggml_path/convert-ckpt-to-ggml.py models/gpt-2-$model/\n" printf "\n" ggml-org-ggml-3678254/examples/gpt-2/main-alloc.cpp000066400000000000000000000742641512524704700216700ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif // default hparams (GPT-2 117M) struct gpt2_hparams { int32_t n_vocab = 50257; int32_t n_ctx = 1024; int32_t n_embd = 768; int32_t n_head = 12; int32_t n_layer = 12; int32_t ftype = 1; float eps = 1e-5f; }; struct gpt2_layer { // normalization struct ggml_tensor * ln_1_g; struct ggml_tensor * ln_1_b; struct ggml_tensor * ln_2_g; struct ggml_tensor * ln_2_b; // attention struct ggml_tensor * c_attn_attn_w; struct ggml_tensor * c_attn_attn_b; struct ggml_tensor * c_attn_proj_w; struct ggml_tensor * c_attn_proj_b; // mlp struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gpt2_model { gpt2_hparams hparams; // normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; struct ggml_tensor * wte; // token embedding struct ggml_tensor * wpe; // position embedding struct ggml_tensor * lm_head; // language model head std::vector layers; // key + value memory struct ggml_tensor * memory_k; struct ggml_tensor * memory_v; // struct ggml_context * ctx_w; std::map tensors; }; // load the model's weights from a file bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab) { printf("%s: loading model from '%s'\n", __func__, fname.c_str()); auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, 
"%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } // load hparams { auto & hparams = model.hparams; fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // load vocab { int32_t n_vocab = 0; fin.read((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != model.hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); return false; } std::string word; std::vector buf(128); for (int i = 0; i < n_vocab; i++) { uint32_t len; fin.read((char *) &len, sizeof(len)); buf.resize(len); fin.read((char *) buf.data(), len); word.assign(buf.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), model.hparams.ftype); return false; } auto & ctx = model.ctx_w; size_t ctx_size = 0; { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_g ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_b ctx_size += ggml_row_size(wtype, n_vocab*n_embd); // wte ctx_size += ggml_row_size(GGML_TYPE_F32 , n_ctx*n_embd); // wpe ctx_size += ggml_row_size(wtype, n_vocab*n_embd); // lm_head ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_g ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_b ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_g ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_b ctx_size += n_layer*(ggml_row_size(wtype, 3*n_embd*n_embd)); // c_attn_attn_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 3*n_embd)); // c_attn_attn_b ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_proj_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_attn_proj_b ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_fc_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_fc_b ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_proj_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_proj_b ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_k ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_v ctx_size += (6 + 12*n_layer)*512; // object 
overhead printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_tensor)); printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); } // create the ggml context { struct ggml_init_params params = { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, }; model.ctx_w = ggml_init(params); if (!model.ctx_w) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } // prepare memory for the weights { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; model.layers.resize(n_layer); model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); // map by name model.tensors["model/ln_f/g"] = model.ln_f_g; model.tensors["model/ln_f/b"] = model.ln_f_b; model.tensors["model/wte"] = model.wte; model.tensors["model/wpe"] = model.wpe; model.tensors["model/lm_head"] = model.lm_head; for (int i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // map by name model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; } } // key + value memory { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_mem = n_layer*n_ctx; const int n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); const size_t memory_size = ggml_nbytes(model.memory_k) + 
ggml_nbytes(model.memory_v); printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); } // load weights { size_t total_size = 0; bool has_lm_head = false; while (true) { int32_t n_dims; int32_t length; int32_t ttype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ttype), sizeof(ttype)); if (fin.eof()) { break; } int32_t nelements = 1; int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name) == model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str()); return false; } auto tensor = model.tensors[name]; if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.c_str(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); return false; } // for debugging if (0) { printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); } const size_t bpe = ggml_type_size(ggml_type(ttype)); if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.c_str(), ggml_nbytes(tensor), nelements*bpe); return false; } fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); // GPT-2 models share the WTE tensor as the LM head if (name == "model/wte" && has_lm_head == false) { memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor)); } if (name == "model/lm_head") { has_lm_head = true; } total_size += ggml_nbytes(tensor); } printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); } fin.close(); return true; } // build the computation graph struct ggml_cgraph * gpt2_graph( const gpt2_model & model, const int n_past, const int n_tokens) { const int N = n_tokens; const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; // since we are using ggml-alloc, this buffer only needs enough space to hold the ggml_tensor and ggml_cgraph structs, but not the tensor data static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead(); static std::vector buf(buf_size); struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf.data(), /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph() }; struct ggml_context * ctx = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph(ctx); struct ggml_tensor * embd = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N); // at this point, the tensor data is not allocated yet and cannot be set // we will find the tensor after the graph is allocated by its name, and set the data then ggml_set_name(embd, "embd"); // setting a tensor as an input will ensure that it is allocated at the beginning of the graph // this is important to ensure that the input tensors are not overwritten before they are used ggml_set_input(embd); struct 
ggml_tensor * position = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N); ggml_set_name(position, "position"); ggml_set_input(position); // wte + wpe struct ggml_tensor * inpL = ggml_add(ctx, ggml_get_rows(ctx, model.wte, embd), ggml_get_rows(ctx, model.wpe, position)); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; // norm { // [ 768, N] cur = ggml_norm(ctx, inpL, hparams.eps); // cur = ln_1_g*cur + ln_1_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, ggml_repeat(ctx, model.layers[il].ln_1_g, cur), cur), ggml_repeat(ctx, model.layers[il].ln_1_b, cur)); } // attn // [2304, 768] - model.layers[il].c_attn_attn_w // [2304, 1] - model.layers[il].c_attn_attn_b // [ 768, N] - cur (in) // [2304, N] - cur (out) // // cur = attn_w*cur + attn_b // [2304, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_attn_w, cur); cur = ggml_add(ctx, ggml_repeat(ctx, model.layers[il].c_attn_attn_b, cur), cur); } // self-attention { struct ggml_tensor * Qcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); struct ggml_tensor * Kcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); struct ggml_tensor * Vcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd); // store key and value to memory if (N >= 1) { struct ggml_tensor * k = ggml_view_1d(ctx, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); struct ggml_tensor * v = ggml_view_1d(ctx, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Vcur, v)); } // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) // [64, N, 12] struct ggml_tensor * Q = ggml_permute(ctx, ggml_cont_3d(ctx, Qcur, n_embd/n_head, n_head, N), 0, 2, 1, 3); // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) // [64, n_past + N, 12] struct ggml_tensor * K = ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), n_embd/n_head, n_head, n_past + N), 0, 2, 1, 3); // GG: flash attention //struct ggml_tensor * V = // ggml_cpy(ctx0, // ggml_permute(ctx0, // ggml_reshape_3d(ctx0, // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), // n_embd/n_head, n_head, n_past + N), // 1, 2, 0, 3), // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); // K * Q // [n_past + N, N, 12] struct ggml_tensor * KQ = ggml_mul_mat(ctx, K, Q); // KQ_scaled = KQ / sqrt(n_embd/n_head) // [n_past + N, N, 12] struct ggml_tensor * KQ_scaled = ggml_scale(ctx, KQ, 1.0f/sqrtf(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) // [n_past + N, N, 12] struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx, KQ_scaled, n_past); // KQ = soft_max(KQ_masked) // [n_past + N, N, 12] struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx, KQ_masked); // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() // [n_past + N, 64, 12] struct ggml_tensor * V_trans = ggml_cont_3d(ctx, ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), n_embd/n_head, n_head, n_past + N), 1, 2, 0, 3), n_past + N, n_embd/n_head, n_head); // KQV = transpose(V) * KQ_soft_max // [64, N, 12] struct 
ggml_tensor * KQV = ggml_mul_mat(ctx, V_trans, KQ_soft_max); // KQV_merged = KQV.permute(0, 2, 1, 3) // [64, 12, N] struct ggml_tensor * KQV_merged = ggml_permute(ctx, KQV, 0, 2, 1, 3); // cur = KQV_merged.contiguous().view(n_embd, N) // [768, N] cur = ggml_cont_2d(ctx, KQV_merged, n_embd, N); } // projection // [ 768, 768] - model.layers[il].c_attn_proj_w // [ 768, 1] - model.layers[il].c_attn_proj_b // [ 768, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_proj_w, cur); cur = ggml_add(ctx, ggml_repeat(ctx, model.layers[il].c_attn_proj_b, cur), cur); } // add the input cur = ggml_add(ctx, cur, inpL); struct ggml_tensor * inpFF = cur; // feed-forward network { // norm { cur = ggml_norm(ctx, inpFF, hparams.eps); // cur = ln_2_g*cur + ln_2_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, ggml_repeat(ctx, model.layers[il].ln_2_g, cur), cur), ggml_repeat(ctx, model.layers[il].ln_2_b, cur)); } // fully connected // [3072, 768] - model.layers[il].c_mlp_fc_w // [3072, 1] - model.layers[il].c_mlp_fc_b // [ 768, N] - cur (in) // [3072, N] - cur (out) // // cur = fc_w*cur + fc_b // [3072, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx, ggml_repeat(ctx, model.layers[il].c_mlp_fc_b, cur), cur); // GELU activation // [3072, N] cur = ggml_gelu(ctx, cur); // projection // [ 768, 3072] - model.layers[il].c_mlp_proj_w // [ 768, 1] - model.layers[il].c_mlp_proj_b // [3072, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx, ggml_repeat(ctx, model.layers[il].c_mlp_proj_b, cur), cur); } // input for next layer inpL = ggml_add(ctx, cur, inpFF); } // norm { // [ 768, N] inpL = ggml_norm(ctx, inpL, hparams.eps); // inpL = ln_f_g*inpL + ln_f_b // [ 768, N] inpL = ggml_add(ctx, ggml_mul(ctx, ggml_repeat(ctx, model.ln_f_g, inpL), inpL), ggml_repeat(ctx, model.ln_f_b, inpL)); } // inpL = WTE * inpL // [ 768, 50257] - model.lm_head // [ 768, N] - inpL inpL = ggml_mul_mat(ctx, model.lm_head, inpL); ggml_set_name(inpL, "logits"); // setting a tensor as the output will ensure that it is not overwritten by subsequent operations ggml_set_output(inpL); // logits -> probs //inpL = ggml_soft_max(ctx0, inpL); ggml_build_forward_expand(gf, inpL); ggml_free(ctx); return gf; } // evaluate the transformer // // - model: the model // - allocr: ggml_gallocr to use to allocate the compute buffer // - n_threads: number of threads to use // - n_past: the context size so far // - embd_inp: the embeddings of the tokens in the context // - embd_w: the predicted logits for the next token // bool gpt2_eval( const gpt2_model & model, ggml_gallocr_t allocr, const int n_threads, const int n_past, const std::vector & embd_inp, std::vector & embd_w) { const int N = embd_inp.size(); const auto & hparams = model.hparams; const int n_vocab = hparams.n_vocab; struct ggml_cgraph * gf = gpt2_graph(model, n_past, embd_inp.size()); // allocate the graph tensors ggml_gallocr_alloc_graph(allocr, gf); // set the graph inputs struct ggml_tensor * embd = ggml_graph_get_tensor(gf, "embd"); memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); struct ggml_tensor * position = ggml_graph_get_tensor(gf, "position"); for (int i = 0; i < N; ++i) { ((int32_t *) position->data)[i] = n_past + i; } // run the computation struct ggml_cplan plan = ggml_graph_plan(gf, n_threads, nullptr); static std::vector work_buffer; 
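// --------------------------------------------------------------------------
// Standalone sketch (not part of the original example): the gpt2_graph() /
// gpt2_eval() pair above follows the usual ggml-alloc workflow - build the
// graph in a no_alloc context, let a ggml_gallocr allocate the tensor data,
// locate the inputs by name, then plan and run the graph on the CPU backend.
// The tensor names, sizes and thread count below are illustrative only.
// --------------------------------------------------------------------------
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-cpu.h"

#include <cstdio>
#include <cstring>
#include <vector>

static void gallocr_workflow_sketch(void) {
    // graph-only context: holds tensor/graph metadata, not tensor data
    std::vector<uint8_t> buf(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead());

    struct ggml_init_params params = {
        /*.mem_size   =*/ buf.size(),
        /*.mem_buffer =*/ buf.data(),
        /*.no_alloc   =*/ true, // data is allocated later by ggml_gallocr_alloc_graph()
    };

    struct ggml_context * ctx = ggml_init(params);
    struct ggml_cgraph  * gf  = ggml_new_graph(ctx);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
    struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    ggml_set_name(a, "a"); ggml_set_input(a);
    ggml_set_name(x, "x"); ggml_set_input(x);

    struct ggml_tensor * y = ggml_mul_mat(ctx, a, x); // mat-vec product, 4 output elements
    ggml_set_name(y, "y"); ggml_set_output(y);

    ggml_build_forward_expand(gf, y);

    // allocate the tensor data in a CPU buffer, then fill the inputs by name
    ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
    ggml_gallocr_alloc_graph(galloc, gf);

    const std::vector<float> a_data(16, 1.0f);
    const std::vector<float> x_data( 4, 2.0f);
    memcpy(ggml_graph_get_tensor(gf, "a")->data, a_data.data(), ggml_nbytes(a));
    memcpy(ggml_graph_get_tensor(gf, "x")->data, x_data.data(), ggml_nbytes(x));

    // same compute path as gpt2_eval(): plan, provide a work buffer, run
    struct ggml_cplan plan = ggml_graph_plan(gf, /*n_threads =*/ 4, nullptr);
    std::vector<uint8_t> work(plan.work_size);
    plan.work_data = work.data();
    ggml_graph_compute(gf, &plan);

    printf("y[0] = %f\n", ((float *) y->data)[0]); // 4 * (1.0 * 2.0) = 8.0

    ggml_gallocr_free(galloc);
    ggml_free(ctx);
}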
work_buffer.resize(plan.work_size); plan.work_data = work_buffer.data(); ggml_graph_compute(gf, &plan); //if (n_past%100 == 0) { // ggml_graph_print (&gf); // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); //} // get the graph outputs struct ggml_tensor * logits = ggml_graph_get_tensor(gf, "logits"); //embd_w.resize(n_vocab*N); //memcpy(embd_w.data(), ggml_get_data(logits), sizeof(float)*n_vocab*N); // return result just for the last token embd_w.resize(n_vocab); memcpy(embd_w.data(), (float *) ggml_get_data(logits) + (n_vocab*(N-1)), sizeof(float)*n_vocab); return true; } int main(int argc, char ** argv) { ggml_time_init(); const int64_t t_main_start_us = ggml_time_us(); gpt_params params; params.model = "models/gpt-2-117M/ggml-model.bin"; if (gpt_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } printf("%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.prompt.empty()) { params.prompt = gpt_random_prompt(rng); } int64_t t_load_us = 0; gpt_vocab vocab; gpt2_model model; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gpt2_model_load(params.model, model, vocab)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; test_gpt_tokenizer(vocab, params.token_test); } ggml_gallocr_t allocr = NULL; // allocate the compute buffer { allocr = ggml_gallocr_new(ggml_backend_cpu_buffer_type()); // create the worst case graph for memory usage estimation int n_tokens = std::min(model.hparams.n_ctx, params.n_batch); int n_past = model.hparams.n_ctx - n_tokens; struct ggml_cgraph * gf = gpt2_graph(model, n_past, n_tokens); // pre-allocate the compute buffer for the worst case (optional) ggml_gallocr_reserve(allocr, gf); size_t mem_size = ggml_gallocr_get_buffer_size(allocr, 0); fprintf(stderr, "%s: compute buffer size: %.2f MB\n", __func__, mem_size/1024.0/1024.0); } int n_past = 0; int64_t t_sample_us = 0; int64_t t_predict_us = 0; std::vector logits; // tokenize the prompt std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { printf("%d ", embd_inp[i]); } printf("\n\n"); // submit the input prompt token-by-token // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning std::vector embd; for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { // predict if (embd.size() > 0) { const int64_t t_start_us = ggml_time_us(); if (!gpt2_eval(model, allocr, params.n_threads, n_past, embd, logits)) { printf("Failed to predict\n"); return 1; } t_predict_us += ggml_time_us() - t_start_us; } n_past += embd.size(); embd.clear(); if (i >= embd_inp.size()) { // sample next token const int top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; const int n_vocab = model.hparams.n_vocab; gpt_vocab::id id = 0; { const int64_t t_start_sample_us = ggml_time_us(); id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); t_sample_us += ggml_time_us() - t_start_sample_us; } // add it to the context embd.push_back(id); } else { // if here, it means we are still processing the input 
prompt for (size_t k = i; k < embd_inp.size(); k++) { embd.push_back(embd_inp[k]); if (int32_t(embd.size()) >= params.n_batch) { break; } } i += embd.size() - 1; } // display text for (auto id : embd) { printf("%s", vocab.id_to_token[id].c_str()); } fflush(stdout); // end of text token if (embd.back() == 50256) { break; } } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n\n"); printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } ggml_free(model.ctx_w); return 0; } ggml-org-ggml-3678254/examples/gpt-2/main-backend.cpp000066400000000000000000000772001512524704700221560ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #ifdef GGML_USE_CUDA #include "ggml-cuda.h" #endif #ifdef GGML_USE_METAL #include "ggml-metal.h" #endif #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif #define GPT2_MAX_NODES 4096 static void ggml_log_callback_default(ggml_log_level level, const char * text, void * user_data) { (void) level; (void) user_data; fputs(text, stderr); fflush(stderr); } // default hparams (GPT-2 117M) struct gpt2_hparams { int32_t n_vocab = 50257; int32_t n_ctx = 1024; int32_t n_embd = 768; int32_t n_head = 12; int32_t n_layer = 12; int32_t ftype = 1; float eps = 1e-5f; }; struct gpt2_layer { // normalization struct ggml_tensor * ln_1_g; struct ggml_tensor * ln_1_b; struct ggml_tensor * ln_2_g; struct ggml_tensor * ln_2_b; // attention struct ggml_tensor * c_attn_attn_w; struct ggml_tensor * c_attn_attn_b; struct ggml_tensor * c_attn_proj_w; struct ggml_tensor * c_attn_proj_b; // mlp struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gpt2_model { gpt2_hparams hparams; // normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; struct ggml_tensor * wte; // token embedding struct ggml_tensor * wpe; // position embedding struct ggml_tensor * lm_head; // language model head std::vector layers; // key + value memory struct ggml_tensor * memory_k; struct ggml_tensor * memory_v; // struct ggml_context * ctx_w; struct ggml_context * ctx_kv; ggml_backend_t backend = NULL; ggml_backend_buffer_t buffer_w; ggml_backend_buffer_t buffer_kv; std::map tensors; }; // load the model's weights from a file bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab, int n_ctx, int n_gpu_layers) { printf("%s: loading model from '%s'\n", __func__, fname.c_str()); auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } // load hparams { auto & hparams = model.hparams; fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); 
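// --------------------------------------------------------------------------
// The loader below expects the legacy ggml model format used by these gpt-2
// examples. Summarized from the read calls in this file (a sketch, not a
// formal spec):
//
//   uint32  magic                                    == GGML_FILE_MAGIC
//   int32   n_vocab, n_ctx, n_embd, n_head, n_layer, ftype
//   int32   n_vocab                                  (start of vocab section)
//   n_vocab x { uint32 len; char word[len]; }
//   repeated until EOF (one record per weight tensor):
//     int32  n_dims, name_len, ttype
//     int32  ne[n_dims]
//     char   name[name_len]
//     <tensor data, ggml_nbytes(tensor) bytes>
//
// Small standalone helpers showing the framing; the helper names are
// illustrative and are not used by the loader itself.
// --------------------------------------------------------------------------
#include <cstdint>
#include <fstream>
#include <string>

static bool read_i32(std::ifstream & fin, int32_t & out) {
    fin.read(reinterpret_cast<char *>(&out), sizeof(out));
    return bool(fin);
}

static bool read_length_prefixed(std::ifstream & fin, std::string & out) {
    int32_t len = 0;
    if (!read_i32(fin, len) || len < 0) {
        return false;
    }
    out.resize(len);
    fin.read(&out[0], len);
    return bool(fin);
}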
fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // load vocab { int32_t n_vocab = 0; fin.read((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != model.hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); return false; } std::string word; std::vector buf(128); for (int i = 0; i < n_vocab; i++) { uint32_t len; fin.read((char *) &len, sizeof(len)); buf.resize(len); fin.read((char *) buf.data(), len); word.assign(buf.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), model.hparams.ftype); return false; } ggml_log_set(ggml_log_callback_default, nullptr); auto & ctx = model.ctx_w; // create the ggml context { size_t n_tensors = 2 + 6 + 12*model.hparams.n_layer; struct ggml_init_params params = { /*.mem_size =*/ ggml_tensor_overhead() * n_tensors, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ctx = ggml_init(params); if (!ctx) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } // initialize the backend #ifdef GGML_USE_CUDA if (n_gpu_layers > 0) { fprintf(stderr, "%s: using CUDA backend\n", __func__); model.backend = ggml_backend_cuda_init(0); if (!model.backend) { fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); } } #endif #ifdef GGML_USE_METAL if (n_gpu_layers > 0) { fprintf(stderr, "%s: using Metal backend\n", __func__); model.backend = ggml_backend_metal_init(); if (!model.backend) { fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); } } #endif if (!model.backend) { // fallback to CPU backend fprintf(stderr, "%s: using CPU backend\n", __func__); model.backend = ggml_backend_cpu_init(); } if (!model.backend) { fprintf(stderr, "%s: ggml_backend_cpu_init() failed\n", __func__); return false; } // create the tensors for the model { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; model.layers.resize(n_layer); model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); // map by name model.tensors["model/ln_f/g"] = model.ln_f_g; model.tensors["model/ln_f/b"] = model.ln_f_b; model.tensors["model/wte"] = model.wte; model.tensors["model/wpe"] = model.wpe; 
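// --------------------------------------------------------------------------
// Condensed standalone restatement of the backend selection done above (a
// sketch, not a drop-in replacement): try an accelerated backend when it was
// compiled in and requested, otherwise fall back to the CPU backend, and then
// place every tensor of a no_alloc context into a single backend buffer.
// --------------------------------------------------------------------------
static ggml_backend_t init_backend_with_fallback(int n_gpu_layers) {
    ggml_backend_t backend = NULL;
    (void) n_gpu_layers; // unused in CPU-only builds

#ifdef GGML_USE_CUDA
    if (!backend && n_gpu_layers > 0) {
        backend = ggml_backend_cuda_init(0); // device 0
    }
#endif
#ifdef GGML_USE_METAL
    if (!backend && n_gpu_layers > 0) {
        backend = ggml_backend_metal_init();
    }
#endif
    if (!backend) {
        backend = ggml_backend_cpu_init(); // always available
    }
    return backend;
}
//
// usage sketch (mirrors the weight allocation done a bit further below):
//
//   ggml_backend_t        backend  = init_backend_with_fallback(n_gpu_layers);
//   ggml_backend_buffer_t buffer_w = ggml_backend_alloc_ctx_tensors(ctx, backend);
//   printf("weights buffer: %6.2f MB\n", ggml_backend_buffer_get_size(buffer_w)/(1024.0*1024.0));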
model.tensors["model/lm_head"] = model.lm_head; for (int i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // map by name model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; } } // allocate the model tensors in a backend buffer model.buffer_w = ggml_backend_alloc_ctx_tensors(ctx, model.backend); printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_tensor)); printf("%s: backend buffer size = %6.2f MB\n", __func__, ggml_backend_buffer_get_size(model.buffer_w)/(1024.0*1024.0)); // override the default training context with the user-provided model.hparams.n_ctx = n_ctx; // key + value memory { auto * ctx = model.ctx_kv; // create the ggml context { size_t n_tensors = 2; struct ggml_init_params params = { /*.mem_size =*/ ggml_tensor_overhead() * n_tensors, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ctx = ggml_init(params); if (!ctx) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_mem = n_layer*n_ctx; const int n_elements = n_embd*n_mem; // k and v here can also be GGML_TYPE_F16 to save memory and speed up the computation // if backend supports it model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); // allocate the KV memory in a backend buffer model.buffer_kv = ggml_backend_alloc_ctx_tensors(ctx, model.backend); const size_t memory_size = ggml_backend_buffer_get_size(model.buffer_kv); printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); } // load weights { size_t total_size = 0; bool has_lm_head = false; std::vector read_buf; while (true) { int32_t n_dims; 
int32_t length; int32_t ttype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ttype), sizeof(ttype)); if (fin.eof()) { break; } int32_t nelements = 1; int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name) == model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str()); return false; } auto tensor = model.tensors[name]; ggml_set_name(tensor, name.c_str()); if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.c_str(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); return false; } // for debugging if (0) { printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); } const size_t bpe = ggml_type_size(ggml_type(ttype)); if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.c_str(), ggml_nbytes(tensor), nelements*bpe); return false; } if (ggml_backend_buffer_is_host(model.buffer_w)) { // for some backends such as CPU and Metal, the tensor data is in system memory and we can read directly into it fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); } else { // read into a temporary buffer first, then copy to device memory read_buf.resize(ggml_nbytes(tensor)); fin.read(read_buf.data(), ggml_nbytes(tensor)); ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor)); } // GPT-2 models share the WTE tensor as the LM head if (name == "model/wte" && has_lm_head == false) { //ggml_backend_tensor_copy(tensor, model.lm_head); model.lm_head = tensor; } if (name == "model/lm_head") { has_lm_head = true; } total_size += ggml_nbytes(tensor); } printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); } fin.close(); return true; } // build the computation graph struct ggml_cgraph * gpt2_graph( const gpt2_model & model, const int n_past, const int n_tokens) { const int N = n_tokens; const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; // since we are using ggml-alloc, this buffer only needs enough space to hold the ggml_tensor and ggml_cgraph structs, but not the tensor data static size_t buf_size = ggml_tensor_overhead()*GPT2_MAX_NODES + ggml_graph_overhead_custom(GPT2_MAX_NODES, false); static std::vector buf(buf_size); struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf.data(), /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph() }; struct ggml_context * ctx = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GPT2_MAX_NODES, false); struct ggml_tensor * embd = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N); // at this point, the tensor data is not allocated yet and cannot be set // we will find the tensor after the graph is allocated by its name, and 
set the data then ggml_set_name(embd, "embd"); // setting a tensor as an input will ensure that it is allocated at the beginning of the graph // this is important to ensure that the input tensors are not overwritten before they are used ggml_set_input(embd); struct ggml_tensor * position = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N); ggml_set_name(position, "position"); ggml_set_input(position); // wte + wpe struct ggml_tensor * inpL = ggml_add(ctx, ggml_get_rows(ctx, model.wte, embd), ggml_get_rows(ctx, model.wpe, position)); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; // norm { // [ 768, N] cur = ggml_norm(ctx, inpL, hparams.eps); // cur = ln_1_g*cur + ln_1_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, cur, model.layers[il].ln_1_g), model.layers[il].ln_1_b); } // attn // [2304, 768] - model.layers[il].c_attn_attn_w // [2304, 1] - model.layers[il].c_attn_attn_b // [ 768, N] - cur (in) // [2304, N] - cur (out) // // cur = attn_w*cur + attn_b // [2304, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_attn_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_attn_attn_b); } // self-attention { struct ggml_tensor * Qcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); struct ggml_tensor * Kcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); struct ggml_tensor * Vcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd); // store key and value to memory if (N >= 1) { struct ggml_tensor * k = ggml_view_1d(ctx, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); struct ggml_tensor * v = ggml_view_1d(ctx, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Vcur, v)); } // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) // [64, N, 12] struct ggml_tensor * Q = ggml_permute(ctx, ggml_cont_3d(ctx, Qcur, n_embd/n_head, n_head, N), 0, 2, 1, 3); // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) // [64, n_past + N, 12] struct ggml_tensor * K = ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), n_embd/n_head, n_head, n_past + N), 0, 2, 1, 3); // GG: flash attention //struct ggml_tensor * V = // ggml_cpy(ctx0, // ggml_permute(ctx0, // ggml_reshape_3d(ctx0, // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), // n_embd/n_head, n_head, n_past + N), // 1, 2, 0, 3), // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); // K * Q // [n_past + N, N, 12] struct ggml_tensor * KQ = ggml_mul_mat(ctx, K, Q); // KQ_scaled = KQ / sqrt(n_embd/n_head) // [n_past + N, N, 12] struct ggml_tensor * KQ_scaled = ggml_scale(ctx, KQ, 1.0f/sqrtf(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) // [n_past + N, N, 12] struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx, KQ_scaled, n_past); // KQ = soft_max(KQ_masked) // [n_past + N, N, 12] struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx, KQ_masked); // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() // [n_past + N, 64, 12] struct ggml_tensor * V_trans = ggml_cont_3d(ctx, ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.memory_v, (n_past + N)*n_embd, 
il*n_ctx*ggml_element_size(model.memory_v)*n_embd), n_embd/n_head, n_head, n_past + N), 1, 2, 0, 3), n_past + N, n_embd/n_head, n_head); // KQV = transpose(V) * KQ_soft_max // [64, N, 12] struct ggml_tensor * KQV = ggml_mul_mat(ctx, V_trans, KQ_soft_max); // KQV_merged = KQV.permute(0, 2, 1, 3) // [64, 12, N] struct ggml_tensor * KQV_merged = ggml_permute(ctx, KQV, 0, 2, 1, 3); // cur = KQV_merged.contiguous().view(n_embd, N) // [768, N] cur = ggml_cont_2d(ctx, KQV_merged, n_embd, N); } // projection // [ 768, 768] - model.layers[il].c_attn_proj_w // [ 768, 1] - model.layers[il].c_attn_proj_b // [ 768, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_proj_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_attn_proj_b); } // add the input cur = ggml_add(ctx, cur, inpL); struct ggml_tensor * inpFF = cur; // feed-forward network { // norm { cur = ggml_norm(ctx, inpFF, hparams.eps); // cur = ln_2_g*cur + ln_2_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, cur, model.layers[il].ln_2_g), model.layers[il].ln_2_b); } // fully connected // [3072, 768] - model.layers[il].c_mlp_fc_w // [3072, 1] - model.layers[il].c_mlp_fc_b // [ 768, N] - cur (in) // [3072, N] - cur (out) // // cur = fc_w*cur + fc_b // [3072, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_mlp_fc_b); // GELU activation // [3072, N] cur = ggml_gelu(ctx, cur); // projection // [ 768, 3072] - model.layers[il].c_mlp_proj_w // [ 768, 1] - model.layers[il].c_mlp_proj_b // [3072, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_mlp_proj_b); } // input for next layer inpL = ggml_add(ctx, cur, inpFF); } // norm { // [ 768, N] inpL = ggml_norm(ctx, inpL, hparams.eps); // inpL = ln_f_g*inpL + ln_f_b // [ 768, N] inpL = ggml_add(ctx, ggml_mul(ctx, inpL, model.ln_f_g), model.ln_f_b); } // inpL = WTE * inpL // [ 768, 50257] - model.lm_head // [ 768, N] - inpL inpL = ggml_mul_mat(ctx, model.lm_head, inpL); ggml_set_name(inpL, "logits"); // setting a tensor as the output will ensure that it is not overwritten by subsequent operations ggml_set_output(inpL); // logits -> probs //inpL = ggml_soft_max(ctx0, inpL); ggml_build_forward_expand(gf, inpL); ggml_free(ctx); return gf; } // evaluate the transformer // // - model: the model // - allocr: ggml_gallocr to use to allocate the compute buffer // - n_threads: number of threads to use // - n_past: the context size so far // - embd_inp: the embeddings of the tokens in the context // - embd_w: the predicted logits for the next token // bool gpt2_eval( const gpt2_model & model, ggml_gallocr_t allocr, const int n_threads, const int n_past, const std::vector & embd_inp, std::vector & embd_w) { const int N = embd_inp.size(); const auto & hparams = model.hparams; const int n_vocab = hparams.n_vocab; struct ggml_cgraph * gf = gpt2_graph(model, n_past, embd_inp.size()); // allocate the graph tensors ggml_gallocr_alloc_graph(allocr, gf); // set the graph inputs struct ggml_tensor * embd = ggml_graph_get_tensor(gf, "embd"); ggml_backend_tensor_set(embd, embd_inp.data(), 0, N*ggml_element_size(embd)); struct ggml_tensor * position = ggml_graph_get_tensor(gf, "position"); for (int i = 0; i < N; ++i) { int32_t v = n_past + i; ggml_backend_tensor_set(position, &v, i*sizeof(int32_t), sizeof(v)); } // set backend options if 
(ggml_backend_is_cpu(model.backend)) { ggml_backend_cpu_set_n_threads(model.backend, n_threads); } // run the computation ggml_backend_graph_compute(model.backend, gf); //if (n_past%100 == 0) { // ggml_graph_print (&gf); // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); //} // get the graph outputs struct ggml_tensor * logits = ggml_graph_get_tensor(gf, "logits"); //embd_w.resize(n_vocab*N); //ggml_backend_tensor_get(logits, embd_w.data(), 0, sizeof(float)*n_vocab*N); // return result just for the last token embd_w.resize(n_vocab); ggml_backend_tensor_get(logits, embd_w.data(), (n_vocab*(N-1))*sizeof(float), sizeof(float)*n_vocab); return true; } int main(int argc, char ** argv) { ggml_time_init(); const int64_t t_main_start_us = ggml_time_us(); gpt_params params; params.model = "models/gpt-2-117M/ggml-model.bin"; if (gpt_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } printf("%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.prompt.empty()) { params.prompt = gpt_random_prompt(rng); } int64_t t_load_us = 0; gpt_vocab vocab; gpt2_model model; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gpt2_model_load(params.model, model, vocab, params.n_ctx, params.n_gpu_layers)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; test_gpt_tokenizer(vocab, params.token_test); } ggml_gallocr_t allocr = NULL; // allocate the compute buffer { // create a graph allocator with the backend's default buffer type allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend)); // create the worst case graph for memory usage estimation int n_tokens = std::min(model.hparams.n_ctx, params.n_batch); int n_past = model.hparams.n_ctx - n_tokens; struct ggml_cgraph * gf = gpt2_graph(model, n_past, n_tokens); // pre-allocate the compute buffer for the worst case (optional) ggml_gallocr_reserve(allocr, gf); size_t mem_size = ggml_gallocr_get_buffer_size(allocr, 0); fprintf(stderr, "%s: compute buffer size: %.2f MB\n", __func__, mem_size/1024.0/1024.0); } int n_past = 0; int64_t t_sample_us = 0; int64_t t_predict_us = 0; std::vector logits; // tokenize the prompt std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { printf("%d ", embd_inp[i]); } printf("\n\n"); // submit the input prompt token-by-token // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning std::vector embd; for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { // predict if (embd.size() > 0) { const int64_t t_start_us = ggml_time_us(); if (!gpt2_eval(model, allocr, params.n_threads, n_past, embd, logits)) { printf("Failed to predict\n"); return 1; } t_predict_us += ggml_time_us() - t_start_us; } n_past += embd.size(); embd.clear(); if (i >= embd_inp.size()) { // sample next token const int top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; const int n_vocab = model.hparams.n_vocab; gpt_vocab::id id = 0; { const int64_t t_start_sample_us = ggml_time_us(); id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - 
n_vocab), top_k, top_p, temp, rng); t_sample_us += ggml_time_us() - t_start_sample_us; } // add it to the context embd.push_back(id); } else { // if here, it means we are still processing the input prompt for (size_t k = i; k < embd_inp.size(); k++) { embd.push_back(embd_inp[k]); if (int32_t(embd.size()) >= params.n_batch) { break; } } i += embd.size() - 1; } // display text for (auto id : embd) { printf("%s", vocab.id_to_token[id].c_str()); } fflush(stdout); // end of text token if (!params.ignore_eos && embd.back() == 50256) { break; } } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n\n"); printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } ggml_free(model.ctx_w); ggml_gallocr_free(allocr); ggml_backend_buffer_free(model.buffer_w); ggml_backend_buffer_free(model.buffer_kv); ggml_backend_free(model.backend); return 0; } ggml-org-ggml-3678254/examples/gpt-2/main-batched.cpp000066400000000000000000001205221512524704700221550ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #ifdef GGML_USE_CUDA #include "ggml-cuda.h" #endif #ifdef GGML_USE_METAL #include "ggml-metal.h" #endif #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif #define GPT2_MAX_NODES 4096 static void ggml_log_callback_default(ggml_log_level level, const char * text, void * user_data) { (void) level; (void) user_data; fputs(text, stderr); fflush(stderr); } typedef int32_t gpt2_pos; typedef int32_t gpt2_seq_id; // default hparams (GPT-2 117M) struct gpt2_hparams { int32_t n_vocab = 50257; int32_t n_ctx = 1024; int32_t n_embd = 768; int32_t n_head = 12; int32_t n_layer = 12; int32_t ftype = 1; float eps = 1e-5f; }; struct gpt2_layer { // normalization struct ggml_tensor * ln_1_g; struct ggml_tensor * ln_1_b; struct ggml_tensor * ln_2_g; struct ggml_tensor * ln_2_b; // attention struct ggml_tensor * c_attn_attn_w; struct ggml_tensor * c_attn_attn_b; struct ggml_tensor * c_attn_proj_w; struct ggml_tensor * c_attn_proj_b; // mlp struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gpt2_kv_cell { gpt2_pos pos = -1; gpt2_pos delta = 0; std::set seq_id; bool has_seq_id(const gpt2_seq_id & id) const { return seq_id.find(id) != seq_id.end(); } }; struct gpt2_kv_cache { // key + value memory struct ggml_tensor * k; struct ggml_tensor * v; // uint32_t head = 0; uint32_t size = 0; // computed before each graph build uint32_t n = 0; std::vector cells; ggml_backend_buffer_t buffer; }; struct gpt2_model { gpt2_hparams hparams; // normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; struct ggml_tensor * wte; // token embedding struct ggml_tensor * wpe; // position embedding struct ggml_tensor * lm_head; // language model head std::vector layers; gpt2_kv_cache kv_cache; struct ggml_context * ctx_w; ggml_backend_t backend = NULL; ggml_backend_buffer_t buffer_w; std::map tensors; }; // Input data for gpt2_decode // A gpt2_batch object can contain input about one or many sequences // 
The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens // // - token : the token ids of the input (used when embd is NULL) // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) // - pos : the positions of the respective token in the sequence // - seq_id : the sequence to which the respective token belongs // - logits : if zero, the logits for the respective token will not be output // struct gpt2_batch { int32_t n_tokens = -1; gpt_vocab::id * token = {}; float * embd = {}; gpt2_pos * pos = {}; gpt2_seq_id * seq_id = {}; int8_t * logits = {}; }; // load the model's weights from a file bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab, int n_ctx, int n_gpu_layers) { printf("%s: loading model from '%s'\n", __func__, fname.c_str()); auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } // load hparams { auto & hparams = model.hparams; fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // load vocab { int32_t n_vocab = 0; fin.read((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != model.hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); return false; } std::string word; std::vector buf(128); for (int i = 0; i < n_vocab; i++) { uint32_t len; fin.read((char *) &len, sizeof(len)); buf.resize(len); fin.read((char *) buf.data(), len); word.assign(buf.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), model.hparams.ftype); return false; } auto & ctx = model.ctx_w; size_t buffer_size = 0; { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; buffer_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_g buffer_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_b buffer_size += ggml_row_size(wtype, n_vocab*n_embd); // wte buffer_size += ggml_row_size(GGML_TYPE_F32, n_ctx*n_embd); // wpe buffer_size += ggml_row_size(wtype, 
n_vocab*n_embd); // lm_head buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_g buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_b buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_g buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_b buffer_size += n_layer*(ggml_row_size(wtype, 3*n_embd*n_embd)); // c_attn_attn_w buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 3*n_embd)); // c_attn_attn_b buffer_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_proj_w buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_attn_proj_b buffer_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_fc_w buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_fc_b buffer_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_proj_w buffer_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_proj_b buffer_size += (6 + 12*n_layer)*128; // alignment overhead printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_tensor)); printf("%s: backend buffer size = %6.2f MB\n", __func__, buffer_size/(1024.0*1024.0)); } ggml_log_set(ggml_log_callback_default, nullptr); // create the ggml context { size_t n_tensors = 2 + 6 + 12*model.hparams.n_layer; struct ggml_init_params params = { /*.mem_size =*/ ggml_tensor_overhead() * n_tensors, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; model.ctx_w = ggml_init(params); if (!model.ctx_w) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } // initialize the backend #ifdef GGML_USE_CUDA if (n_gpu_layers > 0) { fprintf(stderr, "%s: using CUDA backend\n", __func__); model.backend = ggml_backend_cuda_init(0); if (!model.backend) { fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); } } #endif #ifdef GGML_USE_METAL if (n_gpu_layers > 0) { fprintf(stderr, "%s: using Metal backend\n", __func__); model.backend = ggml_backend_metal_init(); if (!model.backend) { fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); } } #endif if (!model.backend) { // fallback to CPU backend fprintf(stderr, "%s: using CPU backend\n", __func__); model.backend = ggml_backend_cpu_init(); } if (!model.backend) { fprintf(stderr, "%s: ggml_backend_cpu_init() failed\n", __func__); return false; } // allocate weights buffer model.buffer_w = ggml_backend_alloc_buffer(model.backend, buffer_size); // prepare memory for the weights { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; model.layers.resize(n_layer); model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); // map by name model.tensors["model/ln_f/g"] = model.ln_f_g; model.tensors["model/ln_f/b"] = model.ln_f_b; model.tensors["model/wte"] = model.wte; model.tensors["model/wpe"] = model.wpe; model.tensors["model/lm_head"] = model.lm_head; for (int i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_b = ggml_new_tensor_1d(ctx, 
GGML_TYPE_F32, n_embd); layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // map by name model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; } } // override the default training context with the user-provided model.hparams.n_ctx = n_ctx; // key + value memory { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_mem = n_layer*n_ctx; const int n_elements = n_embd*n_mem; model.kv_cache.k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); model.kv_cache.v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); model.kv_cache.head = 0; model.kv_cache.size = n_ctx; model.kv_cache.cells.resize(n_ctx); const size_t memory_size = ggml_nbytes(model.kv_cache.k) + ggml_nbytes(model.kv_cache.v); printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); // create a backend buffer (can be in host or device memory) model.kv_cache.buffer = ggml_backend_alloc_buffer(model.backend, memory_size + 256); // allocate the tensors into the backend buffer { ggml_tallocr alloc = ggml_tallocr_new(model.kv_cache.buffer); // this updates the pointers in the tensors to point to the correct location in the buffer // this is necessary since the ggml_context is .no_alloc == true // note that the buffer can actually be a device buffer, depending on the backend ggml_tallocr_alloc(&alloc, model.kv_cache.k); ggml_tallocr_alloc(&alloc, model.kv_cache.v); } } // load weights { ggml_tallocr alloc = ggml_tallocr_new(model.buffer_w); size_t total_size = 0; bool has_lm_head = false; std::vector read_buf; while (true) { int32_t n_dims; int32_t length; int32_t ttype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ttype), sizeof(ttype)); if (fin.eof()) { break; } int32_t nelements = 1; int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name) == 
model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str()); return false; } auto tensor = model.tensors[name]; ggml_set_name(tensor, name.c_str()); if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.c_str(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); return false; } // for debugging if (0) { printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); } const size_t bpe = ggml_type_size(ggml_type(ttype)); if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.c_str(), ggml_nbytes(tensor), nelements*bpe); return false; } ggml_tallocr_alloc(&alloc, tensor); if (ggml_backend_is_cpu (model.backend) #ifdef GGML_USE_METAL || ggml_backend_is_metal(model.backend) #endif ) { // for the CPU and Metal backend, we can read directly into the tensor fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); } else { // read into a temporary buffer first, then copy to device memory read_buf.resize(ggml_nbytes(tensor)); fin.read(read_buf.data(), ggml_nbytes(tensor)); ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor)); } // GPT-2 models share the WTE tensor as the LM head if (name == "model/wte" && has_lm_head == false) { //ggml_tallocr_alloc(alloc, model.lm_head); //ggml_backend_tensor_copy(tensor, model.lm_head); model.lm_head = tensor; } if (name == "model/lm_head") { has_lm_head = true; } total_size += ggml_nbytes(tensor); } printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); } fin.close(); return true; } // build the computation graph struct ggml_cgraph * gpt2_graph( const gpt2_model & model, const gpt2_batch & batch, bool measure) { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; const auto & kv_cache = model.kv_cache; const int32_t n_tokens = batch.n_tokens; const int32_t n_kv = measure ? n_ctx : kv_cache.n; const int32_t kv_head = measure ? 
n_ctx - n_tokens : kv_cache.head; // since we are using ggml-alloc, this buffer only needs enough space to hold the ggml_tensor and ggml_cgraph structs, but not the tensor data static size_t buf_size = ggml_tensor_overhead()*GPT2_MAX_NODES + ggml_graph_overhead_custom(GPT2_MAX_NODES, false); static std::vector buf(buf_size); struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf.data(), /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph() }; struct ggml_context * ctx = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GPT2_MAX_NODES, false); struct ggml_tensor * inpL; if (batch.token) { struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens); ggml_set_name(inp_tokens, "inp_tokens"); ggml_set_input(inp_tokens); struct ggml_tensor * position = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens); ggml_set_name(position, "position"); ggml_set_input(position); // wte + wpe inpL = ggml_add(ctx, ggml_get_rows(ctx, model.wte, inp_tokens), ggml_get_rows(ctx, model.wpe, position)); } else { GGML_ASSERT(batch.embd); inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens); ggml_set_name(inpL, "embd"); ggml_set_input(inpL); } // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_kv, n_tokens, 1); ggml_set_name(KQ_mask, "KQ_mask"); ggml_set_input(KQ_mask); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; // norm { // [ 768, N] cur = ggml_norm(ctx, inpL, hparams.eps); // cur = ln_1_g*cur + ln_1_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, cur, model.layers[il].ln_1_g), model.layers[il].ln_1_b); } // attn // [2304, 768] - model.layers[il].c_attn_attn_w // [2304, 1] - model.layers[il].c_attn_attn_b // [ 768, n_tokens] - cur (in) // [2304, n_tokens] - cur (out) // // cur = attn_w*cur + attn_b // [2304, n_tokens] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_attn_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_attn_attn_b); } // self-attention { struct ggml_tensor * Qcur = ggml_view_2d(ctx, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*n_embd); struct ggml_tensor * Kcur = ggml_view_2d(ctx, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*n_embd); struct ggml_tensor * Vcur = ggml_view_2d(ctx, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*n_embd); // store key and value to memory if (n_tokens >= 1) { struct ggml_tensor * k = ggml_view_1d(ctx, model.kv_cache.k, n_tokens*n_embd, (ggml_element_size(model.kv_cache.k)*n_embd)*(il*n_ctx + kv_head)); struct ggml_tensor * v = ggml_view_1d(ctx, model.kv_cache.v, n_tokens*n_embd, (ggml_element_size(model.kv_cache.v)*n_embd)*(il*n_ctx + kv_head)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Vcur, v)); } // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) // [64, N, 12] struct ggml_tensor * Q = ggml_permute(ctx, ggml_cont_3d(ctx, Qcur, n_embd/n_head, n_head, n_tokens), 0, 2, 1, 3); // K = Kmem.view(n_embd/n_head, n_head, n_kv).permute(0, 2, 1, 3) // [64, n_kv, 12] struct ggml_tensor * K = ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.kv_cache.k, n_kv*n_embd, il*n_ctx*ggml_element_size(model.kv_cache.k)*n_embd), n_embd/n_head, n_head, n_kv), 0, 2, 1, 3); // GG: flash attention //struct ggml_tensor * V = // ggml_cpy(ctx0, // ggml_permute(ctx0, // ggml_reshape_3d(ctx0, // ggml_view_1d(ctx0, model.kv_cache.v, n_kv*n_embd, 
il*n_ctx*ggml_element_size(model.kv_cache.v)*n_embd), // n_embd/n_head, n_head, n_kv), // 1, 2, 0, 3), // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_embd/n_head, n_head)); //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); // K * Q // [n_kv, n_tokens, 12] struct ggml_tensor * KQ = ggml_mul_mat(ctx, K, Q); // KQ_scaled = KQ / sqrt(n_embd/n_head) // [n_kv, n_tokens, 12] struct ggml_tensor * KQ_scaled = ggml_scale(ctx, KQ, 1.0f/sqrtf(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) // [n_kv, n_tokens, 12] struct ggml_tensor * KQ_masked = ggml_add(ctx, KQ_scaled, KQ_mask); // KQ = soft_max(KQ_masked) // [n_kv, N, 12] struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx, KQ_masked); // V_trans = Vmem.view(n_embd/n_head, n_head, n_kv).permute(1, 2, 0, 3).contiguous() // [n_kv, 64, 12] struct ggml_tensor * V_trans = ggml_cont_3d(ctx, ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.kv_cache.v, n_kv*n_embd, il*n_ctx*ggml_element_size(model.kv_cache.v)*n_embd), n_embd/n_head, n_head, n_kv), 1, 2, 0, 3), n_kv, n_embd/n_head, n_head); // KQV = transpose(V) * KQ_soft_max // [64, n_tokens, 12] struct ggml_tensor * KQV = ggml_mul_mat(ctx, V_trans, KQ_soft_max); // KQV_merged = KQV.permute(0, 2, 1, 3) // [64, 12, n_tokens] struct ggml_tensor * KQV_merged = ggml_permute(ctx, KQV, 0, 2, 1, 3); // cur = KQV_merged.contiguous().view(n_embd, N) // [768, n_tokens] cur = ggml_cont_2d(ctx, KQV_merged, n_embd, n_tokens); } // projection // [ 768, 768] - model.layers[il].c_attn_proj_w // [ 768, 1] - model.layers[il].c_attn_proj_b // [ 768, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_proj_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_attn_proj_b); } // add the input cur = ggml_add(ctx, cur, inpL); struct ggml_tensor * inpFF = cur; // feed-forward network { // norm { cur = ggml_norm(ctx, inpFF, hparams.eps); // cur = ln_2_g*cur + ln_2_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, cur, model.layers[il].ln_2_g), model.layers[il].ln_2_b); } // fully connected // [3072, 768] - model.layers[il].c_mlp_fc_w // [3072, 1] - model.layers[il].c_mlp_fc_b // [ 768, N] - cur (in) // [3072, N] - cur (out) // // cur = fc_w*cur + fc_b // [3072, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_mlp_fc_b); // GELU activation // [3072, N] cur = ggml_gelu(ctx, cur); // projection // [ 768, 3072] - model.layers[il].c_mlp_proj_w // [ 768, 1] - model.layers[il].c_mlp_proj_b // [3072, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx, cur, model.layers[il].c_mlp_proj_b); } // input for next layer inpL = ggml_add(ctx, cur, inpFF); } // norm { // [ 768, N] inpL = ggml_norm(ctx, inpL, hparams.eps); // inpL = ln_f_g*inpL + ln_f_b // [ 768, N] inpL = ggml_add(ctx, ggml_mul(ctx, inpL, model.ln_f_g), model.ln_f_b); } // inpL = WTE * inpL // [ 768, 50257] - model.lm_head // [ 768, N] - inpL inpL = ggml_mul_mat(ctx, model.lm_head, inpL); // logits -> probs //inpL = ggml_soft_max(ctx0, inpL); ggml_build_forward_expand(gf, inpL); ggml_free(ctx); return gf; } static void gpt2_kv_cache_seq_cp( struct gpt2_kv_cache & cache, gpt2_seq_id seq_id_src, gpt2_seq_id seq_id_dst, gpt2_pos p0, gpt2_pos p1) { if (p0 < 0) p0 = 0; if (p1 < 0) p1 = std::numeric_limits::max(); for (uint32_t i = 0; i < cache.size; ++i) { if (cache.cells[i].has_seq_id(seq_id_src) 
&& cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { cache.cells[i].seq_id.insert(seq_id_dst); } } } struct gpt2_batch gpt2_batch_init(int32_t n_tokens, int32_t embd) { gpt2_batch batch; if (embd) { batch.embd = (float *) malloc(sizeof(float) * n_tokens * embd); } else { batch.token = (gpt_vocab::id *) malloc(sizeof(gpt_vocab::id) * n_tokens); } batch.pos = (gpt2_pos *) malloc(sizeof(gpt2_pos) * n_tokens); batch.seq_id = (gpt2_seq_id *) malloc(sizeof(gpt2_seq_id) * n_tokens); batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens); return batch; } void gpt2_batch_free(struct gpt2_batch batch) { if (batch.token) free(batch.token); if (batch.embd) free(batch.embd); if (batch.pos) free(batch.pos); if (batch.seq_id) free(batch.seq_id); if (batch.logits) free(batch.logits); } // Positive return values does not mean a fatal error, but rather a warning. // 0 - success // < 0 - error int gpt2_decode( struct gpt2_model & model, ggml_gallocr_t allocr, struct gpt2_batch batch, int n_threads, std::vector & logits) { const int32_t n_tokens = batch.n_tokens; const auto & hparams = model.hparams; const int n_vocab = hparams.n_vocab; if (n_tokens == 0) { printf("%s: n_tokens == 0", __func__); return -1; } GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); auto & cache = model.kv_cache; for (int i = 0; i < n_tokens; i++) { cache.cells[cache.head + i].pos = batch.pos[i]; cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i]); } cache.n = cache.head + n_tokens; struct ggml_cgraph * gf = gpt2_graph(model, batch, false); // allocate tensors ggml_gallocr_alloc_graph(allocr, gf); // set the graph inputs if (batch.token) { struct ggml_tensor * inp_tokens = ggml_graph_get_tensor(gf, "inp_tokens"); ggml_backend_tensor_set(inp_tokens, batch.token, 0, n_tokens*ggml_element_size(inp_tokens)); struct ggml_tensor * position = ggml_graph_get_tensor(gf, "position"); for (int i = 0; i < n_tokens; ++i) { int32_t v = batch.pos[i]; ggml_backend_tensor_set(position, &v, i*sizeof(int32_t), sizeof(v)); } } else { struct ggml_tensor * embd = ggml_graph_get_tensor(gf, "embd"); ggml_backend_tensor_set(embd, batch.embd, 0, n_tokens * hparams.n_embd * ggml_element_size(embd)); } { struct ggml_tensor * KQ_mask = ggml_graph_get_tensor(gf, "KQ_mask"); const auto & kv_cache = model.kv_cache; const int32_t n_tokens = batch.n_tokens; const int32_t n_kv = kv_cache.n; std::vector data_buf(n_kv*n_tokens); const float neg_inf_v = -INFINITY; for (int h = 0; h < 1; ++h) { int h_offset = h*(n_kv*n_tokens); for (int j = 0; j < n_tokens; ++j) { const gpt2_pos pos = batch.pos[j]; const gpt2_seq_id seq_id = batch.seq_id[j]; for (int i = 0; i < n_kv; ++i) { if (!kv_cache.cells[i].has_seq_id(seq_id) || kv_cache.cells[i].pos > pos) { data_buf[h_offset + j*n_kv + i] = neg_inf_v; } } } } ggml_backend_tensor_set(KQ_mask, data_buf.data(), 0, data_buf.size() * sizeof(float)); } // run the computation if (ggml_backend_is_cpu(model.backend)) { ggml_backend_cpu_set_n_threads(model.backend, n_threads); } ggml_backend_graph_compute(model.backend, gf); //if (n_past%100 == 0) { // ggml_graph_print (&gf); // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); //} // in this case, the output tensor is the last one in the graph struct ggml_tensor * inpL = ggml_graph_node(gf, -1); if (batch.logits) { // return logits for all tokens logits.resize(n_vocab*n_tokens); for (int32_t i = 0; i < n_tokens; i++) { if (batch.logits[i] == 0) { continue; } ggml_backend_tensor_get(inpL, logits.data() + n_vocab*i, n_vocab*i*sizeof(float), 
sizeof(float)*n_vocab); } } else { // return result just for the last token logits.resize(n_vocab); ggml_backend_tensor_get(inpL, logits.data(), (n_vocab*(n_tokens-1))*sizeof(float), sizeof(float)*n_vocab); } // update the kv ring buffer cache.head += n_tokens; // ensure kv cache head points to a valid index. if (cache.head >= cache.size) { printf("%s: cache.head >= cache.size\n", __func__); return -2; } return 0; } int main(int argc, char ** argv) { ggml_time_init(); const int64_t t_main_start_us = ggml_time_us(); gpt_params params; if (gpt_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } printf("%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.prompt.empty()) { params.prompt = gpt_random_prompt(rng); } int64_t t_load_us = 0; gpt_vocab vocab; gpt2_model model; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gpt2_model_load(params.model, model, vocab, params.n_ctx, params.n_gpu_layers)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; test_gpt_tokenizer(vocab, params.token_test); } // tokenize the prompt std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); const int n_parallel = params.n_parallel; const int n_batch_max = std::max(embd_inp.size(), (size_t)n_parallel); // create a gpt2_batch // we use this object to submit token data for decoding gpt2_batch batch = gpt2_batch_init(n_batch_max, 0); // prepare required memory and allocate the compute buffer ggml_gallocr_t allocr = NULL; { // create an allocator to measure the memory usage allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend)); // create the worst case graph for memory usage estimation batch.n_tokens = n_batch_max; struct ggml_cgraph * gf = gpt2_graph(model, batch, true); // pre-allocate the compute buffer for the worst case (optional) ggml_gallocr_reserve(allocr, gf); size_t mem_size = ggml_gallocr_get_buffer_size(allocr, 0); fprintf(stderr, "%s: compute buffer size: %.2f MB\n", __func__, mem_size/1024.0/1024.0); } int64_t t_sample_us = 0; int64_t t_predict_us = 0; std::vector logits; // evaluate the initial prompt batch.n_tokens = embd_inp.size(); for (int32_t i = 0; i < batch.n_tokens; i++) { batch.token[i] = embd_inp[i]; batch.pos[i] = i; batch.seq_id[i] = 0; batch.logits[i] = false; } // gpt2_decode will output logits only for the last token of the prompt batch.logits[batch.n_tokens - 1] = true; if (gpt2_decode(model, allocr, batch, params.n_threads, logits) != 0) { printf("%s: gpt2_decode() failed\n", __func__); return 1; } // assign the system KV cache to all parallel sequences // this way, the parallel sequences will "reuse" the prompt tokens without having to copy them for (int32_t i = 1; i < n_parallel; ++i) { gpt2_kv_cache_seq_cp(model.kv_cache, 0, i, 0, batch.n_tokens); } if (n_parallel > 1) { printf("\n\n%s: generating %d sequences ...\n", __func__, n_parallel); } params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { printf("%d ", embd_inp[i]); } printf("\n\n"); std::vector streams(n_parallel); // remember the batch index of the last token for each parallel sequence // we need this to determine which logits to sample from 
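// The generation loop below performs batched decoding across n_parallel sequences:
// each iteration samples one token for every stream that is still active, using the
// logits row recorded in i_batch[i]; a stream is stopped when it emits the end-of-text
// token 50256 (unless EOS is ignored via params.ignore_eos) or when the length limit
// n_len is reached; otherwise the sampled token is appended to the next batch with its
// own seq_id and logits enabled, so a single gpt2_decode() call advances all streams at once.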
std::vector i_batch(n_parallel, batch.n_tokens - 1); int n_cur = batch.n_tokens; int n_len = batch.n_tokens + params.n_predict; int n_decoded = 0; const int n_vocab = model.hparams.n_vocab; const int top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; while (n_cur < n_len) { batch.n_tokens = 0; for (int32_t i = 0; i < n_parallel; ++i) { if (i_batch[i] < 0) { // the stream has already finished continue; } auto * logits_i = logits.data() + i_batch[i]*n_vocab; gpt_vocab::id id = 0; { const int64_t t_start_sample_us = ggml_time_us(); id = gpt_sample_top_k_top_p(vocab, logits_i, top_k, top_p, temp, rng); t_sample_us += ggml_time_us() - t_start_sample_us; } // is it an end of stream? -> mark the stream as finished if ((!params.ignore_eos && id == 50256) || n_cur == n_len - 1) { i_batch[i] = -1; printf("\n"); if (n_parallel > 1) { printf("%s: stream %d finished at n_cur = %d", __func__, i, n_cur); } continue; } auto& token = vocab.id_to_token[id]; if (n_parallel == 1) { printf("%s", token.c_str()); fflush(stdout); } streams[i] += token; // push this new token for next evaluation batch.token [batch.n_tokens] = id; batch.pos [batch.n_tokens] = n_cur; batch.seq_id[batch.n_tokens] = i; batch.logits[batch.n_tokens] = true; i_batch[i] = batch.n_tokens; batch.n_tokens += 1; n_decoded += 1; } // all streams are finished if (batch.n_tokens == 0) { break; } n_cur += 1; { const int64_t t_start_us = ggml_time_us(); // evaluate the current batch with the transformer model int ret_code = gpt2_decode(model, allocr, batch, params.n_threads, logits); if (ret_code != 0) { fprintf(stderr, "%s : failed to eval, return code %d\n", __func__, ret_code); return 1; } t_predict_us += ggml_time_us() - t_start_us; } } if (n_parallel > 1) { printf("\n"); for (int32_t i = 0; i < n_parallel; ++i) { printf("sequence %d:\n\n%s%s\n\n", i, params.prompt.c_str(), streams[i].c_str()); } } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n\n"); printf("%s: n_decoded = %8d\n", __func__, n_decoded); printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); printf("%s: predict time = %8.2f ms\n", __func__, t_predict_us/1000.0f); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } gpt2_batch_free(batch); ggml_free(model.ctx_w); ggml_gallocr_free(allocr); ggml_backend_buffer_free(model.buffer_w); ggml_backend_buffer_free(model.kv_cache.buffer); ggml_backend_free(model.backend); return 0; } ggml-org-ggml-3678254/examples/gpt-2/main-ctx.cpp000066400000000000000000000714431512524704700213700ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif // default hparams (GPT-2 117M) struct gpt2_hparams { int32_t n_vocab = 50257; int32_t n_ctx = 1024; int32_t n_embd = 768; int32_t n_head = 12; int32_t n_layer = 12; int32_t ftype = 1; float eps = 1e-5f; }; struct gpt2_layer { // normalization struct ggml_tensor * ln_1_g; struct ggml_tensor * ln_1_b; struct ggml_tensor * ln_2_g; struct ggml_tensor * ln_2_b; // attention struct ggml_tensor * c_attn_attn_w; struct ggml_tensor * c_attn_attn_b; struct ggml_tensor * c_attn_proj_w; struct ggml_tensor * c_attn_proj_b; // mlp struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * 
c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gpt2_model { gpt2_hparams hparams; // normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; struct ggml_tensor * wte; // token embedding struct ggml_tensor * wpe; // position embedding struct ggml_tensor * lm_head; // language model head std::vector layers; // key + value memory struct ggml_tensor * memory_k; struct ggml_tensor * memory_v; // struct ggml_context * ctx_w; std::map tensors; }; // load the model's weights from a file bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab) { printf("%s: loading model from '%s'\n", __func__, fname.c_str()); auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } // load hparams { auto & hparams = model.hparams; fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // load vocab { int32_t n_vocab = 0; fin.read((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != model.hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); return false; } std::string word; std::vector buf(128); for (int i = 0; i < n_vocab; i++) { uint32_t len; fin.read((char *) &len, sizeof(len)); buf.resize(len); fin.read((char *) buf.data(), len); word.assign(buf.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), model.hparams.ftype); return false; } auto & ctx = model.ctx_w; size_t ctx_size = 0; { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_g ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_b ctx_size += ggml_row_size(wtype, n_vocab*n_embd); // wte ctx_size += ggml_row_size(GGML_TYPE_F32, n_ctx*n_embd); // wpe ctx_size += ggml_row_size(wtype, n_vocab*n_embd); // lm_head ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_g ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_b ctx_size += 
n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_g ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_b ctx_size += n_layer*(ggml_row_size(wtype, 3*n_embd*n_embd)); // c_attn_attn_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 3*n_embd)); // c_attn_attn_b ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_proj_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_attn_proj_b ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_fc_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_fc_b ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_proj_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_proj_b ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_k ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_v ctx_size += (6 + 12*n_layer)*512; // object overhead printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_tensor)); printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); } // create the ggml context { struct ggml_init_params params = { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, }; model.ctx_w = ggml_init(params); if (!model.ctx_w) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } // prepare memory for the weights { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; model.layers.resize(n_layer); model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); // map by name model.tensors["model/ln_f/g"] = model.ln_f_g; model.tensors["model/ln_f/b"] = model.ln_f_b; model.tensors["model/wte"] = model.wte; model.tensors["model/wpe"] = model.wpe; model.tensors["model/lm_head"] = model.lm_head; for (int i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // map by name model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; 
model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; } } // key + value memory { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_mem = n_layer*n_ctx; const int n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); } // load weights { size_t total_size = 0; bool has_lm_head = false; while (true) { int32_t n_dims; int32_t length; int32_t ttype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ttype), sizeof(ttype)); if (fin.eof()) { break; } int32_t nelements = 1; int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name) == model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str()); return false; } auto tensor = model.tensors[name]; if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.c_str(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); return false; } // for debugging if (0) { printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); } const size_t bpe = ggml_type_size(ggml_type(ttype)); if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.c_str(), ggml_nbytes(tensor), nelements*bpe); return false; } fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); // GPT-2 models share the WTE tensor as the LM head if (name == "model/wte" && has_lm_head == false) { memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor)); } if (name == "model/lm_head") { has_lm_head = true; } total_size += ggml_nbytes(tensor); } printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); } fin.close(); return true; } // evaluate the transformer // // - model: the model // - n_threads: number of threads to use // - n_past: the context size so far // - embd_inp: the embeddings of the tokens in the context // - embd_w: the predicted logits for the next token // bool gpt2_eval( const gpt2_model & model, const int n_threads, const int n_past, const std::vector & embd_inp, std::vector & embd_w, size_t & mem_per_token) { const int N = 
embd_inp.size(); const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; const int n_vocab = hparams.n_vocab; static size_t buf_size = 256u*1024*1024; static void * buf = malloc(buf_size); if (mem_per_token > 0 && mem_per_token*N > buf_size) { const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new); // reallocate buf_size = buf_size_new; buf = realloc(buf, buf_size); if (buf == nullptr) { fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size); return false; } } struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf, /*.no_alloc =*/ false, }; struct ggml_context * ctx0 = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph(ctx0); struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); for (int i = 0; i < N; ++i) { ((int32_t *) position->data)[i] = n_past + i; } // wte + wpe struct ggml_tensor * inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.wte, embd), ggml_get_rows(ctx0, model.wpe, position)); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; // norm { // [ 768, N] cur = ggml_norm(ctx0, inpL, hparams.eps); // cur = ln_1_g*cur + ln_1_b // [ 768, N] cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_1_g, cur), cur), ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); } // attn // [2304, 768] - model.layers[il].c_attn_attn_w // [2304, 1] - model.layers[il].c_attn_attn_b // [ 768, N] - cur (in) // [2304, N] - cur (out) // // cur = attn_w*cur + attn_b // [2304, N] { cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_attn_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur), cur); } // self-attention { struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd); // store key and value to memory if (N >= 1) { struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); } // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) // [64, N, 12] struct ggml_tensor * Q = ggml_permute(ctx0, ggml_cpy(ctx0, Qcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)), 0, 2, 1, 3); // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) // [64, n_past + N, 12] struct ggml_tensor * K = ggml_permute(ctx0, ggml_reshape_3d(ctx0, ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), n_embd/n_head, n_head, n_past + N), 0, 2, 1, 3); // GG: flash attention //struct ggml_tensor * V = // ggml_cpy(ctx0, // ggml_permute(ctx0, // ggml_reshape_3d(ctx0, // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, 
il*n_ctx*ggml_element_size(model.memory_v)*n_embd), // n_embd/n_head, n_head, n_past + N), // 1, 2, 0, 3), // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); // K * Q // [n_past + N, N, 12] struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); // KQ_scaled = KQ / sqrt(n_embd/n_head) // [n_past + N, N, 12] struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, 1.0f/sqrt(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) // [n_past + N, N, 12] struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past); // KQ = soft_max(KQ_masked) // [n_past + N, N, 12] struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() // [n_past + N, 64, 12] struct ggml_tensor * V_trans = ggml_cpy(ctx0, ggml_permute(ctx0, ggml_reshape_3d(ctx0, ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), n_embd/n_head, n_head, n_past + N), 1, 2, 0, 3), ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head)); // KQV = transpose(V) * KQ_soft_max // [64, N, 12] struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); // KQV_merged = KQV.permute(0, 2, 1, 3) // [64, 12, N] struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); // cur = KQV_merged.contiguous().view(n_embd, N) // [768, N] cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); } // projection // [ 768, 768] - model.layers[il].c_attn_proj_w // [ 768, 1] - model.layers[il].c_attn_proj_b // [ 768, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] { cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_proj_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur); } // add the input cur = ggml_add(ctx0, cur, inpL); struct ggml_tensor * inpFF = cur; // feed-forward network { // norm { cur = ggml_norm(ctx0, inpFF, hparams.eps); // cur = ln_2_g*cur + ln_2_b // [ 768, N] cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_2_g, cur), cur), ggml_repeat(ctx0, model.layers[il].ln_2_b, cur)); } // fully connected // [3072, 768] - model.layers[il].c_mlp_fc_w // [3072, 1] - model.layers[il].c_mlp_fc_b // [ 768, N] - cur (in) // [3072, N] - cur (out) // // cur = fc_w*cur + fc_b // [3072, N] cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), cur); // GELU activation // [3072, N] cur = ggml_gelu(ctx0, cur); // projection // [ 768, 3072] - model.layers[il].c_mlp_proj_w // [ 768, 1] - model.layers[il].c_mlp_proj_b // [3072, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), cur); } // input for next layer inpL = ggml_add(ctx0, cur, inpFF); } // norm { // [ 768, N] inpL = ggml_norm(ctx0, inpL, hparams.eps); // inpL = ln_f_g*inpL + ln_f_b // [ 768, N] inpL = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.ln_f_g, inpL), inpL), ggml_repeat(ctx0, model.ln_f_b, inpL)); } // inpL = WTE * inpL // [ 768, 50257] - model.lm_head // [ 768, N] - inpL inpL = ggml_mul_mat(ctx0, model.lm_head, inpL); // logits -> probs //inpL = ggml_soft_max_inplace(ctx0, inpL); // run the computation 
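// ggml_build_forward_expand() adds inpL and all of its parent operations to the graph,
// and ggml_graph_compute_with_ctx() then evaluates that graph on the CPU with n_threads
// worker threads, allocating the required work buffer from the same context ctx0.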
ggml_build_forward_expand(gf, inpL); ggml_graph_compute_with_ctx(ctx0, gf, n_threads); //if (n_past%100 == 0) { // ggml_graph_print (&gf); // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); //} //embd_w.resize(n_vocab*N); //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); // return result just for the last token embd_w.resize(n_vocab); memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); if (mem_per_token == 0) { mem_per_token = ggml_used_mem(ctx0)/N; } //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); ggml_free(ctx0); return true; } int main(int argc, char ** argv) { ggml_time_init(); const int64_t t_main_start_us = ggml_time_us(); gpt_params params; params.model = "models/gpt-2-117M/ggml-model.bin"; if (gpt_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } printf("%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.prompt.empty()) { params.prompt = gpt_random_prompt(rng); } int64_t t_load_us = 0; gpt_vocab vocab; gpt2_model model; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gpt2_model_load(params.model, model, vocab)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; test_gpt_tokenizer(vocab, params.token_test); } int n_past = 0; int64_t t_sample_us = 0; int64_t t_predict_us = 0; std::vector logits; // tokenize the prompt std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { printf("%d ", embd_inp[i]); } printf("\n\n"); // submit the input prompt token-by-token // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning std::vector embd; // determine the required inference memory per token: size_t mem_per_token = 0; gpt2_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { // predict if (embd.size() > 0) { const int64_t t_start_us = ggml_time_us(); if (!gpt2_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { printf("Failed to predict\n"); return 1; } t_predict_us += ggml_time_us() - t_start_us; } n_past += embd.size(); embd.clear(); if (i >= embd_inp.size()) { // sample next token const int top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; const int n_vocab = model.hparams.n_vocab; gpt_vocab::id id = 0; { const int64_t t_start_sample_us = ggml_time_us(); id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); t_sample_us += ggml_time_us() - t_start_sample_us; } // add it to the context embd.push_back(id); } else { // if here, it means we are still processing the input prompt for (size_t k = i; k < embd_inp.size(); k++) { embd.push_back(embd_inp[k]); if (int32_t(embd.size()) >= params.n_batch) { break; } } i += embd.size() - 1; } // display text for (auto id : embd) { printf("%s", vocab.id_to_token[id].c_str()); } fflush(stdout); // end of text token if (embd.back() == 50256) { break; } } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n\n"); printf("%s: 
mem per token = %8zu bytes\n", __func__, mem_per_token); printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } ggml_free(model.ctx_w); return 0; } ggml-org-ggml-3678254/examples/gpt-2/main-sched.cpp000066400000000000000000001132361512524704700216550ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #ifdef GGML_USE_CUDA #include "ggml-cuda.h" #endif #ifdef GGML_USE_METAL #include "ggml-metal.h" #endif #ifdef GGML_USE_BLAS #include "ggml-blas.h" #endif #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif #define GPT2_MAX_NODES 4096 static void ggml_log_callback_default(ggml_log_level level, const char * text, void * user_data) { (void) level; (void) user_data; fputs(text, stderr); fflush(stderr); } // default hparams (GPT-2 117M) struct gpt2_hparams { int32_t n_vocab = 50257; int32_t n_ctx = 1024; int32_t n_embd = 768; int32_t n_head = 12; int32_t n_layer = 12; int32_t ftype = 1; float eps = 1e-5f; }; struct gpt2_layer { // normalization struct ggml_tensor * ln_1_g; struct ggml_tensor * ln_1_b; struct ggml_tensor * ln_2_g; struct ggml_tensor * ln_2_b; // attention struct ggml_tensor * c_attn_attn_w; struct ggml_tensor * c_attn_attn_b; struct ggml_tensor * c_attn_proj_w; struct ggml_tensor * c_attn_proj_b; // mlp struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gpt2_model { gpt2_hparams hparams; // normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; struct ggml_tensor * wte; // tkoen embedding struct ggml_tensor * wpe; // position embedding struct ggml_tensor * lm_head; // language model head std::vector layers; // key + value memory struct ggml_tensor * memory_k; struct ggml_tensor * memory_v; // struct ggml_context * ctx_w; std::vector backends; std::vector buffers_w; ggml_backend_buffer_t buffer_kv; ggml_backend_buffer_t buffer_input; std::map tensors; // inputs/constants struct ggml_tensor * embd; struct ggml_tensor * position; }; void init_backends(gpt2_model & model, const gpt_params & params) { ggml_backend_t gpu_backend = NULL; ggml_log_set(ggml_log_callback_default, nullptr); // initialize the backends #ifdef GGML_USE_CUDA if (params.n_gpu_layers > 0) { fprintf(stderr, "%s: using CUDA backend\n", __func__); gpu_backend = ggml_backend_cuda_init(0); if (!gpu_backend) { fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); } } #endif #ifdef GGML_USE_METAL if (params.n_gpu_layers > 0) { fprintf(stderr, "%s: using Metal backend\n", __func__); gpu_backend = ggml_backend_metal_init(); if (!gpu_backend) { fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); } } #endif if (gpu_backend) { model.backends.push_back(gpu_backend); } #ifdef GGML_USE_BLAS ggml_backend_t blas_backend = ggml_backend_blas_init(); if (!blas_backend) { fprintf(stderr, "%s: failed to initialize BLAS backend\n", __func__); } else { ggml_backend_blas_set_n_threads(blas_backend, params.n_threads); model.backends.push_back(blas_backend); } #endif // always add the CPU 
backend as a fallback ggml_backend_t cpu_backend = ggml_backend_cpu_init(); ggml_backend_cpu_set_n_threads(cpu_backend, params.n_threads); model.backends.push_back(cpu_backend); } // load the model's weights from a file bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab, const gpt_params & params) { printf("%s: loading model from '%s'\n", __func__, fname.c_str()); auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } // load hparams { auto & hparams = model.hparams; fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // load vocab { int32_t n_vocab = 0; fin.read((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != model.hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); return false; } std::string word; std::vector buf(128); for (int i = 0; i < n_vocab; i++) { uint32_t len; fin.read((char *) &len, sizeof(len)); buf.resize(len); fin.read((char *) buf.data(), len); word.assign(buf.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), model.hparams.ftype); return false; } auto & ctx = model.ctx_w; // create the ggml context { size_t n_tensors = 3 /* input */ + 2 /* kv */ + 6 + 12*model.hparams.n_layer; struct ggml_init_params params = { /*.mem_size =*/ ggml_tensor_overhead() * n_tensors, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; model.ctx_w = ggml_init(params); if (!model.ctx_w) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } // create tensors for the weights { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; model.layers.resize(n_layer); model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, 
n_vocab); // map by name model.tensors["model/ln_f/g"] = model.ln_f_g; model.tensors["model/ln_f/b"] = model.ln_f_b; model.tensors["model/wte"] = model.wte; model.tensors["model/wpe"] = model.wpe; model.tensors["model/lm_head"] = model.lm_head; for (int i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // map by name model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; } } // assign tensors to backends init_backends(model, params); ggml_backend_t backend_gpu = model.backends.front(); ggml_backend_t backend_cpu = model.backends.back(); std::map tensor_backends; { const int i_gpu_first_layer = model.hparams.n_layer - params.n_gpu_layers; for (auto it : model.tensors) { const std::string & name = it.first; // input tensors if (name == "model/wte" || name == "model/wpe") { if (params.n_gpu_layers > model.hparams.n_layer) { tensor_backends[name] = backend_gpu; } else { tensor_backends[name] = backend_cpu; } } // output tensors if (name == "model/ln_f/g" || name == "model/ln_f/b" || name == "model/lm_head") { if (params.n_gpu_layers > 0) { tensor_backends[name] = backend_gpu; } else { tensor_backends[name] = backend_cpu; } } // layer tensors if (name.substr(0, 7) == "model/h") { // parse layer number int layer = std::stoi(name.substr(7, 2)); if (layer >= i_gpu_first_layer) { tensor_backends[name] = backend_gpu; } else { tensor_backends[name] = backend_cpu; } } } } // allocate buffers std::map backend_buffers; for (auto backend : model.backends) { // compute the size of the buffer size_t size = 0; for (auto it : model.tensors) { if (tensor_backends[it.first] == backend) { size += ggml_nbytes(it.second) + 512; } } if (size > 0) { printf("%s: %8s buffer size = %8.2f MB\n", __func__, ggml_backend_name(backend), size/1024.0/1024.0); // allocate the buffer ggml_backend_buffer_t buffer = 
ggml_backend_alloc_buffer(backend, size); ggml_backend_buffer_set_usage(buffer, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); model.buffers_w.push_back(buffer); // create an allocator for the buffer to allocate the tensors auto alloc = ggml_tallocr_new(buffer); backend_buffers.insert(std::make_pair(backend, std::move(alloc))); } else { model.buffers_w.push_back(NULL); } } // allocate key + value memory { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_mem = n_layer*n_ctx; const int n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); ggml_set_name(model.memory_k, "model/memory_k"); ggml_set_name(model.memory_v, "model/memory_v"); const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); // create a backend buffer (can be in host or device memory) ggml_backend_t backend_kv = params.n_gpu_layers >= hparams.n_layer/2 ? backend_gpu : backend_cpu; printf("%s: backend_kv = %s\n", __func__, ggml_backend_name(backend_kv)); model.buffer_kv = ggml_backend_alloc_buffer(backend_kv, memory_size + 512*2); // allocate the tensors into the backend buffer { ggml_tallocr alloc = ggml_tallocr_new(model.buffer_kv); // this updates the pointers in the tensors to point to the correct location in the buffer // this is necessary since the ggml_context is .no_alloc == true // note that the buffer can actually be a device buffer, depending on the backend ggml_tallocr_alloc(&alloc, model.memory_k); ggml_tallocr_alloc(&alloc, model.memory_v); } } // load weights { size_t total_size = 0; bool has_lm_head = false; std::vector read_buf; while (true) { int32_t n_dims; int32_t length; int32_t ttype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ttype), sizeof(ttype)); if (fin.eof()) { break; } int32_t nelements = 1; int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name) == model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str()); return false; } auto tensor = model.tensors[name]; ggml_set_name(tensor, name.c_str()); if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.c_str(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); return false; } // for debugging if (0) { printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); } const size_t bpe = ggml_type_size(ggml_type(ttype)); if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.c_str(), ggml_nbytes(tensor), nelements*bpe); return false; } // allocate the tensor ggml_backend_t backend = tensor_backends[name]; ggml_tallocr * alloc = 
&backend_buffers.find(backend)->second; ggml_tallocr_alloc(alloc, tensor); //printf("%s: [%5.5s] %s\n", __func__, ggml_backend_name(backend), name.c_str()); if (ggml_backend_is_cpu(backend) #ifdef GGML_USE_METAL || ggml_backend_is_metal(backend) #endif ) { // for the CPU and Metal backend, we can read directly into the tensor fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); } else { // read into a temporary buffer first, then copy to device memory read_buf.resize(ggml_nbytes(tensor)); fin.read(read_buf.data(), ggml_nbytes(tensor)); ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor)); } // GPT-2 models share the WTE tensor as the LM head if (name == "model/wte" && has_lm_head == false) { ggml_tallocr * alloc_head = &backend_buffers.find(tensor_backends["model/lm_head"])->second; ggml_tallocr_alloc(alloc_head, model.lm_head); //printf("%s: [%5.5s] %s (copied)\n", __func__, ggml_backend_name(tensor_backends["model/lm_head"]), "model/lm_head"); ggml_backend_tensor_copy(tensor, model.lm_head); total_size += ggml_nbytes(model.lm_head); } if (name == "model/lm_head") { has_lm_head = true; } total_size += ggml_nbytes(tensor); } printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); } fin.close(); // allocate input tensors { model.embd = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, model.hparams.n_ctx); model.position = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, model.hparams.n_ctx); ggml_set_name(model.embd, "in/embd"); ggml_set_name(model.position, "in/position"); // add input tensors to cpu backend size_t input_size = ggml_nbytes(model.embd) + ggml_nbytes(model.position); // FIXME: use cpu backend after sched impl ggml_backend_t backend_input = params.n_gpu_layers >= model.hparams.n_layer ? backend_gpu : backend_cpu; model.buffer_input = ggml_backend_alloc_buffer(backend_input, input_size + 512*3); printf("%s: backend_in = %s (%zu bytes)\n", __func__, ggml_backend_name(backend_input), input_size); // allocate the tensors into the backend buffer ggml_tallocr alloc = ggml_tallocr_new(model.buffer_input); ggml_tallocr_alloc(&alloc, model.embd); ggml_tallocr_alloc(&alloc, model.position); } return true; } // build the computation graph struct ggml_cgraph * gpt2_graph( const gpt2_model & model, const int n_past, const std::vector & embd_inp) { const int N = embd_inp.size(); const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; // since we are using ggml-alloc, this buffer only needs enough space to hold the ggml_tensor and ggml_cgraph structs, but not the tensor data static size_t buf_size = ggml_tensor_overhead()*GPT2_MAX_NODES + ggml_graph_overhead_custom(GPT2_MAX_NODES, false); static std::vector buf(buf_size); struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf.data(), /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph() }; struct ggml_context * ctx = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GPT2_MAX_NODES, false); struct ggml_tensor * embd = ggml_view_1d(ctx, model.embd, N, 0); // set inputs // TODO: move to gpt2_eval ggml_backend_tensor_set(model.embd, embd_inp.data(), 0, N*ggml_element_size(embd)); struct ggml_tensor * position = ggml_view_1d(ctx, model.position, N, 0); for (int i = 0; i < N; ++i) { int32_t v = n_past + i; ggml_backend_tensor_set(model.position, &v, i*sizeof(int32_t), sizeof(v)); } const float KQ_scale = 
1.0f/sqrtf(float(model.hparams.n_embd)/model.hparams.n_head); // wte + wpe struct ggml_tensor * inpL = ggml_add(ctx, ggml_get_rows(ctx, model.wte, embd), ggml_get_rows(ctx, model.wpe, position)); ggml_set_name(inpL, "inpL"); ggml_set_name(inpL->src[0], "wte"); ggml_set_name(inpL->src[1], "wpe"); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; // norm { // [ 768, N] cur = ggml_norm(ctx, inpL, hparams.eps); ggml_format_name(cur, "l%d.norm", il); // cur = ln_1_g*cur + ln_1_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, cur, model.layers[il].ln_1_g), model.layers[il].ln_1_b); ggml_format_name(cur, "l%d.ln_1_b", il); ggml_format_name(cur->src[0], "l%d.ln_1_g", il); } // attn // [2304, 768] - model.layers[il].c_attn_attn_w // [2304, 1] - model.layers[il].c_attn_attn_b // [ 768, N] - cur (in) // [2304, N] - cur (out) // // cur = attn_w*cur + attn_b // [2304, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_attn_w, cur); ggml_format_name(cur, "l%d.attn_w", il); cur = ggml_add(ctx, cur, model.layers[il].c_attn_attn_b); ggml_format_name(cur, "l%d.attn_b", il); } // self-attention { struct ggml_tensor * Qcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); struct ggml_tensor * Kcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); struct ggml_tensor * Vcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd); ggml_format_name(Qcur, "l%d.Qcur", il); ggml_format_name(Kcur, "l%d.Kcur", il); ggml_format_name(Vcur, "l%d.Vcur", il); // store key and value to memory if (N >= 1) { struct ggml_tensor * k = ggml_view_1d(ctx, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); struct ggml_tensor * v = ggml_view_1d(ctx, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx, Vcur, v)); } // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) // [64, N, 12] struct ggml_tensor * Q = ggml_permute(ctx, ggml_cont_3d(ctx, Qcur, n_embd/n_head, n_head, N), 0, 2, 1, 3); ggml_format_name(Q, "l%d.Q", il); // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) // [64, n_past + N, 12] struct ggml_tensor * K = ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), n_embd/n_head, n_head, n_past + N), 0, 2, 1, 3); ggml_format_name(K, "l%d.K", il); // GG: flash attention //struct ggml_tensor * V = // ggml_cpy(ctx0, // ggml_permute(ctx0, // ggml_reshape_3d(ctx0, // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), // n_embd/n_head, n_head, n_past + N), // 1, 2, 0, 3), // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); // K * Q // [n_past + N, N, 12] struct ggml_tensor * KQ = ggml_mul_mat(ctx, K, Q); ggml_format_name(KQ, "l%d.KQ", il); // KQ_scaled = KQ / sqrt(n_embd/n_head) // [n_past + N, N, 12] struct ggml_tensor * KQ_scaled = ggml_scale(ctx, KQ, KQ_scale); ggml_format_name(KQ_scaled, "l%d.KQ_scaled", il); // KQ_masked = mask_past(KQ_scaled) // [n_past + N, N, 12] struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx, KQ_scaled, n_past); ggml_format_name(KQ_masked, "l%d.KQ_masked", il); // KQ = soft_max(KQ_masked) // [n_past + N, N, 12] struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx, 
KQ_masked); ggml_format_name(KQ_soft_max, "l%d.KQ_soft_max", il); // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() // [n_past + N, 64, 12] struct ggml_tensor * V_trans = ggml_cont_3d(ctx, ggml_permute(ctx, ggml_reshape_3d(ctx, ggml_view_1d(ctx, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), n_embd/n_head, n_head, n_past + N), 1, 2, 0, 3), n_past + N, n_embd/n_head, n_head); // KQV = transpose(V) * KQ_soft_max // [64, N, 12] struct ggml_tensor * KQV = ggml_mul_mat(ctx, V_trans, KQ_soft_max); ggml_format_name(KQV, "l%d.KQV", il); // KQV_merged = KQV.permute(0, 2, 1, 3) // [64, 12, N] struct ggml_tensor * KQV_merged = ggml_permute(ctx, KQV, 0, 2, 1, 3); ggml_format_name(KQV_merged, "l%d.KQV_merged", il); // cur = KQV_merged.contiguous().view(n_embd, N) // [768, N] cur = ggml_cont_2d(ctx, KQV_merged, n_embd, N); ggml_format_name(cur, "l%d.KQV_merged_contiguous", il); } // projection // [ 768, 768] - model.layers[il].c_attn_proj_w // [ 768, 1] - model.layers[il].c_attn_proj_b // [ 768, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] { cur = ggml_mul_mat(ctx, model.layers[il].c_attn_proj_w, cur); ggml_format_name(cur, "l%d.attn_proj_w", il); cur = ggml_add(ctx, cur, model.layers[il].c_attn_proj_b); ggml_format_name(cur, "l%d.attn_proj_b", il); } // add the input cur = ggml_add(ctx, cur, inpL); ggml_format_name(cur, "l%d.add", il); struct ggml_tensor * inpFF = cur; // feed-forward network { // norm { cur = ggml_norm(ctx, inpFF, hparams.eps); ggml_format_name(cur, "l%d.FFnorm", il); // cur = ln_2_g*cur + ln_2_b // [ 768, N] cur = ggml_add(ctx, ggml_mul(ctx, cur, model.layers[il].ln_2_g), model.layers[il].ln_2_b); ggml_format_name(cur, "l%d.ln_2_b", il); ggml_format_name(cur->src[0], "l%d.ln_2_g", il); } // fully connected // [3072, 768] - model.layers[il].c_mlp_fc_w // [3072, 1] - model.layers[il].c_mlp_fc_b // [ 768, N] - cur (in) // [3072, N] - cur (out) // // cur = fc_w*cur + fc_b // [3072, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_fc_w, cur); ggml_format_name(cur, "l%d.mlp_fc_w", il); cur = ggml_add(ctx, cur, model.layers[il].c_mlp_fc_b); ggml_format_name(cur, "l%d.mlp_fc_b", il); // GELU activation // [3072, N] cur = ggml_gelu(ctx, cur); ggml_format_name(cur, "l%d.gelu", il); // projection // [ 768, 3072] - model.layers[il].c_mlp_proj_w // [ 768, 1] - model.layers[il].c_mlp_proj_b // [3072, N] - cur (in) // [ 768, N] - cur (out) // // cur = proj_w*cur + proj_b // [768, N] cur = ggml_mul_mat(ctx, model.layers[il].c_mlp_proj_w, cur); ggml_format_name(cur, "l%d.mlp_proj_w", il); cur = ggml_add(ctx, cur, model.layers[il].c_mlp_proj_b); ggml_format_name(cur, "l%d.mlp_proj_b", il); } // input for next layer inpL = ggml_add(ctx, cur, inpFF); ggml_format_name(inpL, "l%d.add2", il); } // norm { // [ 768, N] inpL = ggml_norm(ctx, inpL, hparams.eps); ggml_format_name(inpL, "out_norm"); // inpL = ln_f_g*inpL + ln_f_b // [ 768, N] inpL = ggml_add(ctx, ggml_mul(ctx, inpL, model.ln_f_g), model.ln_f_b); ggml_format_name(inpL, "out_ln_f_b"); ggml_format_name(inpL->src[0], "out_ln_f_g"); } // inpL = WTE * inpL // [ 768, 50257] - model.lm_head // [ 768, N] - inpL inpL = ggml_mul_mat(ctx, model.lm_head, inpL); ggml_format_name(inpL, "out_lm_head"); // logits -> probs //inpL = ggml_soft_max(ctx0, inpL); ggml_build_forward_expand(gf, inpL); ggml_free(ctx); return gf; } // evaluate the transformer // // - model: the model // - sched: the backend scheduler // - n_past: the context size so 
far // - embd_inp: the embeddings of the tokens in the context // - embd_w: the predicted logits for the next token // bool gpt2_eval( const gpt2_model & model, ggml_backend_sched_t sched, const int n_past, const std::vector & embd_inp, std::vector & embd_w) { const int N = embd_inp.size(); const auto & hparams = model.hparams; const int n_vocab = hparams.n_vocab; struct ggml_cgraph * gf = gpt2_graph(model, n_past, embd_inp); // run the computation ggml_backend_sched_reset(sched); ggml_backend_sched_graph_compute(sched, gf); //if (n_past%100 == 0) { // ggml_graph_print (&gf); // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); //} // in this case, the output tensor is the last one in the graph struct ggml_tensor * inpL = ggml_graph_node(gf, -1); //embd_w.resize(n_vocab*N); //ggml_backend_tensor_get(inpL, embd_w.data(), 0, sizeof(float)*n_vocab*N); // return result just for the last token embd_w.resize(n_vocab); ggml_backend_tensor_get(inpL, embd_w.data(), (n_vocab*(N-1))*sizeof(float), sizeof(float)*n_vocab); return true; } int main(int argc, char ** argv) { ggml_time_init(); const int64_t t_main_start_us = ggml_time_us(); gpt_params params; params.model = "models/gpt-2-117M/ggml-model.bin"; if (gpt_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } printf("%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.prompt.empty()) { params.prompt = gpt_random_prompt(rng); } int64_t t_load_us = 0; gpt_vocab vocab; gpt2_model model; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gpt2_model_load(params.model, model, vocab, params)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; test_gpt_tokenizer(vocab, params.token_test); } // create the backend scheduler // the scheduler handles the allocation of the compute buffers and the scheduling of the computation between the different backends ggml_backend_sched_t sched; { // initialize the scheduler sched = ggml_backend_sched_new(model.backends.data(), NULL, model.backends.size(), GPT2_MAX_NODES, false, true); // create the worst case graph for memory usage estimation int n_tokens = std::min(model.hparams.n_ctx, params.n_batch); int n_past = model.hparams.n_ctx - n_tokens; struct ggml_cgraph * gf = gpt2_graph(model, n_past, std::vector(n_tokens, 0)); ggml_backend_sched_reserve(sched, gf); // compute the required memory size_t mem_size = 0; for (size_t i = 0; i < model.backends.size(); i++) { size_t size = ggml_backend_sched_get_buffer_size(sched, model.backends[i]); if (size > 0) { mem_size += size; printf("%s: %8s compute buffer size = %8.2f MB\n", __func__, ggml_backend_name(model.backends[i]), size/1024.0/1024.0); //printf("%s: %8s compute buffer size = %zu bytes\n", __func__, ggml_backend_name(model.backends[i]), size); } } printf("%s: total compute buffer size: %.2f MB\n", __func__, mem_size/1024.0/1024.0); } int n_past = 0; int64_t t_sample_us = 0; int64_t t_predict_us = 0; std::vector logits; // tokenize the prompt std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { printf("%d ", embd_inp[i]); } printf("\n\n"); // submit the input 
prompt token-by-token // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning std::vector embd; for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { // predict if (embd.size() > 0) { const int64_t t_start_us = ggml_time_us(); if (!gpt2_eval(model, sched, n_past, embd, logits)) { printf("Failed to predict\n"); return 1; } t_predict_us += ggml_time_us() - t_start_us; } n_past += embd.size(); embd.clear(); if (i >= embd_inp.size()) { // sample next token const int top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; const int n_vocab = model.hparams.n_vocab; gpt_vocab::id id = 0; { const int64_t t_start_sample_us = ggml_time_us(); id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); t_sample_us += ggml_time_us() - t_start_sample_us; } // add it to the context embd.push_back(id); } else { // if here, it means we are still processing the input prompt for (size_t k = i; k < embd_inp.size(); k++) { embd.push_back(embd_inp[k]); if (int32_t(embd.size()) >= params.n_batch) { break; } } i += embd.size() - 1; } // display text for (auto id : embd) { printf("%s", vocab.id_to_token[id].c_str()); } fflush(stdout); // end of text token if (embd.back() == 50256) { break; } } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n\n"); printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } ggml_free(model.ctx_w); ggml_backend_sched_free(sched); ggml_backend_buffer_free(model.buffer_kv); for (auto buf : model.buffers_w) { ggml_backend_buffer_free(buf); } for (auto backend : model.backends) { ggml_backend_free(backend); } return 0; } ggml-org-ggml-3678254/examples/gpt-2/quantize.cpp000066400000000000000000000133361512524704700215050ustar00rootroot00000000000000#include "ggml.h" #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #include // default hparams (GPT-2 117M) struct gpt2_hparams { int32_t n_vocab = 50257; int32_t n_ctx = 1024; int32_t n_embd = 768; int32_t n_head = 12; int32_t n_layer = 12; int32_t ftype = 1; }; // quantize a model bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) { gpt_vocab vocab; printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); auto finp = std::ifstream(fname_inp, std::ios::binary); if (!finp) { fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); return false; } auto fout = std::ofstream(fname_out, std::ios::binary); if (!fout) { fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); return false; } // verify magic { uint32_t magic; finp.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); return false; } fout.write((char *) &magic, sizeof(magic)); } gpt2_hparams hparams; // load hparams { finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); finp.read((char *) &hparams.n_head, 
sizeof(hparams.n_head)); finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); finp.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr_src = hparams.ftype / GGML_QNT_VERSION_FACTOR; const int32_t ftype_dst = GGML_QNT_VERSION * GGML_QNT_VERSION_FACTOR + ftype; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype (src) = %d\n", __func__, hparams.ftype); printf("%s: qntvr (src) = %d\n", __func__, qntvr_src); printf("%s: ftype (dst) = %d\n", __func__, ftype_dst); printf("%s: qntvr (dst) = %d\n", __func__, GGML_QNT_VERSION); fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fout.write((char *) &ftype_dst, sizeof(ftype_dst)); } // load vocab { int32_t n_vocab = 0; finp.read ((char *) &n_vocab, sizeof(n_vocab)); fout.write((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); return false; } std::string word; for (int i = 0; i < n_vocab; i++) { uint32_t len; finp.read ((char *) &len, sizeof(len)); fout.write((char *) &len, sizeof(len)); word.resize(len); finp.read ((char *) word.data(), len); fout.write((char *) word.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // regexes of tensor names to be quantized const std::vector to_quant = { "model/wte", "model/lm_head", "model/h.*/attn/c_attn/w", "model/h.*/attn/c_proj/w", "model/h.*/mlp/c_fc/w", "model/h.*/mlp/c_proj/w", }; if (!ggml_common_quantize_0(finp, fout, ftype, to_quant, {})) { fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str()); return false; } finp.close(); fout.close(); return true; } // usage: // ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type // int main(int argc, char ** argv) { if (argc != 4) { fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]); ggml_print_ftypes(stderr); return 1; } // needed to initialize f16 tables { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); ggml_free(ctx); } const std::string fname_inp = argv[1]; const std::string fname_out = argv[2]; const ggml_ftype ftype = ggml_parse_ftype(argv[3]); const int64_t t_main_start_us = ggml_time_us(); int64_t t_quantize_us = 0; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gpt2_model_quantize(fname_inp, fname_out, ggml_ftype(ftype))) { fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); return 1; } t_quantize_us = ggml_time_us() - t_start_us; } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n"); printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } return 0; } 
ggml-org-ggml-3678254/examples/gpt-j/000077500000000000000000000000001512524704700172235ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/gpt-j/CMakeLists.txt000066400000000000000000000004671512524704700217720ustar00rootroot00000000000000# # gpt-j set(TEST_TARGET gpt-j) add_executable(${TEST_TARGET} main.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) # # gpt-j-quantize set(TEST_TARGET gpt-j-quantize) add_executable(${TEST_TARGET} quantize.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) ggml-org-ggml-3678254/examples/gpt-j/README.md000066400000000000000000000253151512524704700205100ustar00rootroot00000000000000# gpt-j Local GPT-J inference on your computer using C/C++ No video card required. You just need to have 16 GB of RAM. ## Motivation The GPT-J 6B model is the open-source alternative to OpenAI's GPT-3. It's basically a neural network that allows you to generate coherent, human-like text given a certain context (prompt). The GPT-J model is quite big - the compact version of the model uses 16-bit floating point representation of the weights and is still 12 GB big. This means that in order to run inference on your computer, you would need to have a video card with at least 12 GB of video RAM. Alternatively, you can try to run the python implementations on the CPU, but that would probably not be very efficient as they are primarily optimized for running on a GPU (or at least this is my guess - I don't have much experience with python). I wanted to try and run the model on my MacBook, so I decided to implement the model inference from scratch using my own custom build tensor library. The tensor library (called [ggml](https://github.com/ggerganov/ggml), written in C) is in early development stage, but it already allows me to run the GPT-J model. On my 32GB MacBook M1 Pro, I achieve an inference speed of about `125 ms/token` or about ~6 words per second (1 word typically consists of 1 or 2 tokens). Here is a sample run with prompt `int main(int argc, char ** argv) {`: ```bash $ time ./bin/gpt-j -p "int main(int argc, char ** argv) {" gptj_model_load: loading model from 'models/gpt-j-6B/ggml-model.bin' - please wait ... gptj_model_load: n_vocab = 50400 gptj_model_load: n_ctx = 2048 gptj_model_load: n_embd = 4096 gptj_model_load: n_head = 16 gptj_model_load: n_layer = 28 gptj_model_load: n_rot = 64 gptj_model_load: f16 = 1 gptj_model_load: ggml ctx size = 13334.86 MB gptj_model_load: memory_size = 1792.00 MB, n_mem = 57344 gptj_model_load: ................................... done gptj_model_load: model size = 11542.79 MB / num tensors = 285 main: number of tokens in prompt = 13 int main(int argc, char ** argv) { (void)argc; (void)argv; { struct sockaddr_in addr; int addrlen; char * ip = "192.168.1.4"; int i; if ( (addrlen = sizeof(addr)) == -1 ) return -1; for (i = 0; i < 10; ++i) { addr.sin_family = AF_INET; addr.sin_addr.s_addr = inet_addr(ip); main: mem per token = 16430420 bytes main: load time = 6211.48 ms main: sample time = 13.74 ms main: predict time = 26420.34 ms / 124.62 ms per token main: total time = 33035.37 ms real 0m33.171s user 3m32.269s sys 0m3.686s $ ``` It took ~6.2 seconds to load the model to memory. After that, it took ~26.4 seconds to generate 200 tokens of what looks like to be the beginning of a networking program in C. Pretty cool! Here is another run, just for fun: ```bash time ./bin/gpt-j -n 500 -t 8 -p "Ask HN: Inherited the worst code and tech team I have ever seen. How to fix it? 
" gptj_model_load: loading model from 'models/gpt-j-6B/ggml-model.bin' - please wait ... gptj_model_load: n_vocab = 50400 gptj_model_load: n_ctx = 2048 gptj_model_load: n_embd = 4096 gptj_model_load: n_head = 16 gptj_model_load: n_layer = 28 gptj_model_load: n_rot = 64 gptj_model_load: f16 = 1 gptj_model_load: ggml ctx size = 13334.86 MB gptj_model_load: memory_size = 1792.00 MB, n_mem = 57344 gptj_model_load: ................................... done gptj_model_load: model size = 11542.79 MB / num tensors = 285 main: number of tokens in prompt = 24 Ask HN: Inherited the worst code and tech team I have ever seen. How to fix it? I've inherited a team with some very strange and un-documented practices, one of them is that they use an old custom application with a very slow tech stack written in Python that the team doesn't want to touch but also doesn't want to throw away as it has some "legacy" code in it. The problem is, the tech stack is very very slow. They have a single web server on a VM that is slow. The server is a little bit busy (not very busy though) and they have a lot of processes (30+ that are constantly being spawned by the application) They have an application that is single threaded and was written in Python and the team don't want to touch this, and the application is very slow. My task as a new member of the team is to fix this. I'm a senior dev on the team (3 years on the project) and have been told that I will take the lead on this task. I know next to nothing about Python. So here is what I have so far. What I have done is I've been trying to debug the processes with the "ps" command. This way I can see what is running and where. From what I see, the application spawns 10 processes a minute and some of them are used for nothing. I have also started to look for the code. The application source is not in GitHub or any other repository, it is only on our internal GitLab. What I've found so far: The application uses a custom SQLAlchemy implementation to interact with the data. I've looked at the source, it looks like an object cache or something like that. But from what I've seen, the cache gets full every 20 minutes and then gets cleared with a special command. Another strange thing is that the application creates a file for every entry in the database (even if the entry already exists). I've looked at the file to see if it contains something, but it seems to be a JSON file with lots of records. The other strange thing is that I can only find the database tables in the GitLab repository and not the code. So I can't really understand how the application is supposed to interact with the database. I also found a "log" directory, but the code is encrypted with AES. From what I've found, it is in main: mem per token = 16430420 bytes main: load time = 3900.10 ms main: sample time = 32.58 ms main: predict time = 68049.91 ms / 130.11 ms per token main: total time = 73020.05 ms real 1m13.156s user 9m1.328s sys. 0m7.103s ``` ## Implementation details The high level implementation of the model is contained in the [main.cpp](main.cpp) file. The core computations are performed by the [ggml](https://github.com/ggerganov/ggml/blob/master/include/ggml.h) library. #### Matrix multiplication The most performance critical part of the implementation is of course the matrix multiplication routine. 99% of the time is spent here, so it was important to optimize this as much as possible. 
On Arm64, I utilize the 128-bit NEON intrinsics for 16-bit floating point operations: https://github.com/ggerganov/ggml/blob/fb558f78d905f85c54813602649ddd628ffe0f3a/src/ggml.c#L187-L243 These instructions allow each core to operate simultaneously on 64 16-bit floats. I'm no expert in SIMD, but after quite some trials this was the most efficient code for dot product of a row and column that I could come up with. Combined with the parallel computation on 8 CPU threads, I believe I'm close to the maximum performance that one could possibly get on the M1 CPU. Still, I'm curious to know if there is a more efficient way to implement this. #### Attempt to use the M1 GPU One interesting property of the GPT-J transformer architecture is that it allows you to perform part of the inference in parallel - i.e. the Feed-forward network can be computed in parallel to the Self-attention layer: https://github.com/ggerganov/ggml/blob/fb558f78d905f85c54813602649ddd628ffe0f3a/examples/gpt-j/main.cpp#L507-L531 So I thought why not try and bring in the M1 GPU to compute half of the neural network in parallel to the CPU and potentially gain some extra performance. Thanks to the M1's shared memory model, it was relatively easy to offload part of the computation to the GPU using Apple's [Metal Performance Shaders](https://developer.apple.com/documentation/metalperformanceshaders). The GPU shares the host memory, so there is no need to copy the data back and forth as you would normally do with Cuda or OpenCL. The weight matrices are directly available to be used by the GPU. However, to my surprise, using MPS together with the CPU did not lead to any performance improvement at all. My conclusion was that the 8-thread NEON CPU computation is already saturating the memory bandwidth of the M1 and since the CPU and the GPU on the MacBook are sharing that bandwidth, it does not help to offload the computation to the GPU. Another observation was that the MPS GPU matrix multiplication using 16-bit floats had the same performance as the 8-thread NEON CPU implementation. Again, I explain this with a saturated memory channel. But of course, my explanation could be totally wrong and somehow the implementation wasn't utilizing the resources correctly. In the end, I decided to not use MPS or the GPU all together. ### Zero memory allocations Another property of my implementation is that it does not perform any memory allocations once the model is loaded into memory. All required memory is allocated at the start of the program with a single `malloc` (technically 2 calls, but that is not important). ## Usage If you want to give this a try and you are on Linux or Mac OS, simply follow these instructions: ```bash # Download the ggml-compatible GPT-J 6B model (requires 12GB disk space) ../examples/gpt-j/download-ggml-model.sh 6B # Run the inference (requires 16GB of CPU RAM) ./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin -p "This is an example" # Input prompt through pipe and run the inference. echo "This is an example" > prompt.txt cat prompt.txt | ./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin ``` To run the `gpt-j` tool, you need the 12GB `ggml-model.bin` file which contains the GPT-J model in [ggml](https://github.com/ggerganov/ggml) compatible format. In the instructions above, the binary file is downloaded from my repository on Hugging Face using the [download-ggml-model.sh](download-ggml-model.sh) script. 
You can also, download the file manually from this link: https://huggingface.co/ggerganov/ggml/tree/main --- Alternatively, if you don't want to download the 12GB ggml model file, you can perform the conversion yourself using python. First, you need to download the full GPT-J model from here: https://huggingface.co/EleutherAI/gpt-j-6B Note that the full model is quite big - about 72 GB. After you download it, you need to convert it to ggml format using the [convert-h5-to-ggml.py](convert-h5-to-ggml.py) script. This will generate the `ggml-model.bin` file, which you can then use with the `gpt-j` program. ## GPT-2 I also implemented a tool for CPU inference using the smaller GPT-2 models. They have worse quality compared to GPT-J, but are much faster to execute. For example, the Small GPT-2 model is only 240 MB big and the inference speed on my MacBook is about 200 tokens/sec. For more details, checkout the GPT-2 example here: [gpt-2](https://github.com/ggerganov/ggml/tree/master/examples/gpt-2) ggml-org-ggml-3678254/examples/gpt-j/convert-h5-to-ggml.py000066400000000000000000000126051512524704700231370ustar00rootroot00000000000000# Convert GPT-J-6B h5 transformer model to ggml format # # Load the model using GPTJForCausalLM. # Iterate over all variables and write them to a binary file. # # For each variable, write the following: # - Number of dimensions (int) # - Name length (int) # - Dimensions (int[n_dims]) # - Name (char[name_length]) # - Data (float[n_dims]) # # By default, the bigger matrices are converted to 16-bit floats. # This can be disabled by adding the "use-f32" CLI argument. # # At the start of the ggml file we write the model parameters # and vocabulary. # import sys import struct import json import torch import numpy as np from transformers import GPTJForCausalLM # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. 
""" bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) if len(sys.argv) < 3: print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n") print(" ftype == 0 -> float32") print(" ftype == 1 -> float16") sys.exit(1) # output in the same directory as the model dir_model = sys.argv[1] fname_out = sys.argv[1] + "/ggml-model.bin" with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f: encoder = json.load(f) with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f: encoder_added = json.load(f) with open(dir_model + "/config.json", "r", encoding="utf-8") as f: hparams = json.load(f) # possible data types # ftype == 0 -> float32 # ftype == 1 -> float16 # # map from ftype to string ftype_str = ["f32", "f16"] ftype = 1 if len(sys.argv) > 2: ftype = int(sys.argv[2]) if ftype < 0 or ftype > 1: print("Invalid ftype: " + str(ftype)) sys.exit(1) fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" model = GPTJForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True) #print (model) list_vars = model.state_dict() #print (list_vars) fout = open(fname_out, "wb") fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex fout.write(struct.pack("i", hparams["vocab_size"])) fout.write(struct.pack("i", hparams["n_positions"])) fout.write(struct.pack("i", hparams["n_embd"])) fout.write(struct.pack("i", hparams["n_head"])) fout.write(struct.pack("i", hparams["n_layer"])) fout.write(struct.pack("i", hparams["rotary_dim"])) fout.write(struct.pack("i", ftype)) byte_encoder = bytes_to_unicode() byte_decoder = {v:k for k, v in byte_encoder.items()} fout.write(struct.pack("i", len(encoder) + len(encoder_added))) for key in encoder: text = bytearray([byte_decoder[c] for c in key]) fout.write(struct.pack("i", len(text))) fout.write(text) for key in encoder_added: text = bytearray([byte_decoder[c] for c in key]) fout.write(struct.pack("i", len(text))) fout.write(text) for name in list_vars.keys(): data = list_vars[name].squeeze().numpy() print("Processing variable: " + name + " with shape: ", data.shape) # we don't need these if name.endswith("attn.masked_bias") or name.endswith(".attn.bias"): print(" Skipping variable: " + name) continue n_dims = len(data.shape); # ftype == 0 -> float32, ftype == 1 -> float16 ftype_cur = 0; if ftype != 0: if name[-7:] == ".weight" and n_dims == 2: print(" Converting to float16") data = data.astype(np.float16) ftype_cur = 1 else: print(" Converting to float32") data = data.astype(np.float32) ftype_cur = 0 else: if data.dtype != np.float32: print(" Converting to float32") data = data.astype(np.float32) ftype_cur = 0 # for efficiency - transpose these matrices: # (note - with latest ggml this is no longer more efficient, so disabling it) # "transformer.h.*.mlp.fc_in.weight" # "transformer.h.*.attn.out_proj.weight" # "transformer.h.*.attn.q_proj.weight" # "transformer.h.*.attn.k_proj.weight" # "transformer.h.*.attn.v_proj.weight" #if name.endswith(".mlp.fc_in.weight") or \ # name.endswith(".attn.out_proj.weight") or \ # name.endswith(".attn.q_proj.weight") or \ # name.endswith(".attn.k_proj.weight") or \ # name.endswith(".attn.v_proj.weight"): # print(" Transposing") # data = data.transpose() # header str = name.encode('utf-8') fout.write(struct.pack("iii", n_dims, len(str), ftype_cur)) for i in range(n_dims): fout.write(struct.pack("i", 
data.shape[n_dims - 1 - i])) fout.write(str); # data data.tofile(fout) fout.close() print("Done. Output file: " + fname_out) print("") ggml-org-ggml-3678254/examples/gpt-j/download-ggml-model.sh000077500000000000000000000033001512524704700234070ustar00rootroot00000000000000#!/bin/bash # This script downloads GPT-J model files that have already been converted to ggml format. # This way you don't have to convert them yourself. # # If you want to download the original GPT-J model files, use the "download-model.sh" script instead. #src="https://ggml.ggerganov.com" #pfx="ggml-model-gpt-j" src="https://huggingface.co/ggerganov/ggml" pfx="resolve/main/ggml-model-gpt-j" ggml_path=$(dirname $(realpath $0)) # GPT-J models models=( "6B" ) # list available models function list_models { printf "\n" printf " Available models:" for model in "${models[@]}"; do printf " $model" done printf "\n\n" } if [ "$#" -ne 1 ]; then printf "Usage: $0 \n" list_models exit 1 fi model=$1 if [[ ! " ${models[@]} " =~ " ${model} " ]]; then printf "Invalid model: $model\n" list_models exit 1 fi # download ggml model printf "Downloading ggml model $model ...\n" mkdir -p models/gpt-j-$model if [ -x "$(command -v wget)" ]; then wget --quiet --show-progress -O models/gpt-j-$model/ggml-model.bin $src/$pfx-$model.bin elif [ -x "$(command -v curl)" ]; then curl -L --output models/gpt-j-$model/ggml-model.bin $src/$pfx-$model.bin else printf "Either wget or curl is required to download models.\n" exit 1 fi if [ $? -ne 0 ]; then printf "Failed to download ggml model $model \n" printf "Please try again later or download the original GPT-J model files and convert them yourself.\n" exit 1 fi printf "Done! Model '$model' saved in 'models/gpt-j-$model/ggml-model.bin'\n" printf "You can now use it like this:\n\n" printf " $ ./bin/gpt-j -m models/gpt-j-$model/ggml-model.bin -p \"This is an example\"\n" printf "\n" ggml-org-ggml-3678254/examples/gpt-j/download-model.sh000077500000000000000000000010731512524704700224700ustar00rootroot00000000000000#!/bin/bash printf "To obtain the GPT-J 6B model files, please visit: https://huggingface.co/EleutherAI/gpt-j-6B\n\n" printf "The model is very big. 
For example, the reposirory above is 72GB in size.\n" printf "If you are sure that you want to clone it, simply run the following command:\n\n" printf " $ git clone https://huggingface.co/EleutherAI/gpt-j-6B models/gpt-j-6B\n\n" printf "Alternatively, use the 'download-ggml-model.sh' script to download a 12GB ggml version of the model.\n" printf "This version is enough to run inference using the ggml library.\n\n" ggml-org-ggml-3678254/examples/gpt-j/main.cpp000066400000000000000000000631111512524704700206550ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif // default hparams (GPT-J 6B) struct gptj_hparams { int32_t n_vocab = 50400; int32_t n_ctx = 2048; int32_t n_embd = 4096; int32_t n_head = 16; int32_t n_layer = 28; int32_t n_rot = 64; int32_t ftype = 1; float eps = 1e-5f; }; struct gptj_layer { // normalization struct ggml_tensor * ln_1_g; struct ggml_tensor * ln_1_b; // attention struct ggml_tensor * c_attn_q_proj_w; struct ggml_tensor * c_attn_k_proj_w; struct ggml_tensor * c_attn_v_proj_w; struct ggml_tensor * c_attn_proj_w; // ff struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gptj_model { gptj_hparams hparams; // normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; struct ggml_tensor * wte; // token embedding struct ggml_tensor * lmh_g; // language model head struct ggml_tensor * lmh_b; // language model bias std::vector layers; // key + value memory struct ggml_tensor * memory_k; struct ggml_tensor * memory_v; // struct ggml_context * ctx; std::map tensors; }; // load the model's weights from a file bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & vocab) { printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str()); auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } // load hparams { auto & hparams = model.hparams; fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: n_rot = %d\n", __func__, hparams.n_rot); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // load vocab { int32_t n_vocab = 0; fin.read((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != model.hparams.n_vocab) { 
fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); return false; } std::string word; std::vector buf(128); for (int i = 0; i < n_vocab; i++) { uint32_t len; fin.read((char *) &len, sizeof(len)); buf.resize(len); fin.read((char *) buf.data(), len); word.assign(buf.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), model.hparams.ftype); return false; } auto & ctx = model.ctx; size_t ctx_size = 0; { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_g ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_b ctx_size += ggml_row_size(wtype, n_embd*n_vocab); // wte ctx_size += ggml_row_size(wtype, n_embd*n_vocab); // lmh_g ctx_size += ggml_row_size(GGML_TYPE_F32, n_vocab); // lmh_b ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_g ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_b ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_q_proj_w ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_k_proj_w ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_v_proj_w ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_proj_w ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_fc_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_fc_b ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_proj_w ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_mlp_proj_b ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F16, n_embd); // memory_k ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F16, n_embd); // memory_v ctx_size += (5 + 10*n_layer)*512; // object overhead printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); } // create the ggml context { struct ggml_init_params params = { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, }; model.ctx = ggml_init(params); if (!model.ctx) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); return false; } } // prepare memory for the weights { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_vocab = hparams.n_vocab; model.layers.resize(n_layer); model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); model.lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); model.lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab); // map by name model.tensors["transformer.wte.weight"] = model.wte; model.tensors["transformer.ln_f.weight"] = model.ln_f_g; model.tensors["transformer.ln_f.bias"] = model.ln_f_b; model.tensors["lm_head.weight"] = model.lmh_g; model.tensors["lm_head.bias"] = model.lmh_b; for (int i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; layer.ln_1_g = ggml_new_tensor_1d(ctx, 
GGML_TYPE_F32, n_embd); layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); layer.c_attn_q_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_k_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_v_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // map by name model.tensors["transformer.h." + std::to_string(i) + ".ln_1.weight"] = layer.ln_1_g; model.tensors["transformer.h." + std::to_string(i) + ".ln_1.bias"] = layer.ln_1_b; model.tensors["transformer.h." + std::to_string(i) + ".attn.q_proj.weight"] = layer.c_attn_q_proj_w; model.tensors["transformer.h." + std::to_string(i) + ".attn.k_proj.weight"] = layer.c_attn_k_proj_w; model.tensors["transformer.h." + std::to_string(i) + ".attn.v_proj.weight"] = layer.c_attn_v_proj_w; model.tensors["transformer.h." + std::to_string(i) + ".attn.out_proj.weight"] = layer.c_attn_proj_w; model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.weight"] = layer.c_mlp_fc_w; model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.bias"] = layer.c_mlp_fc_b; model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.weight"] = layer.c_mlp_proj_w; model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.bias"] = layer.c_mlp_proj_b; } } // key + value memory { const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_mem = n_layer*n_ctx; const int n_elements = n_embd*n_mem; model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements); model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements); const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); } // load weights { int n_tensors = 0; size_t total_size = 0; printf("%s: ", __func__); while (true) { int32_t n_dims; int32_t length; int32_t ttype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ttype), sizeof(ttype)); if (fin.eof()) { break; } int32_t nelements = 1; int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name) == model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str()); return false; } auto tensor = model.tensors[name]; if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.c_str(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); return false; } // for debugging if (0) { printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); } const size_t bpe = 
ggml_type_size(ggml_type(ttype)); if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.c_str(), ggml_nbytes(tensor), nelements*bpe); return false; } fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.c_str(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); total_size += ggml_nbytes(tensor); if (++n_tensors % 8 == 0) { printf("."); fflush(stdout); } } printf(" done\n"); printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors); } fin.close(); return true; } // evaluate the transformer // // - model: the model // - n_threads: number of threads to use // - n_past: the context size so far // - embd_inp: the embeddings of the tokens in the context // - embd_w: the predicted logits for the next token // // The GPT-J model requires about 16MB of memory per input token. // bool gptj_eval( const gptj_model & model, const int n_threads, const int n_past, const std::vector & embd_inp, std::vector & embd_w, size_t & mem_per_token) { const int N = embd_inp.size(); const auto & hparams = model.hparams; const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; const int n_vocab = hparams.n_vocab; const int n_rot = hparams.n_rot; static size_t buf_size = 256u*1024*1024; static void * buf = malloc(buf_size); if (mem_per_token > 0 && mem_per_token*N > buf_size) { const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new); // reallocate buf_size = buf_size_new; buf = realloc(buf, buf_size); if (buf == nullptr) { fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size); return false; } } struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf, /*.no_alloc =*/ false, }; struct ggml_context * ctx0 = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph(ctx0); // KQ_pos - contains the positions struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); int * data = (int *) KQ_pos->data; for (int i = 0; i < N; ++i) { data[i] = n_past + i; } struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); // wte struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; // norm { cur = ggml_norm(ctx0, inpL, hparams.eps); // cur = ln_1_g*cur + ln_1_b cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_1_g, cur), cur), ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); } struct ggml_tensor * inpSA = cur; // self-attention { struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), KQ_pos, n_rot, 0); struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), KQ_pos, n_rot, 0); // store key and value to memory { struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur)); struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, 
(ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd, ( n_ctx)*ggml_element_size(model.memory_v), (il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v)); ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); } // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) struct ggml_tensor * K = ggml_permute(ctx0, ggml_reshape_3d(ctx0, ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), n_embd/n_head, n_head, n_past + N), 0, 2, 1, 3); // K * Q struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); // KQ_scaled = KQ / sqrt(n_embd/n_head) struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, 1.0f/sqrt(float(n_embd)/n_head)); // KQ_masked = mask_past(KQ_scaled) struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past); // KQ = soft_max(KQ_masked) struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() struct ggml_tensor * V = ggml_view_3d(ctx0, model.memory_v, n_past + N, n_embd/n_head, n_head, n_ctx*ggml_element_size(model.memory_v), n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head, il*n_ctx*ggml_element_size(model.memory_v)*n_embd); // KQV = transpose(V) * KQ_soft_max struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); // KQV_merged = KQV.permute(0, 2, 1, 3) struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); // cur = KQV_merged.contiguous().view(n_embd, N) cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); // projection (no bias) cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_proj_w, cur); } struct ggml_tensor * inpFF = cur; // feed-forward network // this is independent of the self-attention result, so it could be done in parallel to the self-attention { // note here we pass inpSA instead of cur cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_fc_w, inpSA); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), cur); // GELU activation cur = ggml_gelu(ctx0, cur); // projection // cur = proj_w*cur + proj_b cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), cur); } // self-attention + FF cur = ggml_add(ctx0, cur, inpFF); // input for next layer inpL = ggml_add(ctx0, cur, inpL); } // norm { inpL = ggml_norm(ctx0, inpL, hparams.eps); // inpL = ln_f_g*inpL + ln_f_b inpL = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.ln_f_g, inpL), inpL), ggml_repeat(ctx0, model.ln_f_b, inpL)); } // lm_head { inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL); inpL = ggml_add(ctx0, ggml_repeat(ctx0, model.lmh_b, inpL), inpL); } // logits -> probs //inpL = ggml_soft_max_inplace(ctx0, inpL); // run the computation ggml_build_forward_expand(gf, inpL); ggml_graph_compute_with_ctx(ctx0, gf, n_threads); //if (n_past%100 == 0) { // ggml_graph_print (&gf); // ggml_graph_dump_dot(&gf, NULL, "gpt-j.dot"); //} //embd_w.resize(n_vocab*N); //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); // return result for just the last token embd_w.resize(n_vocab); memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + 
(n_vocab*(N-1)), sizeof(float)*n_vocab); if (mem_per_token == 0) { mem_per_token = ggml_used_mem(ctx0)/N; } //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); ggml_free(ctx0); return true; } int main(int argc, char ** argv) { ggml_time_init(); const int64_t t_main_start_us = ggml_time_us(); gpt_params params; params.model = "models/gpt-j-6B/ggml-model.bin"; if (gpt_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } printf("%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.prompt.empty()) { params.prompt = gpt_random_prompt(rng); } int64_t t_load_us = 0; gpt_vocab vocab; gptj_model model; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gptj_model_load(params.model, model, vocab)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; test_gpt_tokenizer(vocab, params.token_test); } int n_past = 0; int64_t t_sample_us = 0; int64_t t_predict_us = 0; std::vector logits; // tokenize the prompt std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); printf("\n"); std::vector embd; // determine the required inference memory per token: size_t mem_per_token = 0; gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { // predict if (embd.size() > 0) { const int64_t t_start_us = ggml_time_us(); if (!gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { printf("Failed to predict\n"); return 1; } t_predict_us += ggml_time_us() - t_start_us; } n_past += embd.size(); embd.clear(); if (i >= embd_inp.size()) { // sample next token const int top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; const int n_vocab = model.hparams.n_vocab; gpt_vocab::id id = 0; { const int64_t t_start_sample_us = ggml_time_us(); id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); t_sample_us += ggml_time_us() - t_start_sample_us; } // add it to the context embd.push_back(id); } else { // if here, it means we are still processing the input prompt for (size_t k = i; k < embd_inp.size(); k++) { embd.push_back(embd_inp[k]); if (int32_t(embd.size()) > params.n_batch) { break; } } i += embd.size() - 1; } // display text for (auto id : embd) { printf("%s", vocab.id_to_token[id].c_str()); } fflush(stdout); // end of text token if (embd.back() == 50256) { break; } } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n\n"); printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token); printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } ggml_free(model.ctx); return 0; } ggml-org-ggml-3678254/examples/gpt-j/quantize.cpp000066400000000000000000000133401512524704700215700ustar00rootroot00000000000000#include "ggml.h" #include "common.h" #include "common-ggml.h" #include #include #include #include #include #include #include #include #include // 
default hparams (GPT-J 6B) struct gptj_hparams { int32_t n_vocab = 50400; int32_t n_ctx = 2048; int32_t n_embd = 4096; int32_t n_head = 16; int32_t n_layer = 28; int32_t n_rot = 64; int32_t ftype = 1; }; // quantize a model bool gptj_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) { gpt_vocab vocab; printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); auto finp = std::ifstream(fname_inp, std::ios::binary); if (!finp) { fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); return false; } auto fout = std::ofstream(fname_out, std::ios::binary); if (!fout) { fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); return false; } // verify magic { uint32_t magic; finp.read((char *) &magic, sizeof(magic)); if (magic != GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); return false; } fout.write((char *) &magic, sizeof(magic)); } gptj_hparams hparams; // load hparams { finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); finp.read((char *) &hparams.n_head, sizeof(hparams.n_head)); finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); finp.read((char *) &hparams.n_rot, sizeof(hparams.n_rot)); finp.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr_src = hparams.ftype / GGML_QNT_VERSION_FACTOR; const int32_t ftype_dst = GGML_QNT_VERSION * GGML_QNT_VERSION_FACTOR + ftype; printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); printf("%s: n_embd = %d\n", __func__, hparams.n_embd); printf("%s: n_head = %d\n", __func__, hparams.n_head); printf("%s: n_layer = %d\n", __func__, hparams.n_layer); printf("%s: ftype (src) = %d\n", __func__, hparams.ftype); printf("%s: qntvr (src) = %d\n", __func__, qntvr_src); printf("%s: ftype (dst) = %d\n", __func__, ftype_dst); printf("%s: qntvr (dst) = %d\n", __func__, GGML_QNT_VERSION); fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd)); fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); fout.write((char *) &hparams.n_rot, sizeof(hparams.n_rot)); fout.write((char *) &ftype_dst, sizeof(ftype_dst)); } // load vocab { int32_t n_vocab = 0; finp.read ((char *) &n_vocab, sizeof(n_vocab)); fout.write((char *) &n_vocab, sizeof(n_vocab)); if (n_vocab != hparams.n_vocab) { fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); return false; } std::string word; for (int i = 0; i < n_vocab; i++) { uint32_t len; finp.read ((char *) &len, sizeof(len)); fout.write((char *) &len, sizeof(len)); word.resize(len); finp.read ((char *) word.data(), len); fout.write((char *) word.data(), len); vocab.token_to_id[word] = i; vocab.id_to_token[i] = word; } } // regexes of tensor names to be quantized const std::vector to_quant = { ".*weight", }; if (!ggml_common_quantize_0(finp, fout, ftype, to_quant, {})) { fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str()); return false; } finp.close(); fout.close(); return true; } // usage: // ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin 
models/gpt-2-117M/ggml-model-quant.bin type // int main(int argc, char ** argv) { if (argc != 4) { fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]); ggml_print_ftypes(stderr); return 1; } // needed to initialize f16 tables { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); ggml_free(ctx); } const std::string fname_inp = argv[1]; const std::string fname_out = argv[2]; const ggml_ftype ftype = ggml_parse_ftype(argv[3]); const int64_t t_main_start_us = ggml_time_us(); int64_t t_quantize_us = 0; // load the model { const int64_t t_start_us = ggml_time_us(); if (!gptj_model_quantize(fname_inp, fname_out, ggml_ftype(ftype))) { fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); return 1; } t_quantize_us = ggml_time_us() - t_start_us; } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n"); printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } return 0; } ggml-org-ggml-3678254/examples/magika/000077500000000000000000000000001512524704700174335ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/magika/CMakeLists.txt000066400000000000000000000004621512524704700221750ustar00rootroot00000000000000# # magika set(TEST_TARGET magika) add_executable(${TEST_TARGET} main.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml) # # For GPU offloading if (GGML_CUDA) add_compile_definitions(GGML_USE_CUDA) endif() if (GGML_METAL) add_compile_definitions(GGML_USE_METAL) endif() ggml-org-ggml-3678254/examples/magika/README.md000066400000000000000000000026341512524704700207170ustar00rootroot00000000000000# Google Magika inference Simple example that shows how to use GGML for inference with the [Google Magika](https://github.com/google/magika) file type detection model. 
### Usage - Obtain the Magika model in H5 format - Pinned version: https://github.com/google/magika/blob/4460acb5d3f86807c3b53223229dee2afa50c025/assets_generation/models/standard_v1/model.h5 - Use `convert.py` to convert the model to gguf format: ```bash $ python examples/magika/convert.py /path/to/model.h5 ``` - Invoke the program with the model file and a list of files to identify: ```bash $ build/bin/magika model.h5.gguf examples/sam/example.jpg examples/magika/convert.py README.md src/ggml.c /bin/gcc write.exe jfk.wav examples/sam/example.jpg : jpeg (100.00%) pptx (0.00%) smali (0.00%) shell (0.00%) sevenzip (0.00%) examples/magika/convert.py : python (99.99%) javascript (0.00%) txt (0.00%) asm (0.00%) scala (0.00%) README.md : markdown (100.00%) txt (0.00%) yaml (0.00%) ppt (0.00%) shell (0.00%) src/ggml.c : c (99.95%) txt (0.04%) asm (0.01%) yaml (0.00%) html (0.00%) /bin/gcc : elf (99.98%) odex (0.02%) pptx (0.00%) smali (0.00%) shell (0.00%) write.exe : pebin (100.00%) ppt (0.00%) smali (0.00%) shell (0.00%) sevenzip (0.00%) jfk.wav : wav (100.00%) ppt (0.00%) shell (0.00%) sevenzip (0.00%) scala (0.00%) ``` ggml-org-ggml-3678254/examples/magika/convert.py000066400000000000000000000016521512524704700214710ustar00rootroot00000000000000import sys from tensorflow import keras import gguf def convert(model_name): model = keras.models.load_model(model_name, compile=False) gguf_model_name = model_name + ".gguf" gguf_writer = gguf.GGUFWriter(gguf_model_name, "magika") for layer in model.layers: # export layers with weights if layer.weights: for weight in layer.weights: print(f" [{weight.name}] {weight.shape} {weight.dtype}") weight_data = weight.numpy() gguf_writer.add_tensor(weight.name, weight_data.T) gguf_writer.write_header_to_file() gguf_writer.write_kv_data_to_file() gguf_writer.write_tensors_to_file() gguf_writer.close() print("Model converted and saved to '{}'".format(gguf_model_name)) if __name__ == '__main__': if len(sys.argv) > 1: model_file = sys.argv[1] else: model_file = "model.h5" convert(model_file) ggml-org-ggml-3678254/examples/magika/main.cpp000066400000000000000000000326311512524704700210700ustar00rootroot00000000000000#include "ggml.h" #include "gguf.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include #include #include #include #include #include static const char * magika_labels[] = { "ai", "apk", "appleplist", "asm", "asp", "batch", "bmp", "bzip", "c", "cab", "cat", "chm", "coff", "crx", "cs", "css", "csv", "deb", "dex", "dmg", "doc", "docx", "elf", "emf", "eml", "epub", "flac", "gif", "go", "gzip", "hlp", "html", "ico", "ini", "internetshortcut", "iso", "jar", "java", "javabytecode", "javascript", "jpeg", "json", "latex", "lisp", "lnk", "m3u", "macho", "makefile", "markdown", "mht", "mp3", "mp4", "mscompress", "msi", "mum", "odex", "odp", "ods", "odt", "ogg", "outlook", "pcap", "pdf", "pebin", "pem", "perl", "php", "png", "postscript", "powershell", "ppt", "pptx", "python", "pythonbytecode", "rar", "rdf", "rpm", "rst", "rtf", "ruby", "rust", "scala", "sevenzip", "shell", "smali", "sql", "squashfs", "svg", "swf", "symlinktext", "tar", "tga", "tiff", "torrent", "ttf", "txt", "unknown", "vba", "wav", "webm", "webp", "winregistry", "wmf", "xar", "xls", "xlsb", "xlsx", "xml", "xpi", "xz", "yaml", "zip", "zlibstream" }; struct magika_hparams { const int block_size = 4096; const int beg_size = 512; const int mid_size = 512; const int end_size = 512; const int min_file_size_for_dl = 16; const int n_label = 113; const float f_norm_eps = 
0.001f; const int padding_token = 256; }; struct magika_model { ~magika_model() { ggml_backend_buffer_free(buf_w); ggml_backend_free(backend); ggml_free(ctx_w); } magika_hparams hparams; struct ggml_tensor * dense_w; struct ggml_tensor * dense_b; struct ggml_tensor * layer_norm_gamma; struct ggml_tensor * layer_norm_beta; struct ggml_tensor * dense_1_w; struct ggml_tensor * dense_1_b; struct ggml_tensor * dense_2_w; struct ggml_tensor * dense_2_b; struct ggml_tensor * layer_norm_1_gamma; struct ggml_tensor * layer_norm_1_beta; struct ggml_tensor * target_label_w; struct ggml_tensor * target_label_b; ggml_backend_t backend = ggml_backend_cpu_init(); ggml_backend_buffer_t buf_w = nullptr; struct ggml_context * ctx_w = nullptr; }; struct ggml_tensor * checked_get_tensor(struct ggml_context * ctx, const char * name) { struct ggml_tensor * tensor = ggml_get_tensor(ctx, name); if (!tensor) { fprintf(stderr, "%s: tensor '%s' not found\n", __func__, name); throw std::runtime_error("ggml_get_tensor() failed"); } return tensor; } bool magika_model_load(const std::string & fname, magika_model & model) { auto & ctx = model.ctx_w; struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ &ctx, }; struct gguf_context * ctx_gguf = gguf_init_from_file(fname.c_str(), params); if (!ctx_gguf) { fprintf(stderr, "%s: gguf_init_from_file() failed\n", __func__); return false; } model.buf_w = ggml_backend_alloc_ctx_tensors(ctx, model.backend); if (!model.buf_w) { fprintf(stderr, "%s: ggml_backend_alloc_ctx_tensors() failed\n", __func__); gguf_free(ctx_gguf); return false; } try { model.dense_w = checked_get_tensor(ctx, "dense/kernel:0"); model.dense_b = checked_get_tensor(ctx, "dense/bias:0"); model.layer_norm_gamma = checked_get_tensor(ctx, "layer_normalization/gamma:0"); model.layer_norm_beta = checked_get_tensor(ctx, "layer_normalization/beta:0"); model.dense_1_w = checked_get_tensor(ctx, "dense_1/kernel:0"); model.dense_1_b = checked_get_tensor(ctx, "dense_1/bias:0"); model.dense_2_w = checked_get_tensor(ctx, "dense_2/kernel:0"); model.dense_2_b = checked_get_tensor(ctx, "dense_2/bias:0"); model.layer_norm_1_gamma = checked_get_tensor(ctx, "layer_normalization_1/gamma:0"); model.layer_norm_1_beta = checked_get_tensor(ctx, "layer_normalization_1/beta:0"); model.target_label_w = checked_get_tensor(ctx, "target_label/kernel:0"); model.target_label_b = checked_get_tensor(ctx, "target_label/bias:0"); } catch (const std::exception & e) { fprintf(stderr, "%s: %s\n", __func__, e.what()); gguf_free(ctx_gguf); return false; } FILE * f = fopen(fname.c_str(), "rb"); if (!f) { fprintf(stderr, "%s: fopen() failed\n", __func__); gguf_free(ctx_gguf); return false; } const int n_tensors = gguf_get_n_tensors(ctx_gguf); for (int i = 0; i < n_tensors; i++) { const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * tensor = ggml_get_tensor(ctx, name); size_t offs = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i); //printf("%-30s: [%3ld, %3ld, %3ld, %3ld] %s\n", // name, // tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], // ggml_type_name(tensor->type)); std::vector buf(ggml_nbytes(tensor)); if (fseek(f, offs, SEEK_SET) != 0) { fprintf(stderr, "%s: fseek() failed\n", __func__); gguf_free(ctx_gguf); fclose(f); return false; } if (fread(buf.data(), 1, buf.size(), f) != buf.size()) { fprintf(stderr, "%s: fread() failed\n", __func__); gguf_free(ctx_gguf); fclose(f); return false; } ggml_backend_tensor_set(tensor, buf.data(), 0, buf.size()); } fclose(f); gguf_free(ctx_gguf); 
return true; } struct ggml_cgraph * magika_graph( const magika_model & model, const int n_files) { const auto & hparams = model.hparams; static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead(); static std::vector buf(buf_size); struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ buf.data(), /*.no_alloc =*/ true, }; struct ggml_context * ctx = ggml_init(params); struct ggml_cgraph * gf = ggml_new_graph(ctx); struct ggml_tensor * input = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 257, 1536, n_files); // one-hot ggml_set_name(input, "input"); ggml_set_input(input); struct ggml_tensor * cur; // dense cur = ggml_mul_mat(ctx, model.dense_w, input); cur = ggml_add(ctx, cur, model.dense_b); // [128, 1536, n_files] cur = ggml_gelu(ctx, cur); // reshape cur = ggml_reshape_3d(ctx, cur, 512, 384, n_files); // [384, 512, n_files] cur = ggml_cont(ctx, ggml_transpose(ctx, cur)); // layer normalization cur = ggml_norm(ctx, cur, hparams.f_norm_eps); cur = ggml_mul(ctx, cur, model.layer_norm_gamma); // [384, 512, n_files] cur = ggml_add(ctx, cur, model.layer_norm_beta); // [384, 512, n_files] // dense_1 cur = ggml_cont(ctx, ggml_transpose(ctx, cur)); cur = ggml_mul_mat(ctx, model.dense_1_w, cur); cur = ggml_add(ctx, cur, model.dense_1_b); // [256, 384, n_files] cur = ggml_gelu(ctx, cur); // dense_2 cur = ggml_mul_mat(ctx, model.dense_2_w, cur); cur = ggml_add(ctx, cur, model.dense_2_b); // [256, 384, n_files] cur = ggml_gelu(ctx, cur); // global_max_pooling1d cur = ggml_cont(ctx, ggml_transpose(ctx, cur)); // [384, 256, n_files] cur = ggml_pool_1d(ctx, cur, GGML_OP_POOL_MAX, 384, 384, 0); // [1, 256, n_files] cur = ggml_reshape_2d(ctx, cur, 256, n_files); // [256, n_files] // layer normalization 1 cur = ggml_norm(ctx, cur, hparams.f_norm_eps); cur = ggml_mul(ctx, cur, model.layer_norm_1_gamma); // [256, n_files] cur = ggml_add(ctx, cur, model.layer_norm_1_beta); // [256, n_files] // target_label cur = ggml_mul_mat(ctx, model.target_label_w, cur); cur = ggml_add(ctx, cur, model.target_label_b); // [n_label, n_files] cur = ggml_soft_max(ctx, cur); // [n_label, n_files] ggml_set_name(cur, "target_label_probs"); ggml_set_output(cur); ggml_build_forward_expand(gf, cur); return gf; } bool magika_eval( struct magika_model & model, const std::vector & fnames) { const auto & hparams = model.hparams; static ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend)); struct ggml_cgraph * gf = magika_graph(model, fnames.size()); if (!ggml_gallocr_alloc_graph(alloc, gf)) { fprintf(stderr, "%s: ggml_gallocr_alloc_graph() failed\n", __func__); return false; } struct ggml_tensor * input = ggml_graph_get_tensor(gf, "input"); for (size_t i = 0; i < fnames.size(); i++) { FILE * f = fopen(fnames[i].c_str(), "rb"); if (!f) { fprintf(stderr, "%s: fopen() failed\n", __func__); return false; } fseek(f, 0, SEEK_END); long fsize = ftell(f); // the buffer is padded with the padding_token if the file is smaller than the block size std::vector buf(1536, hparams.padding_token); std::vector read_buf(std::max(hparams.beg_size, std::max(hparams.mid_size, hparams.end_size))); // read beg fseek(f, 0, SEEK_SET); int n_read = fread(read_buf.data(), 1, hparams.beg_size, f); for (int j = 0; j < n_read; j++) { // pad at the end buf[j] = read_buf[j]; } // read mid long mid_offs = std::max(0L, (fsize - hparams.mid_size) / 2); fseek(f, mid_offs, SEEK_SET); n_read = fread(read_buf.data(), 1, hparams.mid_size, f); for (int j = 0; j < n_read; j++) { // pad 
at both ends long mid_idx = hparams.beg_size + (hparams.mid_size / 2) - n_read / 2 + j; buf[mid_idx] = read_buf[j]; } // read end long end_offs = std::max(0L, fsize - hparams.end_size); fseek(f, end_offs, SEEK_SET); n_read = fread(read_buf.data(), 1, hparams.end_size, f); for (int j = 0; j < n_read; j++) { // pad at the beginning int end_idx = hparams.beg_size + hparams.mid_size + hparams.end_size - n_read + j; buf[end_idx] = read_buf[j]; } fclose(f); const size_t inp_bytes = hparams.beg_size + hparams.mid_size + hparams.end_size; // convert to one-hot std::vector one_hot(257*inp_bytes); for (size_t j = 0; j < inp_bytes; j++) { one_hot[257*j + buf[j]] = 1.0f; } ggml_backend_tensor_set(input, one_hot.data(), 257*inp_bytes*i*sizeof(float), 257*inp_bytes*sizeof(float)); } if (ggml_backend_graph_compute(model.backend, gf) != GGML_STATUS_SUCCESS) { fprintf(stderr, "%s: ggml_backend_graph_compute() failed\n", __func__); return false; } struct ggml_tensor * target_label_probs = ggml_graph_get_tensor(gf, "target_label_probs"); // print probabilities for the top labels of each file for (size_t i = 0; i < fnames.size(); i++) { std::vector probs(hparams.n_label); ggml_backend_tensor_get(target_label_probs, probs.data(), hparams.n_label*i*sizeof(float), hparams.n_label*sizeof(float)); // sort the probabilities std::vector idx(hparams.n_label); std::iota(idx.begin(), idx.end(), 0); std::sort(idx.begin(), idx.end(), [&probs](int i1, int i2) { return probs[i1] > probs[i2]; }); // print the top labels const int top_n = 5; printf("%-30s: ", fnames[i].c_str()); for (int j = 0; j < top_n; j++) { printf("%s (%.2f%%) ", magika_labels[idx[j]], probs[idx[j]]*100); } printf("\n"); } return true; } int main(int argc, const char ** argv) { if (argc < 3) { fprintf(stderr, "usage: %s [ ...]\n", argv[0]); return 1; } const char * model_fname = argv[1]; std::vector fnames; for (int i = 2; i < argc; i++) { fnames.push_back(argv[i]); } magika_model model; if (!magika_model_load(model_fname, model)) { fprintf(stderr, "magika_model_load() failed\n"); return 1; } magika_eval(model, fnames); return 0; } ggml-org-ggml-3678254/examples/mnist/000077500000000000000000000000001512524704700173345ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/mnist/.gitignore000066400000000000000000000000241512524704700213200ustar00rootroot00000000000000data/ *.gguf *.ggml ggml-org-ggml-3678254/examples/mnist/CMakeLists.txt000066400000000000000000000033711512524704700221000ustar00rootroot00000000000000# # mnist-common set(TEST_TARGET mnist-common) add_library(${TEST_TARGET} STATIC mnist-common.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common) # # mnist-eval set(TEST_TARGET mnist-eval) add_executable(${TEST_TARGET} mnist-eval.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common mnist-common) # # mnist-train set(TEST_TARGET mnist-train) add_executable(${TEST_TARGET} mnist-train.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common mnist-common) # # mnist-wasm if (EMSCRIPTEN) set(TARGET mnist) add_executable(${TARGET} mnist-common.cpp) target_link_libraries(${TARGET} PRIVATE ggml ggml-cpu) set_target_properties(${TARGET} PROPERTIES LINK_FLAGS " \ --bind \ -s FORCE_FILESYSTEM=1 \ -s USE_PTHREADS=1 \ -s PTHREAD_POOL_SIZE=10 \ -s ASSERTIONS=1 \ -s WASM=1 \ -s EXPORTED_RUNTIME_METHODS=\"['ccall', 'cwrap', 'setValue', 'getValue']\" \ -s EXPORTED_FUNCTIONS=\"['_wasm_eval','_wasm_random_digit','_malloc','_free']\" \ -s ALLOW_MEMORY_GROWTH=1 \ --preload-file ${CMAKE_CURRENT_SOURCE_DIR}/mnist-f32.gguf@/ \ 
--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/t10k-images-idx3-ubyte@/ \ ") # Copy output to web directory add_custom_command( TARGET ${TARGET} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_BINARY_DIR}/bin/mnist.js ${CMAKE_CURRENT_SOURCE_DIR}/web/mnist.js COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_BINARY_DIR}/bin/mnist.wasm ${CMAKE_CURRENT_SOURCE_DIR}/web/mnist.wasm COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_BINARY_DIR}/bin/mnist.worker.js ${CMAKE_CURRENT_SOURCE_DIR}/web/mnist.worker.js ) endif() ggml-org-ggml-3678254/examples/mnist/README.md000066400000000000000000000226411512524704700206200ustar00rootroot00000000000000# MNIST Examples for GGML This directory contains simple examples of how to use GGML for training and inference using the [MNIST dataset](https://yann.lecun.com/exdb/mnist/). All commands listed in this README assume the working directory to be `examples/mnist`. Please note that training in GGML is a work-in-progress and not production ready. ## Obtaining the data A description of the dataset can be found on [Yann LeCun's website](https://yann.lecun.com/exdb/mnist/). While it is also in principle possible to download the dataset from this website these downloads are frequently throttled and it is recommended to use [HuggingFace](https://huggingface.co/datasets/ylecun/mnist) instead. The dataset will be downloaded automatically when running `mnist-train-fc.py`. ## Fully connected network For our first example we will train a fully connected network. To train a fully connected model in PyTorch and save it as a GGUF file, run: ```bash $ python3 mnist-train-fc.py mnist-fc-f32.gguf ... Test loss: 0.066377+-0.010468, Test accuracy: 97.94+-0.14% Model tensors saved to mnist-fc-f32.gguf: fc1.weight (500, 784) fc1.bias (500,) fc2.weight (10, 500) fc2.bias (10,) ``` The training script includes an evaluation of the model on the test set. 
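If you want to double-check what was written, the tensors stored in the resulting GGUF file can be listed with the same `gguf` Python package that the training scripts use (a minimal sketch, not part of the example; `GGUFReader` and its attribute names are assumptions about the installed `gguf` version):

```python
# Minimal sketch: list the tensors stored in the trained model file.
# Assumes the `gguf` Python package imported by the training scripts.
from gguf import GGUFReader

reader = GGUFReader("mnist-fc-f32.gguf")
for tensor in reader.tensors:
    print(tensor.name, tensor.shape, tensor.tensor_type)
```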
To evaluate the model on the CPU using GGML, run: ```bash $ ../../build/bin/mnist-eval mnist-fc-f32.gguf data/MNIST/raw/t10k-images-idx3-ubyte data/MNIST/raw/t10k-labels-idx1-ubyte ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ __________________________________####__________________ ______________________________########__________________ __________________________##########____________________ ______________________##############____________________ ____________________######________####__________________ __________________________________####__________________ __________________________________####__________________ ________________________________####____________________ ______________________________####______________________ ________________________##########______________________ ______________________########__####____________________ ________________________##__________##__________________ ____________________________________##__________________ __________________________________##____________________ __________________________________##____________________ ________________________________##______________________ ____________________________####________________________ __________##____________######__________________________ __________##############________________________________ ________________####____________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no ggml_cuda_init: found 1 CUDA devices: Device 0: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes mnist_model: using CUDA0 (NVIDIA GeForce RTX 3090) as primary backend mnist_model: unsupported operations will be executed on the following fallback backends (in order of priority): mnist_model: - CPU (AMD Ryzen 9 5950X 16-Core Processor) mnist_model_init_from_file: loading model weights from 'mnist-fc-f32.gguf' mnist_model_init_from_file: model arch is mnist-fc mnist_model_init_from_file: successfully loaded weights from mnist-fc-f32.gguf main: loaded model in 109.44 ms mnist_model_eval: model evaluation on 10000 images took 76.92 ms, 7.69 us/image main: predicted digit is 3 main: test_loss=0.066379+-0.009101 main: test_acc=97.94+-0.14% ``` In addition to the evaluation on the test set the GGML evaluation also prints a random image from the test set as well as the model prediction for said image. To train a fully connected model on the CPU using GGML run: ``` bash $ ../../build/bin/mnist-train mnist-fc mnist-fc-f32.gguf data/MNIST/raw/train-images-idx3-ubyte data/MNIST/raw/train-labels-idx1-ubyte ``` It can then be evaluated with the same binary as above. ## Convolutional network To train a convolutional network using TensorFlow run: ```bash $ python3 mnist-train-cnn.py mnist-cnn-f32.gguf ... 
Test loss: 0.047947 Test accuracy: 98.46% GGUF model saved to 'mnist-cnn-f32.gguf' ``` The saved model can be evaluated on the CPU using the `mnist-eval` binary: ```bash $ ../../build/bin/mnist-eval mnist-fc-f32.gguf data/MNIST/raw/t10k-images-idx3-ubyte data/MNIST/raw/t10k-labels-idx1-ubyte ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ______________________________________##________________ ______________________________________##________________ ______________________________________##________________ ____________________________________##__________________ __________________________________####__________________ __________________________________##____________________ ________________________________##______________________ ______________________________##________________________ ____________________________####________________________ ____________________________##__________________________ __________________________##____________________________ ________________________##______________________________ ______________________##________________________________ ____________________####________________________________ ____________________##__________________________________ __________________##____________________________________ ________________##______________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ________________________________________________________ ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no ggml_cuda_init: found 1 CUDA devices: Device 0: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes mnist_model: using CUDA0 (NVIDIA GeForce RTX 3090) as primary backend mnist_model: unsupported operations will be executed on the following fallback backends (in order of priority): mnist_model: - CPU (AMD Ryzen 9 5950X 16-Core Processor) mnist_model_init_from_file: loading model weights from 'mnist-cnn-f32.gguf' mnist_model_init_from_file: model arch is mnist-cnn mnist_model_init_from_file: successfully loaded weights from mnist-cnn-f32.gguf main: loaded model in 91.99 ms mnist_model_eval: model evaluation on 10000 images took 267.61 ms, 26.76 us/image main: predicted digit is 1 main: test_loss=0.047955+-0.007029 main: test_acc=98.46+-0.12% ``` Like with the fully connected network the convolutional network can also be trained using GGML: ``` bash $ ../../build/bin/mnist-train mnist-cnn mnist-cnn-f32.gguf data/MNIST/raw/train-images-idx3-ubyte data/MNIST/raw/train-labels-idx1-ubyte ``` As always, the evaluation is done using `mnist-eval` and like with the fully connected network the GGML graph is exported to `mnist-cnn-f32.ggml`. ## Hardware Acceleration Both the training and evaluation code is agnostic in terms of hardware as long as the corresponding GGML backend has implemented the necessary operations. A specific backend can be selected by appending the above commands with a backend name. The compute graphs then schedule the operations to preferentially use the specified backend. 
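For example, to run the evaluation on the first CUDA device instead of the CPU (assuming a CUDA-enabled build):

```bash
$ ../../build/bin/mnist-eval mnist-fc-f32.gguf data/MNIST/raw/t10k-images-idx3-ubyte data/MNIST/raw/t10k-labels-idx1-ubyte CUDA0
```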
Note that if a backend does not implement some of the necessary operations a CPU fallback is used instead which may result in bad performance. ## Web demo The evaluation code can be compiled to WebAssembly using [Emscripten](https://emscripten.org/) (may need to re-login to update `$PATH` after installation). First, copy the GGUF file of either of the trained models to `examples/mnist` and name it `mnist-f32.gguf`. Copy the test set to `examples/mnist` and name it `t10k-images-idx3-ubyte`. Symlinking these files will *not* work! Compile the code like so: ```bash $ cd ../../ $ mkdir -p build-em $ emcmake cmake .. -DGGML_BUILD_EXAMPLES=ON \ -DCMAKE_C_FLAGS="-pthread -matomics -mbulk-memory" \ -DCMAKE_CXX_FLAGS="-pthread -matomics -mbulk-memory" $ make mnist ``` The compilation output is copied into `examples/mnist/web`. To run it, you need an HTTP server. For example: ``` bash $ python3 examples/mnist/server.py Serving directory '/home/danbev/work/ai/ggml/examples/mnist/web' at http://localhost:8000 Application context root: http://localhost:8000/ ``` The web demo can then be accessed via the link printed on the console. Simply draw a digit on the canvas and the model will try to predict what it's supposed to be. Alternatively, click the "Random" button to retrieve a random digit from the test set. Be aware that like all neural networks the one we trained is susceptible to distributional shift: if the numbers you draw look different than the ones in the training set (e.g. because they're not centered) the model will perform comparatively worse. An online demo can be accessed [here](https://mnist.ggerganov.com). ggml-org-ggml-3678254/examples/mnist/mnist-common.cpp000066400000000000000000000474661512524704700225010ustar00rootroot00000000000000#include "ggml.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include "ggml-opt.h" #include "mnist-common.h" #include #include #include #include #include #include #include #include #include bool mnist_image_load(const std::string & fname, ggml_opt_dataset_t dataset) { auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "failed to open images file %s\n", fname.c_str()); return false; } fin.seekg(16); uint8_t image[MNIST_NINPUT]; struct ggml_tensor * images = ggml_opt_dataset_data(dataset); float * buf = ggml_get_data_f32(images); GGML_ASSERT(images->ne[0] == MNIST_NINPUT); for (int64_t iex = 0; iex < images->ne[1]; ++iex) { fin.read((char *) image, sizeof(image)); for (int64_t i = 0; i < MNIST_NINPUT; ++i) { buf[iex*MNIST_NINPUT + i] = image[i] / 255.0f; // Normalize to [0, 1] } } return true; } void mnist_image_print(FILE * stream, ggml_opt_dataset_t dataset, const int iex) { struct ggml_tensor * images = ggml_opt_dataset_data(dataset); GGML_ASSERT(images->ne[0] == MNIST_NINPUT); GGML_ASSERT(iex < images->ne[1]); const float * image = ggml_get_data_f32(images) + iex*MNIST_NINPUT; for (int64_t row = 0; row < MNIST_HW; row++) { for (int64_t col = 0; col < MNIST_HW; col++) { const int rgb = roundf(255.0f * image[row*MNIST_HW + col]); #ifdef _WIN32 fprintf(stream, "%s", rgb >= 220 ? "##" : "__"); // Represented via text. #else fprintf(stream, "\033[48;2;%d;%d;%dm \033[0m", rgb, rgb, rgb); // Represented via colored blocks. 
#endif // _WIN32 } fprintf(stream, "\n"); } } bool mnist_label_load(const std::string & fname, ggml_opt_dataset_t dataset) { auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { fprintf(stderr, "failed to open labels file %s\n", fname.c_str()); return 0; } fin.seekg(8); uint8_t label; struct ggml_tensor * labels = ggml_opt_dataset_labels(dataset); float * buf = ggml_get_data_f32(labels); GGML_ASSERT(labels->ne[0] == MNIST_NCLASSES); for (int64_t iex = 0; iex < labels->ne[1]; ++iex) { fin.read((char *) &label, sizeof(label)); for (int64_t i = 0; i < MNIST_NCLASSES; ++i) { buf[iex*MNIST_NCLASSES + i] = i == label ? 1.0f : 0.0f; } } return true; } // Temporary util function for loading data from GGUF to a backend != CPU until GGML itself provides this functionality: bool load_from_gguf(const char * fname, struct ggml_context * ctx_ggml, struct gguf_context * ctx_gguf) { FILE * f = ggml_fopen(fname, "rb"); if (!f) { return false; } const size_t buf_size = 4*1024*1024; void * buf = malloc(buf_size); const int n_tensors = gguf_get_n_tensors(ctx_gguf); for (int i = 0; i < n_tensors; i++) { const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * tensor = ggml_get_tensor(ctx_ggml, name); if (!tensor) { continue; } const size_t offs = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i); if (fseek(f, offs, SEEK_SET) != 0) { fclose(f); free(buf); return false; } const size_t nbytes = ggml_nbytes(tensor); for (size_t pos = 0; pos < nbytes; pos += buf_size) { const size_t nbytes_cpy = buf_size < nbytes - pos ? buf_size : nbytes - pos; if (fread(buf, 1, nbytes_cpy, f) != nbytes_cpy) { fclose(f); free(buf); return false; } ggml_backend_tensor_set(tensor, buf, pos, nbytes_cpy); } } fclose(f); free(buf); return true; } mnist_model mnist_model_init_from_file(const std::string & fname, const std::string & backend, const int nbatch_logical, const int nbatch_physical) { mnist_model model(backend, nbatch_logical, nbatch_physical); fprintf(stderr, "%s: loading model weights from '%s'\n", __func__, fname.c_str()); struct gguf_context * ctx; { struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ &model.ctx_gguf, }; ctx = gguf_init_from_file(fname.c_str(), params); if (!ctx) { fprintf(stderr, "%s: gguf_init_from_file() failed\n", __func__); exit(1); } } model.arch = gguf_get_val_str(ctx, gguf_find_key(ctx, "general.architecture")); fprintf(stderr, "%s: model arch is %s\n", __func__, model.arch.c_str()); if (model.arch == "mnist-fc") { model.fc1_weight = ggml_get_tensor(model.ctx_gguf, "fc1.weight"); GGML_ASSERT(model.fc1_weight->ne[0] == MNIST_NINPUT); GGML_ASSERT(model.fc1_weight->ne[1] == MNIST_NHIDDEN); GGML_ASSERT(model.fc1_weight->ne[2] == 1); GGML_ASSERT(model.fc1_weight->ne[3] == 1); model.fc1_bias = ggml_get_tensor(model.ctx_gguf, "fc1.bias"); GGML_ASSERT(model.fc1_bias->ne[0] == MNIST_NHIDDEN); GGML_ASSERT(model.fc1_bias->ne[1] == 1); GGML_ASSERT(model.fc1_bias->ne[2] == 1); GGML_ASSERT(model.fc1_bias->ne[3] == 1); model.fc2_weight = ggml_get_tensor(model.ctx_gguf, "fc2.weight"); GGML_ASSERT(model.fc2_weight->ne[0] == MNIST_NHIDDEN); GGML_ASSERT(model.fc2_weight->ne[1] == MNIST_NCLASSES); GGML_ASSERT(model.fc2_weight->ne[2] == 1); GGML_ASSERT(model.fc2_weight->ne[3] == 1); model.fc2_bias = ggml_get_tensor(model.ctx_gguf, "fc2.bias"); GGML_ASSERT(model.fc2_bias->ne[0] == MNIST_NCLASSES); GGML_ASSERT(model.fc2_bias->ne[1] == 1); GGML_ASSERT(model.fc2_bias->ne[2] == 1); GGML_ASSERT(model.fc2_bias->ne[3] == 1); } else if (model.arch == "mnist-cnn") { 
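// mnist-cnn: two 3x3 convolutions (8 and then 16 channels), each followed by 2x2 max pooling,
// and a dense layer that maps the flattened 7x7x16 activations to the 10 classes
// (the same layout produced by mnist-train-cnn.py).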
model.conv1_kernel = ggml_get_tensor(model.ctx_gguf, "conv1.kernel"); GGML_ASSERT(model.conv1_kernel->type == GGML_TYPE_F32); GGML_ASSERT(model.conv1_kernel->ne[0] == 3); GGML_ASSERT(model.conv1_kernel->ne[1] == 3); GGML_ASSERT(model.conv1_kernel->ne[2] == 1); GGML_ASSERT(model.conv1_kernel->ne[3] == MNIST_CNN_NCB); model.conv1_bias = ggml_get_tensor(model.ctx_gguf, "conv1.bias"); GGML_ASSERT(model.conv1_bias->type == GGML_TYPE_F32); GGML_ASSERT(model.conv1_bias->ne[0] == 1); GGML_ASSERT(model.conv1_bias->ne[1] == 1); GGML_ASSERT(model.conv1_bias->ne[2] == MNIST_CNN_NCB); GGML_ASSERT(model.conv1_bias->ne[3] == 1); model.conv2_kernel = ggml_get_tensor(model.ctx_gguf, "conv2.kernel"); GGML_ASSERT(model.conv2_kernel->type == GGML_TYPE_F32); GGML_ASSERT(model.conv2_kernel->ne[0] == 3); GGML_ASSERT(model.conv2_kernel->ne[1] == 3); GGML_ASSERT(model.conv2_kernel->ne[2] == MNIST_CNN_NCB); GGML_ASSERT(model.conv2_kernel->ne[3] == MNIST_CNN_NCB*2); model.conv2_bias = ggml_get_tensor(model.ctx_gguf, "conv2.bias"); GGML_ASSERT(model.conv2_bias->type == GGML_TYPE_F32); GGML_ASSERT(model.conv2_bias->ne[0] == 1); GGML_ASSERT(model.conv2_bias->ne[1] == 1); GGML_ASSERT(model.conv2_bias->ne[2] == MNIST_CNN_NCB*2); GGML_ASSERT(model.conv2_bias->ne[3] == 1); model.dense_weight = ggml_get_tensor(model.ctx_gguf, "dense.weight"); GGML_ASSERT(model.dense_weight->type == GGML_TYPE_F32); GGML_ASSERT(model.dense_weight->ne[0] == (MNIST_HW/4)*(MNIST_HW/4)*(MNIST_CNN_NCB*2)); GGML_ASSERT(model.dense_weight->ne[1] == MNIST_NCLASSES); GGML_ASSERT(model.dense_weight->ne[2] == 1); GGML_ASSERT(model.dense_weight->ne[3] == 1); model.dense_bias = ggml_get_tensor(model.ctx_gguf, "dense.bias"); GGML_ASSERT(model.dense_bias->type == GGML_TYPE_F32); GGML_ASSERT(model.dense_bias->ne[0] == MNIST_NCLASSES); GGML_ASSERT(model.dense_bias->ne[1] == 1); GGML_ASSERT(model.dense_bias->ne[2] == 1); GGML_ASSERT(model.dense_bias->ne[3] == 1); } else { fprintf(stderr, "%s: unknown model arch: %s\n", __func__, model.arch.c_str()); } model.buf_gguf = ggml_backend_alloc_ctx_tensors(model.ctx_gguf, model.backends[0]); if(!load_from_gguf(fname.c_str(), model.ctx_gguf, ctx)) { fprintf(stderr, "%s: loading weights from %s failed\n", __func__, fname.c_str()); exit(1); } // The space in ctx_gguf exactly fits the model weights, // the images (which also need to be statically allocated) need to be put in a different context. 
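// Each row of the input tensor holds one flattened 28x28 image (MNIST_NINPUT floats); only one physical batch is resident at a time.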
model.images = ggml_new_tensor_2d(model.ctx_static, GGML_TYPE_F32, MNIST_NINPUT, nbatch_physical); ggml_set_name(model.images, "images"); ggml_set_input(model.images); model.buf_static = ggml_backend_alloc_ctx_tensors(model.ctx_static, model.backends[0]); fprintf(stderr, "%s: successfully loaded weights from %s\n", __func__, fname.c_str()); return model; } mnist_model mnist_model_init_random(const std::string & arch, const std::string & backend, const int nbatch_logical, const int nbatch_physical) { mnist_model model(backend, nbatch_logical, nbatch_physical); model.arch = arch; std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution nd{0.0f, 1e-2f}; std::vector init_tensors; if (model.arch == "mnist-fc") { fprintf(stderr, "%s: initializing random weights for a fully connected model\n", __func__); model.fc1_weight = ggml_new_tensor_2d(model.ctx_static, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NHIDDEN); model.fc1_bias = ggml_new_tensor_1d(model.ctx_static, GGML_TYPE_F32, MNIST_NHIDDEN); model.fc2_weight = ggml_new_tensor_2d(model.ctx_static, GGML_TYPE_F32, MNIST_NHIDDEN, MNIST_NCLASSES); model.fc2_bias = ggml_new_tensor_1d(model.ctx_static, GGML_TYPE_F32, MNIST_NCLASSES); ggml_set_name(model.fc1_weight, "fc1.weight"); ggml_set_name(model.fc1_bias, "fc1.bias"); ggml_set_name(model.fc2_weight, "fc2.weight"); ggml_set_name(model.fc2_bias, "fc2.bias"); init_tensors.push_back(model.fc1_weight); init_tensors.push_back(model.fc1_bias); init_tensors.push_back(model.fc2_weight); init_tensors.push_back(model.fc2_bias); } else if (model.arch == "mnist-cnn") { model.conv1_kernel = ggml_new_tensor_4d(model.ctx_static, GGML_TYPE_F32, 3, 3, 1, MNIST_CNN_NCB); model.conv1_bias = ggml_new_tensor_3d(model.ctx_static, GGML_TYPE_F32, 1, 1, MNIST_CNN_NCB); model.conv2_kernel = ggml_new_tensor_4d(model.ctx_static, GGML_TYPE_F32, 3, 3, MNIST_CNN_NCB, MNIST_CNN_NCB*2); model.conv2_bias = ggml_new_tensor_3d(model.ctx_static, GGML_TYPE_F32, 1, 1, MNIST_CNN_NCB*2); model.dense_weight = ggml_new_tensor_2d(model.ctx_static, GGML_TYPE_F32, (MNIST_HW/4)*(MNIST_HW/4)*(MNIST_CNN_NCB*2), MNIST_NCLASSES); model.dense_bias = ggml_new_tensor_1d(model.ctx_static, GGML_TYPE_F32, MNIST_NCLASSES); ggml_set_name(model.conv1_kernel, "conv1.kernel"); ggml_set_name(model.conv1_bias, "conv1.bias"); ggml_set_name(model.conv2_kernel, "conv2.kernel"); ggml_set_name(model.conv2_bias, "conv2.bias"); ggml_set_name(model.dense_weight, "dense.weight"); ggml_set_name(model.dense_bias, "dense.bias"); init_tensors.push_back(model.conv1_kernel); init_tensors.push_back(model.conv1_bias); init_tensors.push_back(model.conv2_kernel); init_tensors.push_back(model.conv2_bias); init_tensors.push_back(model.dense_weight); init_tensors.push_back(model.dense_bias); } else { fprintf(stderr, "%s: unknown model arch: %s\n", __func__, model.arch.c_str()); } model.images = ggml_new_tensor_2d(model.ctx_static, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NBATCH_PHYSICAL); ggml_set_name(model.images, "images"); ggml_set_input(model.images); model.buf_static = ggml_backend_alloc_ctx_tensors(model.ctx_static, model.backends[0]); for (ggml_tensor * t : init_tensors) { GGML_ASSERT(t->type == GGML_TYPE_F32); const int64_t ne = ggml_nelements(t); std::vector tmp(ne); for (int64_t i = 0; i < ne; ++i) { tmp[i] = nd(gen); } ggml_backend_tensor_set(t, tmp.data(), 0, ggml_nbytes(t)); } return model; } void mnist_model_build(mnist_model & model) { if (model.arch == "mnist-fc") { ggml_set_param(model.fc1_weight); ggml_set_param(model.fc1_bias); 
ggml_set_param(model.fc2_weight); ggml_set_param(model.fc2_bias); ggml_tensor * fc1 = ggml_relu(model.ctx_compute, ggml_add(model.ctx_compute, ggml_mul_mat(model.ctx_compute, model.fc1_weight, model.images), model.fc1_bias)); model.logits = ggml_add(model.ctx_compute, ggml_mul_mat(model.ctx_compute, model.fc2_weight, fc1), model.fc2_bias); } else if (model.arch == "mnist-cnn") { ggml_set_param(model.conv1_kernel); ggml_set_param(model.conv1_bias); ggml_set_param(model.conv2_kernel); ggml_set_param(model.conv2_bias); ggml_set_param(model.dense_weight); ggml_set_param(model.dense_bias); struct ggml_tensor * images_2D = ggml_reshape_4d(model.ctx_compute, model.images, MNIST_HW, MNIST_HW, 1, model.images->ne[1]); struct ggml_tensor * conv1_out = ggml_relu(model.ctx_compute, ggml_add(model.ctx_compute, ggml_conv_2d(model.ctx_compute, model.conv1_kernel, images_2D, 1, 1, 1, 1, 1, 1), model.conv1_bias)); GGML_ASSERT(conv1_out->ne[0] == MNIST_HW); GGML_ASSERT(conv1_out->ne[1] == MNIST_HW); GGML_ASSERT(conv1_out->ne[2] == MNIST_CNN_NCB); GGML_ASSERT(conv1_out->ne[3] == model.nbatch_physical); struct ggml_tensor * conv2_in = ggml_pool_2d(model.ctx_compute, conv1_out, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); GGML_ASSERT(conv2_in->ne[0] == MNIST_HW/2); GGML_ASSERT(conv2_in->ne[1] == MNIST_HW/2); GGML_ASSERT(conv2_in->ne[2] == MNIST_CNN_NCB); GGML_ASSERT(conv2_in->ne[3] == model.nbatch_physical); struct ggml_tensor * conv2_out = ggml_relu(model.ctx_compute, ggml_add(model.ctx_compute, ggml_conv_2d(model.ctx_compute, model.conv2_kernel, conv2_in, 1, 1, 1, 1, 1, 1), model.conv2_bias)); GGML_ASSERT(conv2_out->ne[0] == MNIST_HW/2); GGML_ASSERT(conv2_out->ne[1] == MNIST_HW/2); GGML_ASSERT(conv2_out->ne[2] == MNIST_CNN_NCB*2); GGML_ASSERT(conv2_out->ne[3] == model.nbatch_physical); struct ggml_tensor * dense_in = ggml_pool_2d(model.ctx_compute, conv2_out, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); GGML_ASSERT(dense_in->ne[0] == MNIST_HW/4); GGML_ASSERT(dense_in->ne[1] == MNIST_HW/4); GGML_ASSERT(dense_in->ne[2] == MNIST_CNN_NCB*2); GGML_ASSERT(dense_in->ne[3] == model.nbatch_physical); dense_in = ggml_reshape_2d(model.ctx_compute, ggml_cont(model.ctx_compute, ggml_permute(model.ctx_compute, dense_in, 1, 2, 0, 3)), (MNIST_HW/4)*(MNIST_HW/4)*(MNIST_CNN_NCB*2), model.nbatch_physical); GGML_ASSERT(dense_in->ne[0] == (MNIST_HW/4)*(MNIST_HW/4)*(MNIST_CNN_NCB*2)); GGML_ASSERT(dense_in->ne[1] == model.nbatch_physical); GGML_ASSERT(dense_in->ne[2] == 1); GGML_ASSERT(dense_in->ne[3] == 1); model.logits = ggml_add(model.ctx_compute, ggml_mul_mat(model.ctx_compute, model.dense_weight, dense_in), model.dense_bias); } else { GGML_ASSERT(false); } ggml_set_name(model.logits, "logits"); ggml_set_output(model.logits); GGML_ASSERT(model.logits->type == GGML_TYPE_F32); GGML_ASSERT(model.logits->ne[0] == MNIST_NCLASSES); GGML_ASSERT(model.logits->ne[1] == model.nbatch_physical); GGML_ASSERT(model.logits->ne[2] == 1); GGML_ASSERT(model.logits->ne[3] == 1); } ggml_opt_result_t mnist_model_eval(mnist_model & model, ggml_opt_dataset_t dataset) { ggml_opt_result_t result = ggml_opt_result_init(); ggml_opt_params params = ggml_opt_default_params(model.backend_sched, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY); params.ctx_compute = model.ctx_compute; params.inputs = model.images; params.outputs = model.logits; params.build_type = GGML_OPT_BUILD_TYPE_FORWARD; ggml_opt_context_t opt_ctx = ggml_opt_init(params); { const int64_t t_start_us = ggml_time_us(); ggml_opt_epoch(opt_ctx, dataset, nullptr, result, /*idata_split =*/ 0, nullptr, nullptr); const 
int64_t t_total_us = ggml_time_us() - t_start_us; const double t_total_ms = 1e-3*t_total_us; const int nex = ggml_opt_dataset_data(dataset)->ne[1]; fprintf(stderr, "%s: model evaluation on %d images took %.2lf ms, %.2lf us/image\n", __func__, nex, t_total_ms, (double) t_total_us/nex); } ggml_opt_free(opt_ctx); return result; } void mnist_model_train(mnist_model & model, ggml_opt_dataset_t dataset, const int nepoch, const float val_split) { ggml_opt_fit(model.backend_sched, model.ctx_compute, model.images, model.logits, dataset, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY, GGML_OPT_OPTIMIZER_TYPE_ADAMW, ggml_opt_get_default_optimizer_params, nepoch, model.nbatch_logical, val_split, false); } void mnist_model_save(mnist_model & model, const std::string & fname) { printf("%s: saving model to '%s'\n", __func__, fname.c_str()); struct ggml_context * ggml_ctx; { struct ggml_init_params params = { /*.mem_size =*/ 100 * 1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, }; ggml_ctx = ggml_init(params); } gguf_context * gguf_ctx = gguf_init_empty(); gguf_set_val_str(gguf_ctx, "general.architecture", model.arch.c_str()); std::vector weights; if (model.arch == "mnist-fc") { weights = {model.fc1_weight, model.fc1_bias, model.fc2_weight, model.fc2_bias}; } else if (model.arch == "mnist-cnn") { weights = {model.conv1_kernel, model.conv1_bias, model.conv2_kernel, model.conv2_bias, model.dense_weight, model.dense_bias}; } else { GGML_ASSERT(false); } for (struct ggml_tensor * t : weights) { struct ggml_tensor * copy = ggml_dup_tensor(ggml_ctx, t); ggml_set_name(copy, t->name); ggml_backend_tensor_get(t, copy->data, 0, ggml_nbytes(t)); gguf_add_tensor(gguf_ctx, copy); } gguf_write_to_file(gguf_ctx, fname.c_str(), false); ggml_free(ggml_ctx); gguf_free(gguf_ctx); } #ifdef __cplusplus extern "C" { #endif int wasm_eval(uint8_t * digitPtr) { std::vector digit(digitPtr, digitPtr + MNIST_NINPUT); ggml_opt_dataset_t dataset = ggml_opt_dataset_init(GGML_TYPE_F32, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NCLASSES, 1, 1); struct ggml_tensor * data = ggml_opt_dataset_data(dataset); float * buf = ggml_get_data_f32(data); for (int i = 0; i < MNIST_NINPUT; ++i) { buf[i] = digitPtr[i] / 255.0f; } ggml_set_zero(ggml_opt_dataset_labels(dataset)); // The labels are not needed. mnist_model model = mnist_model_init_from_file("mnist-f32.gguf", "CPU", /*nbatch_logical =*/ 1, /*nbatch_physical =*/ 1); mnist_model_build(model); ggml_opt_result_t result = mnist_model_eval(model, dataset); int32_t pred; ggml_opt_result_pred(result, &pred); return pred; } int wasm_random_digit(char * digitPtr) { auto fin = std::ifstream("t10k-images-idx3-ubyte", std::ios::binary); if (!fin) { fprintf(stderr, "failed to open digits file\n"); return 0; } srand(time(NULL)); // Seek to a random digit: 16-byte header + 28*28 * (random 0 - 10000) fin.seekg(16 + MNIST_NINPUT * (rand() % MNIST_NTEST)); fin.read(digitPtr, MNIST_NINPUT); return 1; } #ifdef __cplusplus } #endif ggml-org-ggml-3678254/examples/mnist/mnist-common.h000066400000000000000000000156721512524704700221400ustar00rootroot00000000000000#include #include #include #include #include #include #include "ggml-alloc.h" #include "ggml-backend.h" #include "ggml.h" #include "gguf.h" #include "ggml-cpu.h" #include "ggml-opt.h" #define MNIST_NTRAIN 60000 #define MNIST_NTEST 10000 // Gradient accumulation can be achieved by setting the logical batch size to a multiple of the physical one. // The logical batch size determines how many datapoints are used for a gradient update. 
// The physical batch size determines how many datapoints are processed in parallel, larger values utilize compute better but need more memory. #define MNIST_NBATCH_LOGICAL 1000 #define MNIST_NBATCH_PHYSICAL 500 static_assert(MNIST_NBATCH_LOGICAL % MNIST_NBATCH_PHYSICAL == 0, "MNIST_NBATCH_LOGICAL % MNIST_NBATCH_PHYSICAL != 0"); static_assert(MNIST_NTRAIN % MNIST_NBATCH_LOGICAL == 0, "MNIST_NTRAIN % MNIST_NBATCH_LOGICAL != 0"); static_assert(MNIST_NTEST % MNIST_NBATCH_LOGICAL == 0, "MNIST_NTRAIN % MNIST_NBATCH_LOGICAL != 0"); #define MNIST_HW 28 #define MNIST_NINPUT (MNIST_HW*MNIST_HW) #define MNIST_NCLASSES 10 #define MNIST_NHIDDEN 500 // NCB = number of channels base #define MNIST_CNN_NCB 8 struct mnist_model { std::string arch; ggml_backend_sched_t backend_sched; std::vector backends; const int nbatch_logical; const int nbatch_physical; struct ggml_tensor * images = nullptr; struct ggml_tensor * logits = nullptr; struct ggml_tensor * fc1_weight = nullptr; struct ggml_tensor * fc1_bias = nullptr; struct ggml_tensor * fc2_weight = nullptr; struct ggml_tensor * fc2_bias = nullptr; struct ggml_tensor * conv1_kernel = nullptr; struct ggml_tensor * conv1_bias = nullptr; struct ggml_tensor * conv2_kernel = nullptr; struct ggml_tensor * conv2_bias = nullptr; struct ggml_tensor * dense_weight = nullptr; struct ggml_tensor * dense_bias = nullptr; struct ggml_context * ctx_gguf = nullptr; struct ggml_context * ctx_static = nullptr; struct ggml_context * ctx_compute = nullptr; ggml_backend_buffer_t buf_gguf = nullptr; ggml_backend_buffer_t buf_static = nullptr; mnist_model(const std::string & backend_name, const int nbatch_logical, const int nbatch_physical) : nbatch_logical(nbatch_logical), nbatch_physical(nbatch_physical) { std::vector devices; const int ncores_logical = std::thread::hardware_concurrency(); const int nthreads = std::min(ncores_logical, (ncores_logical + 4) / 2); // Add primary backend: if (!backend_name.empty()) { ggml_backend_dev_t dev = ggml_backend_dev_by_name(backend_name.c_str()); if (dev == nullptr) { fprintf(stderr, "%s: ERROR: backend %s not found, available:\n", __func__, backend_name.c_str()); for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { ggml_backend_dev_t dev_i = ggml_backend_dev_get(i); fprintf(stderr, " - %s (%s)\n", ggml_backend_dev_name(dev_i), ggml_backend_dev_description(dev_i)); } exit(1); } ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); GGML_ASSERT(backend); if (ggml_backend_is_cpu(backend)) { ggml_backend_cpu_set_n_threads(backend, nthreads); } backends.push_back(backend); devices.push_back(dev); } // Add all available backends as fallback. // A "backend" is a stream on a physical device so there is no problem with adding multiple backends for the same device. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { ggml_backend_dev_t dev = ggml_backend_dev_get(i); ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); GGML_ASSERT(backend); if (ggml_backend_is_cpu(backend)) { ggml_backend_cpu_set_n_threads(backend, nthreads); } backends.push_back(backend); devices.push_back(dev); } // The order of the backends passed to ggml_backend_sched_new determines which backend is given priority. 
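// Passing nullptr for the buffer types below makes each backend use its default buffer type.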
backend_sched = ggml_backend_sched_new(backends.data(), nullptr, backends.size(), GGML_DEFAULT_GRAPH_SIZE, false, true); fprintf(stderr, "%s: using %s (%s) as primary backend\n", __func__, ggml_backend_name(backends[0]), ggml_backend_dev_description(devices[0])); if (backends.size() >= 2) { fprintf(stderr, "%s: unsupported operations will be executed on the following fallback backends (in order of priority):\n", __func__); for (size_t i = 1; i < backends.size(); ++i) { fprintf(stderr, "%s: - %s (%s)\n", __func__, ggml_backend_name(backends[i]), ggml_backend_dev_description(devices[i])); } } { const size_t size_meta = 1024*ggml_tensor_overhead(); struct ggml_init_params params = { /*.mem_size =*/ size_meta, /*.mem_buffer =*/ nullptr, /*.no_alloc =*/ true, }; ctx_static = ggml_init(params); } { // The compute context needs a total of 3 compute graphs: forward pass + backwards pass (with/without optimizer step). const size_t size_meta = GGML_DEFAULT_GRAPH_SIZE*ggml_tensor_overhead() + 3*ggml_graph_overhead(); struct ggml_init_params params = { /*.mem_size =*/ size_meta, /*.mem_buffer =*/ nullptr, /*.no_alloc =*/ true, }; ctx_compute = ggml_init(params); } } ~mnist_model() { ggml_free(ctx_gguf); ggml_free(ctx_static); ggml_free(ctx_compute); ggml_backend_buffer_free(buf_gguf); ggml_backend_buffer_free(buf_static); ggml_backend_sched_free(backend_sched); for (ggml_backend_t backend : backends) { ggml_backend_free(backend); } } }; bool mnist_image_load(const std::string & fname, ggml_opt_dataset_t dataset); void mnist_image_print(FILE * f, ggml_opt_dataset_t dataset, const int iex); bool mnist_label_load(const std::string & fname, ggml_opt_dataset_t dataset); mnist_model mnist_model_init_from_file(const std::string & fname, const std::string & backend, const int nbatch_logical, const int nbatch_physical); mnist_model mnist_model_init_random(const std::string & arch, const std::string & backend, const int nbatch_logical, const int nbatch_physical); void mnist_model_build(mnist_model & model); ggml_opt_result_t mnist_model_eval(mnist_model & model, ggml_opt_dataset_t dataset); void mnist_model_train(mnist_model & model, ggml_opt_dataset_t dataset, const int nepoch, const float val_split); void mnist_model_save(mnist_model & model, const std::string & fname); ggml-org-ggml-3678254/examples/mnist/mnist-eval.cpp000066400000000000000000000040221512524704700221150ustar00rootroot00000000000000#include "ggml.h" #include "ggml-opt.h" #include "mnist-common.h" #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif int main(int argc, char ** argv) { srand(time(NULL)); ggml_time_init(); if (argc != 4 && argc != 5) { fprintf(stderr, "Usage: %s mnist-fc-f32.gguf data/MNIST/raw/t10k-images-idx3-ubyte data/MNIST/raw/t10k-labels-idx1-ubyte [CPU/CUDA0]\n", argv[0]); exit(1); } ggml_opt_dataset_t dataset = ggml_opt_dataset_init(GGML_TYPE_F32, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NCLASSES, MNIST_NTEST, MNIST_NBATCH_PHYSICAL); if (!mnist_image_load(argv[2], dataset)) { return 1; } if (!mnist_label_load(argv[3], dataset)) { return 1; } const int iex = rand() % MNIST_NTEST; mnist_image_print(stdout, dataset, iex); const std::string backend = argc >= 5 ? 
argv[4] : ""; const int64_t t_start_us = ggml_time_us(); mnist_model model = mnist_model_init_from_file(argv[1], backend, MNIST_NBATCH_LOGICAL, MNIST_NBATCH_PHYSICAL); mnist_model_build(model); const int64_t t_load_us = ggml_time_us() - t_start_us; fprintf(stdout, "%s: loaded model in %.2lf ms\n", __func__, t_load_us / 1000.0); ggml_opt_result_t result_eval = mnist_model_eval(model, dataset); std::vector pred(MNIST_NTEST); ggml_opt_result_pred(result_eval, pred.data()); fprintf(stdout, "%s: predicted digit is %d\n", __func__, pred[iex]); double loss; double loss_unc; ggml_opt_result_loss(result_eval, &loss, &loss_unc); fprintf(stdout, "%s: test_loss=%.6lf+-%.6lf\n", __func__, loss, loss_unc); double accuracy; double accuracy_unc; ggml_opt_result_accuracy(result_eval, &accuracy, &accuracy_unc); fprintf(stdout, "%s: test_acc=%.2lf+-%.2lf%%\n", __func__, 100.0*accuracy, 100.0*accuracy_unc); ggml_opt_result_free(result_eval); return 0; } ggml-org-ggml-3678254/examples/mnist/mnist-train-cnn.py000077500000000000000000000063401512524704700227350ustar00rootroot00000000000000#!/usr/bin/env python3 import sys from time import time import gguf import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers def train(model_path): # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) # Load the data and split it between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model = keras.Sequential( [ keras.Input(shape=input_shape, dtype=tf.float32), layers.Conv2D(8, kernel_size=(3, 3), padding="same", activation="relu", dtype=tf.float32), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(16, kernel_size=(3, 3), padding="same", activation="relu", dtype=tf.float32), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dense(num_classes, activation="softmax", dtype=tf.float32), ] ) model.summary() batch_size = 1000 epochs = 30 model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) t_start = time() model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) print(f"Training took {time()-t_start:.2f}s") score = model.evaluate(x_test, y_test, verbose=0) print(f"Test loss: {score[0]:.6f}") print(f"Test accuracy: {100*score[1]:.2f}%") gguf_writer = gguf.GGUFWriter(model_path, "mnist-cnn") conv1_kernel = model.layers[0].weights[0].numpy() conv1_kernel = np.moveaxis(conv1_kernel, [2, 3], [0, 1]) gguf_writer.add_tensor("conv1.kernel", conv1_kernel, raw_shape=(8, 1, 3, 3)) conv1_bias = model.layers[0].weights[1].numpy() gguf_writer.add_tensor("conv1.bias", conv1_bias, raw_shape=(1, 8, 1, 1)) conv2_kernel = model.layers[2].weights[0].numpy() conv2_kernel = np.moveaxis(conv2_kernel, [0, 1, 2, 3], [2, 3, 1, 0]) gguf_writer.add_tensor("conv2.kernel", conv2_kernel, raw_shape=(16, 8, 3, 3)) conv2_bias = model.layers[2].weights[1].numpy() gguf_writer.add_tensor("conv2.bias", conv2_bias, raw_shape=(1, 16, 1, 1)) dense_weight = model.layers[-1].weights[0].numpy() dense_weight = 
dense_weight.transpose() gguf_writer.add_tensor("dense.weight", dense_weight, raw_shape=(10, 7*7*16)) dense_bias = model.layers[-1].weights[1].numpy() gguf_writer.add_tensor("dense.bias", dense_bias) gguf_writer.write_header_to_file() gguf_writer.write_kv_data_to_file() gguf_writer.write_tensors_to_file() gguf_writer.close() print(f"GGUF model saved to '{model_path}'") if __name__ == '__main__': if len(sys.argv) != 2: print(f"Usage: {sys.argv[0]} ") sys.exit(1) train(sys.argv[1]) ggml-org-ggml-3678254/examples/mnist/mnist-train-fc.py000066400000000000000000000106711512524704700225460ustar00rootroot00000000000000import gguf import numpy as np import torch import torch.nn as nn import torchvision.datasets as dsets import torchvision.transforms as transforms from torch.autograd import Variable import sys from time import time input_size = 784 # img_size = (28,28) ---> 28*28=784 in total hidden_size = 500 # number of nodes at hidden layer num_classes = 10 # number of output classes discrete range [0,9] num_epochs = 30 # number of times which the entire dataset is passed throughout the model batch_size = 1000 # the size of input data used for one iteration lr = 1e-3 # size of step class Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(Net, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.relu = nn.ReLU() self.fc2 = nn.Linear(hidden_size, num_classes) def forward(self, x): out = self.fc1(x) out = self.relu(out) out = self.fc2(out) return out def train(model_path): train_data = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) test_data = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor()) assert len(train_data) == 60000 assert len(test_data) == 10000 kwargs_train_test = dict(batch_size=batch_size, num_workers=4, pin_memory=True) train_gen = torch.utils.data.DataLoader(dataset=train_data, shuffle=True, **kwargs_train_test) test_gen = torch.utils.data.DataLoader(dataset=test_data, shuffle=False, **kwargs_train_test) net = Net(input_size, hidden_size, num_classes) if torch.cuda.is_available(): net.cuda() loss_function = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(net.parameters(), lr=lr) t_start = time() for epoch in range(num_epochs): loss_history = [] ncorrect = 0 for i, (images, labels) in enumerate(train_gen): images = Variable(images.view(-1, 28*28)) labels = Variable(labels) if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() optimizer.zero_grad() outputs = net(images) loss = loss_function(outputs, labels) loss_history.append(loss.cpu().data) _, predictions = torch.max(outputs, 1) ncorrect += (predictions == labels).sum() loss.backward() optimizer.step() if (i + 1)*batch_size % 10000 == 0: loss_mean = np.mean(loss_history) accuracy = ncorrect / ((i + 1) * batch_size) print( f"Epoch [{epoch+1:02d}/{num_epochs}], " f"Step [{(i+1)*batch_size:05d}/{len(train_data)}], " f"Loss: {loss_mean:.4f}, Accuracy: {100*accuracy:.2f}%") print() print(f"Training took {time()-t_start:.2f}s") loss_history = [] ncorrect = 0 for i, (images, labels) in enumerate(test_gen): images = Variable(images.view(-1, 28*28)) labels = Variable(labels) if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() outputs = net(images) loss = loss_function(outputs, labels) loss_history.append(loss.cpu().data) _, predictions = torch.max(outputs, 1) ncorrect += (predictions == labels).sum().cpu().numpy() loss_mean = np.mean(loss_history) loss_uncertainty = np.std(loss_history) / 
np.sqrt(len(loss_history) - 1) accuracy_mean = ncorrect / (len(test_gen) * batch_size) accuracy_uncertainty = np.sqrt(accuracy_mean * (1.0 - accuracy_mean) / (len(test_gen) * batch_size)) print() print(f"Test loss: {loss_mean:.6f}+-{loss_uncertainty:.6f}, Test accuracy: {100*accuracy_mean:.2f}+-{100*accuracy_uncertainty:.2f}%") gguf_writer = gguf.GGUFWriter(model_path, "mnist-fc") print() print(f"Model tensors saved to {model_path}:") for tensor_name in net.state_dict().keys(): data = net.state_dict()[tensor_name].squeeze().cpu().numpy() print(tensor_name, "\t", data.shape) gguf_writer.add_tensor(tensor_name, data) gguf_writer.write_header_to_file() gguf_writer.write_kv_data_to_file() gguf_writer.write_tensors_to_file() gguf_writer.close() if __name__ == '__main__': if len(sys.argv) != 2: print(f"Usage: {sys.argv[0]} ") sys.exit(1) train(sys.argv[1]) ggml-org-ggml-3678254/examples/mnist/mnist-train.cpp000066400000000000000000000026321512524704700223100ustar00rootroot00000000000000#include "ggml-opt.h" #include "mnist-common.h" #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif int main(int argc, char ** argv) { if (argc != 5 && argc != 6) { fprintf(stderr, "Usage: %s mnist-fc mnist-fc-f32.gguf data/MNIST/raw/train-images-idx3-ubyte data/MNIST/raw/train-labels-idx1-ubyte [CPU/CUDA0]\n", argv[0]); exit(0); } // The MNIST model is so small that the overhead from data shuffling is non-negligible, especially with CUDA. // With a shard size of 10 this overhead is greatly reduced at the cost of less shuffling (does not seem to have a significant impact). // A batch of 500 images then consists of 50 random shards of size 10 instead of 500 random shards of size 1. ggml_opt_dataset_t dataset = ggml_opt_dataset_init(GGML_TYPE_F32, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NCLASSES, MNIST_NTRAIN, /*ndata_shard =*/ 10); if (!mnist_image_load(argv[3], dataset)) { return 1; } if (!mnist_label_load(argv[4], dataset)) { return 1; } mnist_model model = mnist_model_init_random(argv[1], argc >= 6 ? 
argv[5] : "", MNIST_NBATCH_LOGICAL, MNIST_NBATCH_PHYSICAL); mnist_model_build(model); mnist_model_train(model, dataset, /*nepoch =*/ 30, /*val_split =*/ 0.05f); mnist_model_save(model, argv[2]); } ggml-org-ggml-3678254/examples/mnist/server.py000066400000000000000000000022601512524704700212140ustar00rootroot00000000000000import http.server import socketserver import os import sys DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), 'web')) PORT = 8000 class CustomHTTPRequestHandler(http.server.SimpleHTTPRequestHandler): def __init__(self, *args, **kwargs): super().__init__(*args, directory=DIRECTORY, **kwargs) def end_headers(self): # Add required headers for SharedArrayBuffer self.send_header("Cross-Origin-Opener-Policy", "same-origin") self.send_header("Cross-Origin-Embedder-Policy", "require-corp") self.send_header("Access-Control-Allow-Origin", "*") super().end_headers() # Enable address reuse class CustomServer(socketserver.TCPServer): allow_reuse_address = True try: with CustomServer(("", PORT), CustomHTTPRequestHandler) as httpd: print(f"Serving directory '{DIRECTORY}' at http://localhost:{PORT}") print(f"Application context root: http://localhost:{PORT}/") try: httpd.serve_forever() except KeyboardInterrupt: print("\nServer stopped.") # Force complete exit sys.exit(0) except OSError as e: print(f"Error: {e}") sys.exit(1) ggml-org-ggml-3678254/examples/mnist/web/000077500000000000000000000000001512524704700201115ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/mnist/web/.gitignore000066400000000000000000000000021512524704700220710ustar00rootroot00000000000000* ggml-org-ggml-3678254/examples/mnist/web/index.html000066400000000000000000000127351512524704700221160ustar00rootroot00000000000000 MNIST with GGML

[examples/mnist/web/index.html: page markup and scripts are not preserved here; the visible page text is "MNIST digit recognizer with GGML", "Loading model and data set, please wait ...", and "Your browser does not support the HTML canvas tag."]

ggml-org-ggml-3678254/examples/perf-metal/000077500000000000000000000000001512524704700202365ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/perf-metal/CMakeLists.txt000066400000000000000000000002161512524704700227750ustar00rootroot00000000000000# # perf-metal set(TEST_TARGET perf-metal) add_executable(${TEST_TARGET} perf-metal.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml) ggml-org-ggml-3678254/examples/perf-metal/perf-metal.cpp000066400000000000000000000105531512524704700230020ustar00rootroot00000000000000// basic tool to experiment with the Metal backend // // 1. Get GPU trace of a dummy graph: // // rm -rf /tmp/perf-metal.gputrace // make -j perf-metal && METAL_CAPTURE_ENABLED=1 ./bin/perf-metal // open /tmp/perf-metal.gputrace // // https://github.com/ggerganov/llama.cpp/issues/9507 // #include "ggml.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include "ggml-metal.h" #include #include #include int main(int argc, char ** argv) { int n_op = 1024; int n_iter = 128; if (argc > 1) { n_op = std::atoi(argv[1]); } if (argc > 2) { n_iter = std::atoi(argv[2]); } printf("%s: n_op = %d, n_iter = %d\n", __func__, n_op, n_iter); const int ne00 = 8; const int ne01 = 8; const int ne11 = 8; std::vector data0(ne00*ne01, 1.0f); std::vector data1(ne00*ne01, 1.0f/ne00); ggml_backend_t backend = ggml_backend_metal_init(); if (!backend) { fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); return 1; } const size_t ctx_size = 2 * ggml_tensor_overhead(); struct ggml_init_params params = { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; struct ggml_context * ctx = ggml_init(params); struct ggml_tensor * t0 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne00, ne01); struct ggml_tensor * t1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne00, ne11); ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend); ggml_backend_tensor_set(t0, data0.data(), 0, ggml_nbytes(t0)); ggml_backend_tensor_set(t1, data1.data(), 0, ggml_nbytes(t1)); struct ggml_cgraph * gf = NULL; struct ggml_context * ctx_cgraph = NULL; // create a dummy compute graph: // // x = mul_mat(t0, t1) // x = x * 1.0f // x = mul_mat(x, t1) // x = x * 1.0f // ... repeat n_op times ... // { struct ggml_init_params params0 = { /*.mem_size =*/ 4*n_op*ggml_tensor_overhead() + ggml_graph_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ctx_cgraph = ggml_init(params0); gf = ggml_new_graph_custom(ctx_cgraph, 4*n_op, false); struct ggml_tensor * cur = ggml_mul_mat(ctx_cgraph, t0, t1); cur = ggml_scale(ctx_cgraph, cur, 1.0f); for (int i = 0; i < n_op - 1; i++) { cur = ggml_mul_mat(ctx_cgraph, cur, t1); cur = ggml_scale(ctx_cgraph, cur, 1.0f); } cur = ggml_scale(ctx_cgraph, cur, 42.0f); ggml_build_forward_expand(gf, cur); } printf("%s: graph nodes = %d\n", __func__, ggml_graph_n_nodes(gf)); ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend)); ggml_gallocr_alloc_graph(allocr, gf); { // warm-up ggml_backend_graph_compute(backend, gf); const int64_t t_start = ggml_time_us(); for (int iter = 0; iter < n_iter; iter++) { ggml_backend_graph_compute(backend, gf); } const int64_t t_end = ggml_time_us(); // actual trace ggml_backend_metal_capture_next_compute(backend); ggml_backend_graph_compute(backend, gf); //std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // NOTE: these intervals do not appear in the XCode trace! 
ggml_backend_metal_capture_next_compute(backend); ggml_backend_graph_compute(backend, gf); //std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // NOTE: these intervals do not appear in the XCode trace! ggml_backend_metal_capture_next_compute(backend); ggml_backend_graph_compute(backend, gf); printf("%s: time = %f ms\n", __func__, (t_end - t_start) / 1000.0 / n_iter); } { struct ggml_tensor * res = ggml_graph_node(gf, -1); std::vector data(res->ne[0] * res->ne[1], 0.0f); ggml_backend_tensor_get(res, data.data(), 0, ggml_nbytes(res)); for (int i1 = 0; i1 < res->ne[1]; i1++) { for (int i0 = 0; i0 < res->ne[0]; i0++) { printf("%f ", data[i1*res->ne[0] + i0]); } printf("\n"); } } ggml_free(ctx_cgraph); ggml_gallocr_free(allocr); ggml_free(ctx); ggml_backend_buffer_free(buffer); ggml_backend_free(backend); return 0; } ggml-org-ggml-3678254/examples/prompts/000077500000000000000000000000001512524704700177065ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/prompts/dolly-v2.txt000066400000000000000000000200361512524704700221200ustar00rootroot00000000000000Hello World! => 12092,3645,2 I can't believe it's already Friday!" => 42,476,626,2868,352,434,2168,6794,1476 The URL for the website is https://www.example.com." => 510,10611,323,253,4422,310,5987,1358,2700,15,11667,15,681,449 "She said, 'I love to travel.'" => 3,2993,753,13,686,42,2389,281,4288,18574 'The temperature is 25.5°C.' => 8,510,3276,310,2030,15,22,3272,36,2464 "Let's meet at 2:30 p.m. in the park." => 3,1466,434,2525,387,374,27,1229,268,15,78,15,275,253,5603,449 The book costs $19.99 => 510,1984,4815,370,746,15,1525 "John's favorite color is blue." => 3,8732,434,7583,3295,310,4797,449 Th@nk y0u f0r y0ur h3lp! => 1044,33,30664,340,17,86,269,17,83,340,17,321,288,20,24343,2 C@n I g3t a c0ffee, pl3@se? => 36,33,79,309,305,20,85,247,260,17,71,6851,13,499,20,33,339,32 W0w! Th@t's @m@zing! => 56,17,88,2,596,33,85,434,1214,78,33,8537,2 H0w 4re y0u t0d@y? => 41,17,88,577,250,340,17,86,246,17,69,33,90,32 I l0ve t0 tr@vel @r0und the w0rld. => 42,298,17,306,246,17,492,33,652,1214,83,17,1504,253,259,17,83,392,15 Wh@t's y0ur f@v0rite m0vie? => 3152,33,85,434,340,17,321,269,33,87,17,3852,278,17,25858,32 The cat is sleeping on the mat. => 510,5798,310,14343,327,253,1111,15 I need to buy some groceries for dinner. => 42,878,281,4489,690,45160,447,323,8955,15 The sun is shining brightly in the sky. => 510,5101,310,28115,43925,275,253,8467,15 She is reading a book in the park. => 2993,310,4361,247,1984,275,253,5603,15 We went for a walk on the beach yesterday. => 1231,2427,323,247,2940,327,253,11600,11066,15 He plays the guitar like a pro. => 1328,7120,253,12609,751,247,354,15 They are going to the movies tonight. => 3726,403,1469,281,253,11321,11608,15 The flowers are blooming in the garden. => 510,12405,403,30601,272,275,253,10329,15 I enjoy listening to classical music. => 42,4264,11298,281,8946,3440,15 We need to buy groceries for the week. => 1231,878,281,4489,45160,447,323,253,2129,15 The dog is chasing its tail in circles. => 510,4370,310,31702,697,8105,275,14240,15 She is wearing a beautiful red dress. => 2993,310,9398,247,5389,2502,7619,15 He is a talented actor in Hollywood. => 1328,310,247,21220,12353,275,14759,15 The children are playing in the playground. => 510,2151,403,4882,275,253,41008,15 I'm going to visit my grandparents this weekend. => 42,1353,1469,281,4143,619,37186,436,8849,15 The coffee tastes bitter without sugar. => 510,8574,27491,17123,1293,8618,15 They are planning a surprise party for her. 
=> 3726,403,7219,247,9326,3128,323,617,15 She sings like an angel on stage. => 2993,44718,751,271,23087,327,3924,15 We should take a vacation to relax. => 1231,943,1379,247,18125,281,7921,15 He is studying medicine at the university. => 1328,310,12392,9921,387,253,9835,15 The rain is pouring heavily outside. => 510,9313,310,31226,11306,3345,15 I enjoy watching romantic movies. => 42,4264,7487,18109,11321,15 They are celebrating their anniversary today. => 3726,403,28765,616,19054,3063,15 She dances gracefully to the music. => 2993,47078,14426,2920,281,253,3440,15 He is an excellent basketball player. => 1328,310,271,7126,14648,4760,15 The baby is sleeping soundly in the crib. => 510,6858,310,14343,3590,314,275,253,260,725,15 I need to finish my homework before dinner. => 42,878,281,8416,619,32110,1078,8955,15 They are organizing a charity event next month. => 3726,403,26169,247,19489,2362,1735,1770,15 She is cooking a delicious meal for us. => 2993,310,12398,247,17319,11484,323,441,15 We should go hiking in the mountains. => 1231,943,564,33061,275,253,14700,15 The car broke down on the way to work. => 510,1113,9377,1066,327,253,1039,281,789,15 He loves playing video games in his free time. => 1328,14528,4882,3492,3958,275,521,1959,673,15 The birds are chirping in the trees. => 510,11260,403,36494,14650,275,253,7139,15 I want to learn how to play the piano. => 42,971,281,3037,849,281,1132,253,18542,15 They are building a new shopping mall in the city. => 3726,403,3652,247,747,12701,28974,275,253,2846,15 She is writing a novel in her spare time. => 2993,310,4028,247,4460,275,617,18345,673,15 We are going to the zoo this Saturday. => 1231,403,1469,281,253,41089,436,7814,15 The cake looks delicious with chocolate frosting. => 510,15221,4453,17319,342,14354,34724,272,15 He is a talented painter who sells his artwork. => 1328,310,247,21220,27343,665,27924,521,28227,15 The students are studying for their exams. => 510,3484,403,12392,323,616,34666,15 I enjoy swimming in the ocean. => 42,4264,17120,275,253,12927,15 They are renovating their house. => 3726,403,30074,839,616,2419,15 She is practicing yoga to stay healthy. => 2993,310,25815,25551,281,3297,5875,15 We should plant flowers in the garden. => 1231,943,4444,12405,275,253,10329,15 The traffic is heavy during rush hour. => 510,7137,310,5536,1309,16949,4964,15 He is a skilled chef who creates amazing dishes. => 1328,310,247,18024,26540,665,10513,8644,17114,15 The baby is crawling on the floor. => 510,6858,310,44922,327,253,5254,15 I need to buy a new pair of shoes. => 42,878,281,4489,247,747,4667,273,12682,15 They are going on a road trip across the country. => 3726,403,1469,327,247,3971,7408,2439,253,2586,15 She is playing the piano beautifully. => 2993,310,4882,253,18542,27839,15 We are going to a concert tomorrow night. => 1231,403,1469,281,247,12699,10873,2360,15 The cake tastes delicious with vanilla frosting. => 510,15221,27491,17319,342,26724,34724,272,15 He is a dedicated teacher who inspires his students. => 1328,310,247,9940,9732,665,6381,2731,521,3484,15 The students are participating in a science fair. => 510,3484,403,15299,275,247,5859,4344,15 I enjoy hiking in the mountains. => 42,4264,33061,275,253,14700,15 They are organizing a beach cleanup next weekend. => 3726,403,26169,247,11600,34709,1735,8849,15 She is taking photographs of nature. => 2993,310,3192,15928,273,3753,15 We should try a new restaurant in town. => 1231,943,1611,247,747,10301,275,3874,15 The traffic is moving slowly on the highway. 
=> 510,7137,310,4886,7808,327,253,17657,15 He is a talented singer with a beautiful voice. => 1328,310,247,21220,16057,342,247,5389,4318,15 The baby is laughing and giggling. => 510,6858,310,17053,285,41542,1981,15 I need to do laundry and wash my clothes. => 42,878,281,513,29023,285,14841,619,10015,15 They are planning a trip to Europe. => 3726,403,7219,247,7408,281,3060,15 She is learning how to play the guitar. => 2993,310,4715,849,281,1132,253,12609,15 We are going to a museum this Sunday. => 1231,403,1469,281,247,16064,436,6926,15 The coffee smells amazing in the morning. => 510,8574,34247,8644,275,253,4131,15 He is a hardworking farmer who grows crops. => 1328,310,247,1892,21107,24718,665,17202,19492,15 The students are presenting their research projects. => 510,3484,403,15250,616,2561,6493,15 I enjoy playing soccer with my friends. => 42,4264,4882,20391,342,619,3858,15 They are volunteering at a local shelter. => 3726,403,10057,2158,387,247,1980,17824,15 She is practicing martial arts for self-defense. => 2993,310,25815,29731,14635,323,1881,14,29337,15 We should try a new recipe for dinner. => 1231,943,1611,247,747,13612,323,8955,15 The traffic is congest => 510,7137,310,25801 The sun is shining brightly today. => 510,5101,310,28115,43925,3063,15 I enjoy reading books in my free time. => 42,4264,4361,5098,275,619,1959,673,15 She plays the piano beautifully. => 2993,7120,253,18542,27839,15 The cat chased the mouse around the room. => 510,5798,40754,253,6521,1475,253,2316,15 I love eating pizza with extra cheese. => 42,2389,9123,22534,342,4465,12173,15 He always wears a hat wherever he goes. => 1328,1900,31394,247,7856,20312,344,4566,15 The flowers in the garden are blooming. => 510,12405,275,253,10329,403,30601,272,15 She danced gracefully on the stage. => 2993,39860,14426,2920,327,253,3924,15 The dog barked loudly in the park. => 510,4370,21939,264,31311,275,253,5603,15 We went swimming in the ocean yesterday. => 1231,2427,17120,275,253,12927,11066,15 He speaks fluent French and Spanish. => 1328,16544,2938,290,5112,285,9883,15 The train arrived at the station on time. => 510,6194,7244,387,253,4660,327,673,15 She cooked a delicious meal for her family. => 2993,18621,247,17319,11484,323,617,2021,15 ggml-org-ggml-3678254/examples/prompts/gpt-2-chinese.txt000066400000000000000000000001141512524704700230100ustar00rootroot00000000000000请问洗手间在哪里? => 6435,7309,3819,2797,7313,1762,1525,7027,8043 ggml-org-ggml-3678254/examples/prompts/gpt-2.txt000066400000000000000000000177601512524704700214130ustar00rootroot00000000000000Hello World! => 15496,2159,0 I can't believe it's already Friday!" => 40,460,470,1975,340,338,1541,3217,2474 The URL for the website is https://www.example.com." => 464,10289,329,262,3052,318,3740,1378,2503,13,20688,13,785,526 "She said, 'I love to travel.'" => 1,3347,531,11,705,40,1842,284,3067,11496 'The temperature is 25.5°C.' => 6,464,5951,318,1679,13,20,7200,34,2637 "Let's meet at 2:30 p.m. in the park." => 1,5756,338,1826,379,362,25,1270,279,13,76,13,287,262,3952,526 The book costs $19.99 => 464,1492,3484,720,1129,13,2079 "John's favorite color is blue." => 1,7554,338,4004,3124,318,4171,526 Th@nk y0u f0r y0ur h3lp! => 817,31,77,74,331,15,84,277,15,81,331,15,333,289,18,34431,0 C@n I g3t a c0ffee, pl3@se? => 34,31,77,314,308,18,83,257,269,15,5853,11,458,18,31,325,30 W0w! Th@t's @m@zing! => 54,15,86,0,536,31,83,338,2488,76,31,9510,0 H0w 4re y0u t0d@y? => 39,15,86,604,260,331,15,84,256,15,67,31,88,30 I l0ve t0 tr@vel @r0und the w0rld. 
=> 40,300,15,303,256,15,491,31,626,2488,81,15,917,262,266,15,81,335,13 Wh@t's y0ur f@v0rite m0vie? => 1199,31,83,338,331,15,333,277,31,85,15,6525,285,15,85,494,30 The cat is sleeping on the mat. => 464,3797,318,11029,319,262,2603,13 I need to buy some groceries for dinner. => 40,761,284,2822,617,38464,329,8073,13 The sun is shining brightly in the sky. => 464,4252,318,22751,35254,287,262,6766,13 She is reading a book in the park. => 3347,318,3555,257,1492,287,262,3952,13 We went for a walk on the beach yesterday. => 1135,1816,329,257,2513,319,262,10481,7415,13 He plays the guitar like a pro. => 1544,5341,262,10047,588,257,386,13 They are going to the movies tonight. => 2990,389,1016,284,262,6918,9975,13 The flowers are blooming in the garden. => 464,12734,389,24924,3383,287,262,11376,13 I enjoy listening to classical music. => 40,2883,8680,284,15993,2647,13 We need to buy groceries for the week. => 1135,761,284,2822,38464,329,262,1285,13 The dog is chasing its tail in circles. => 464,3290,318,20023,663,7894,287,13332,13 She is wearing a beautiful red dress. => 3347,318,5762,257,4950,2266,6576,13 He is a talented actor in Hollywood. => 1544,318,257,12356,8674,287,8502,13 The children are playing in the playground. => 464,1751,389,2712,287,262,24817,13 I'm going to visit my grandparents this weekend. => 40,1101,1016,284,3187,616,28571,428,5041,13 The coffee tastes bitter without sugar. => 464,6891,18221,12922,1231,7543,13 They are planning a surprise party for her. => 2990,389,5410,257,5975,2151,329,607,13 She sings like an angel on stage. => 3347,33041,588,281,18304,319,3800,13 We should take a vacation to relax. => 1135,815,1011,257,14600,284,8960,13 He is studying medicine at the university. => 1544,318,11065,9007,379,262,6403,13 The rain is pouring heavily outside. => 464,6290,318,23147,7272,2354,13 I enjoy watching romantic movies. => 40,2883,4964,14348,6918,13 They are celebrating their anniversary today. => 2990,389,17499,511,11162,1909,13 She dances gracefully to the music. => 3347,38207,11542,2759,284,262,2647,13 He is an excellent basketball player. => 1544,318,281,6275,9669,2137,13 The baby is sleeping soundly in the crib. => 464,5156,318,11029,2128,306,287,262,48083,13 I need to finish my homework before dinner. => 40,761,284,5461,616,26131,878,8073,13 They are organizing a charity event next month. => 2990,389,16924,257,11016,1785,1306,1227,13 She is cooking a delicious meal for us. => 3347,318,10801,257,12625,9799,329,514,13 We should go hiking in the mountains. => 1135,815,467,24522,287,262,12269,13 The car broke down on the way to work. => 464,1097,6265,866,319,262,835,284,670,13 He loves playing video games in his free time. => 1544,10408,2712,2008,1830,287,465,1479,640,13 The birds are chirping in the trees. => 464,10087,389,442,343,13886,287,262,7150,13 I want to learn how to play the piano. => 40,765,284,2193,703,284,711,262,19132,13 They are building a new shopping mall in the city. => 2990,389,2615,257,649,9735,17374,287,262,1748,13 She is writing a novel in her spare time. => 3347,318,3597,257,5337,287,607,13952,640,13 We are going to the zoo this Saturday. => 1135,389,1016,284,262,26626,428,3909,13 The cake looks delicious with chocolate frosting. => 464,12187,3073,12625,351,11311,21682,278,13 He is a talented painter who sells his artwork. => 1544,318,257,12356,34537,508,16015,465,16257,13 The students are studying for their exams. => 464,2444,389,11065,329,511,26420,13 I enjoy swimming in the ocean. => 40,2883,14899,287,262,9151,13 They are renovating their house. 
=> 2990,389,24317,803,511,2156,13 She is practicing yoga to stay healthy. => 3347,318,18207,20351,284,2652,5448,13 We should plant flowers in the garden. => 1135,815,4618,12734,287,262,11376,13 The traffic is heavy during rush hour. => 464,4979,318,4334,1141,10484,1711,13 He is a skilled chef who creates amazing dishes. => 1544,318,257,14297,21221,508,8075,4998,16759,13 The baby is crawling on the floor. => 464,5156,318,34499,319,262,4314,13 I need to buy a new pair of shoes. => 40,761,284,2822,257,649,5166,286,10012,13 They are going on a road trip across the country. => 2990,389,1016,319,257,2975,5296,1973,262,1499,13 She is playing the piano beautifully. => 3347,318,2712,262,19132,21104,13 We are going to a concert tomorrow night. => 1135,389,1016,284,257,10010,9439,1755,13 The cake tastes delicious with vanilla frosting. => 464,12187,18221,12625,351,16858,21682,278,13 He is a dedicated teacher who inspires his students. => 1544,318,257,7256,4701,508,38934,465,2444,13 The students are participating in a science fair. => 464,2444,389,11983,287,257,3783,3148,13 I enjoy hiking in the mountains. => 40,2883,24522,287,262,12269,13 They are organizing a beach cleanup next weekend. => 2990,389,16924,257,10481,27425,1306,5041,13 She is taking photographs of nature. => 3347,318,2263,12566,286,3450,13 We should try a new restaurant in town. => 1135,815,1949,257,649,7072,287,3240,13 The traffic is moving slowly on the highway. => 464,4979,318,3867,6364,319,262,12763,13 He is a talented singer with a beautiful voice. => 1544,318,257,12356,14015,351,257,4950,3809,13 The baby is laughing and giggling. => 464,5156,318,14376,290,30442,1359,13 I need to do laundry and wash my clothes. => 40,761,284,466,25724,290,13502,616,8242,13 They are planning a trip to Europe. => 2990,389,5410,257,5296,284,2031,13 She is learning how to play the guitar. => 3347,318,4673,703,284,711,262,10047,13 We are going to a museum this Sunday. => 1135,389,1016,284,257,13257,428,3502,13 The coffee smells amazing in the morning. => 464,6891,25760,4998,287,262,3329,13 He is a hardworking farmer who grows crops. => 1544,318,257,1327,16090,18739,508,13676,14450,13 The students are presenting their research projects. => 464,2444,389,17728,511,2267,4493,13 I enjoy playing soccer with my friends. => 40,2883,2712,11783,351,616,2460,13 They are volunteering at a local shelter. => 2990,389,41434,379,257,1957,11772,13 She is practicing martial arts for self-defense. => 3347,318,18207,15618,10848,329,2116,12,19774,13 We should try a new recipe for dinner. => 1135,815,1949,257,649,8364,329,8073,13 The traffic is congest => 464,4979,318,22791 The sun is shining brightly today. => 464,4252,318,22751,35254,1909,13 I enjoy reading books in my free time. => 40,2883,3555,3835,287,616,1479,640,13 She plays the piano beautifully. => 3347,5341,262,19132,21104,13 The cat chased the mouse around the room. => 464,3797,26172,262,10211,1088,262,2119,13 I love eating pizza with extra cheese. => 40,1842,6600,14256,351,3131,9891,13 He always wears a hat wherever he goes. => 1544,1464,17326,257,6877,14530,339,2925,13 The flowers in the garden are blooming. => 464,12734,287,262,11376,389,24924,3383,13 She danced gracefully on the stage. => 3347,39480,11542,2759,319,262,3800,13 The dog barked loudly in the park. => 464,3290,21405,276,23112,287,262,3952,13 We went swimming in the ocean yesterday. => 1135,1816,14899,287,262,9151,7415,13 He speaks fluent French and Spanish. => 1544,9209,43472,4141,290,7897,13 The train arrived at the station on time. 
=> 464,4512,5284,379,262,4429,319,640,13 She cooked a delicious meal for her family. => 3347,15847,257,12625,9799,329,607,1641,13 ggml-org-ggml-3678254/examples/prompts/gpt-j.txt000066400000000000000000000177601512524704700215030ustar00rootroot00000000000000Hello World! => 15496,2159,0 I can't believe it's already Friday!" => 40,460,470,1975,340,338,1541,3217,2474 The URL for the website is https://www.example.com." => 464,10289,329,262,3052,318,3740,1378,2503,13,20688,13,785,526 "She said, 'I love to travel.'" => 1,3347,531,11,705,40,1842,284,3067,11496 'The temperature is 25.5°C.' => 6,464,5951,318,1679,13,20,7200,34,2637 "Let's meet at 2:30 p.m. in the park." => 1,5756,338,1826,379,362,25,1270,279,13,76,13,287,262,3952,526 The book costs $19.99 => 464,1492,3484,720,1129,13,2079 "John's favorite color is blue." => 1,7554,338,4004,3124,318,4171,526 Th@nk y0u f0r y0ur h3lp! => 817,31,77,74,331,15,84,277,15,81,331,15,333,289,18,34431,0 C@n I g3t a c0ffee, pl3@se? => 34,31,77,314,308,18,83,257,269,15,5853,11,458,18,31,325,30 W0w! Th@t's @m@zing! => 54,15,86,0,536,31,83,338,2488,76,31,9510,0 H0w 4re y0u t0d@y? => 39,15,86,604,260,331,15,84,256,15,67,31,88,30 I l0ve t0 tr@vel @r0und the w0rld. => 40,300,15,303,256,15,491,31,626,2488,81,15,917,262,266,15,81,335,13 Wh@t's y0ur f@v0rite m0vie? => 1199,31,83,338,331,15,333,277,31,85,15,6525,285,15,85,494,30 The cat is sleeping on the mat. => 464,3797,318,11029,319,262,2603,13 I need to buy some groceries for dinner. => 40,761,284,2822,617,38464,329,8073,13 The sun is shining brightly in the sky. => 464,4252,318,22751,35254,287,262,6766,13 She is reading a book in the park. => 3347,318,3555,257,1492,287,262,3952,13 We went for a walk on the beach yesterday. => 1135,1816,329,257,2513,319,262,10481,7415,13 He plays the guitar like a pro. => 1544,5341,262,10047,588,257,386,13 They are going to the movies tonight. => 2990,389,1016,284,262,6918,9975,13 The flowers are blooming in the garden. => 464,12734,389,24924,3383,287,262,11376,13 I enjoy listening to classical music. => 40,2883,8680,284,15993,2647,13 We need to buy groceries for the week. => 1135,761,284,2822,38464,329,262,1285,13 The dog is chasing its tail in circles. => 464,3290,318,20023,663,7894,287,13332,13 She is wearing a beautiful red dress. => 3347,318,5762,257,4950,2266,6576,13 He is a talented actor in Hollywood. => 1544,318,257,12356,8674,287,8502,13 The children are playing in the playground. => 464,1751,389,2712,287,262,24817,13 I'm going to visit my grandparents this weekend. => 40,1101,1016,284,3187,616,28571,428,5041,13 The coffee tastes bitter without sugar. => 464,6891,18221,12922,1231,7543,13 They are planning a surprise party for her. => 2990,389,5410,257,5975,2151,329,607,13 She sings like an angel on stage. => 3347,33041,588,281,18304,319,3800,13 We should take a vacation to relax. => 1135,815,1011,257,14600,284,8960,13 He is studying medicine at the university. => 1544,318,11065,9007,379,262,6403,13 The rain is pouring heavily outside. => 464,6290,318,23147,7272,2354,13 I enjoy watching romantic movies. => 40,2883,4964,14348,6918,13 They are celebrating their anniversary today. => 2990,389,17499,511,11162,1909,13 She dances gracefully to the music. => 3347,38207,11542,2759,284,262,2647,13 He is an excellent basketball player. => 1544,318,281,6275,9669,2137,13 The baby is sleeping soundly in the crib. => 464,5156,318,11029,2128,306,287,262,48083,13 I need to finish my homework before dinner. 
=> 40,761,284,5461,616,26131,878,8073,13 They are organizing a charity event next month. => 2990,389,16924,257,11016,1785,1306,1227,13 She is cooking a delicious meal for us. => 3347,318,10801,257,12625,9799,329,514,13 We should go hiking in the mountains. => 1135,815,467,24522,287,262,12269,13 The car broke down on the way to work. => 464,1097,6265,866,319,262,835,284,670,13 He loves playing video games in his free time. => 1544,10408,2712,2008,1830,287,465,1479,640,13 The birds are chirping in the trees. => 464,10087,389,442,343,13886,287,262,7150,13 I want to learn how to play the piano. => 40,765,284,2193,703,284,711,262,19132,13 They are building a new shopping mall in the city. => 2990,389,2615,257,649,9735,17374,287,262,1748,13 She is writing a novel in her spare time. => 3347,318,3597,257,5337,287,607,13952,640,13 We are going to the zoo this Saturday. => 1135,389,1016,284,262,26626,428,3909,13 The cake looks delicious with chocolate frosting. => 464,12187,3073,12625,351,11311,21682,278,13 He is a talented painter who sells his artwork. => 1544,318,257,12356,34537,508,16015,465,16257,13 The students are studying for their exams. => 464,2444,389,11065,329,511,26420,13 I enjoy swimming in the ocean. => 40,2883,14899,287,262,9151,13 They are renovating their house. => 2990,389,24317,803,511,2156,13 She is practicing yoga to stay healthy. => 3347,318,18207,20351,284,2652,5448,13 We should plant flowers in the garden. => 1135,815,4618,12734,287,262,11376,13 The traffic is heavy during rush hour. => 464,4979,318,4334,1141,10484,1711,13 He is a skilled chef who creates amazing dishes. => 1544,318,257,14297,21221,508,8075,4998,16759,13 The baby is crawling on the floor. => 464,5156,318,34499,319,262,4314,13 I need to buy a new pair of shoes. => 40,761,284,2822,257,649,5166,286,10012,13 They are going on a road trip across the country. => 2990,389,1016,319,257,2975,5296,1973,262,1499,13 She is playing the piano beautifully. => 3347,318,2712,262,19132,21104,13 We are going to a concert tomorrow night. => 1135,389,1016,284,257,10010,9439,1755,13 The cake tastes delicious with vanilla frosting. => 464,12187,18221,12625,351,16858,21682,278,13 He is a dedicated teacher who inspires his students. => 1544,318,257,7256,4701,508,38934,465,2444,13 The students are participating in a science fair. => 464,2444,389,11983,287,257,3783,3148,13 I enjoy hiking in the mountains. => 40,2883,24522,287,262,12269,13 They are organizing a beach cleanup next weekend. => 2990,389,16924,257,10481,27425,1306,5041,13 She is taking photographs of nature. => 3347,318,2263,12566,286,3450,13 We should try a new restaurant in town. => 1135,815,1949,257,649,7072,287,3240,13 The traffic is moving slowly on the highway. => 464,4979,318,3867,6364,319,262,12763,13 He is a talented singer with a beautiful voice. => 1544,318,257,12356,14015,351,257,4950,3809,13 The baby is laughing and giggling. => 464,5156,318,14376,290,30442,1359,13 I need to do laundry and wash my clothes. => 40,761,284,466,25724,290,13502,616,8242,13 They are planning a trip to Europe. => 2990,389,5410,257,5296,284,2031,13 She is learning how to play the guitar. => 3347,318,4673,703,284,711,262,10047,13 We are going to a museum this Sunday. => 1135,389,1016,284,257,13257,428,3502,13 The coffee smells amazing in the morning. => 464,6891,25760,4998,287,262,3329,13 He is a hardworking farmer who grows crops. => 1544,318,257,1327,16090,18739,508,13676,14450,13 The students are presenting their research projects. 
=> 464,2444,389,17728,511,2267,4493,13 I enjoy playing soccer with my friends. => 40,2883,2712,11783,351,616,2460,13 They are volunteering at a local shelter. => 2990,389,41434,379,257,1957,11772,13 She is practicing martial arts for self-defense. => 3347,318,18207,15618,10848,329,2116,12,19774,13 We should try a new recipe for dinner. => 1135,815,1949,257,649,8364,329,8073,13 The traffic is congest => 464,4979,318,22791 The sun is shining brightly today. => 464,4252,318,22751,35254,1909,13 I enjoy reading books in my free time. => 40,2883,3555,3835,287,616,1479,640,13 She plays the piano beautifully. => 3347,5341,262,19132,21104,13 The cat chased the mouse around the room. => 464,3797,26172,262,10211,1088,262,2119,13 I love eating pizza with extra cheese. => 40,1842,6600,14256,351,3131,9891,13 He always wears a hat wherever he goes. => 1544,1464,17326,257,6877,14530,339,2925,13 The flowers in the garden are blooming. => 464,12734,287,262,11376,389,24924,3383,13 She danced gracefully on the stage. => 3347,39480,11542,2759,319,262,3800,13 The dog barked loudly in the park. => 464,3290,21405,276,23112,287,262,3952,13 We went swimming in the ocean yesterday. => 1135,1816,14899,287,262,9151,7415,13 He speaks fluent French and Spanish. => 1544,9209,43472,4141,290,7897,13 The train arrived at the station on time. => 464,4512,5284,379,262,4429,319,640,13 She cooked a delicious meal for her family. => 3347,15847,257,12625,9799,329,607,1641,13 ggml-org-ggml-3678254/examples/prompts/gpt-neox-japanese.txt000066400000000000000000000001161512524704700237720ustar00rootroot00000000000000明日の天気はどうですか。 => 263,7353,268,18461,271,1722,18405,265 ggml-org-ggml-3678254/examples/prompts/gpt-neox.txt000066400000000000000000000200361512524704700222110ustar00rootroot00000000000000Hello World! => 12092,3645,2 I can't believe it's already Friday!" => 42,476,626,2868,352,434,2168,6794,1476 The URL for the website is https://www.example.com." => 510,10611,323,253,4422,310,5987,1358,2700,15,11667,15,681,449 "She said, 'I love to travel.'" => 3,2993,753,13,686,42,2389,281,4288,18574 'The temperature is 25.5°C.' => 8,510,3276,310,2030,15,22,3272,36,2464 "Let's meet at 2:30 p.m. in the park." => 3,1466,434,2525,387,374,27,1229,268,15,78,15,275,253,5603,449 The book costs $19.99 => 510,1984,4815,370,746,15,1525 "John's favorite color is blue." => 3,8732,434,7583,3295,310,4797,449 Th@nk y0u f0r y0ur h3lp! => 1044,33,30664,340,17,86,269,17,83,340,17,321,288,20,24343,2 C@n I g3t a c0ffee, pl3@se? => 36,33,79,309,305,20,85,247,260,17,71,6851,13,499,20,33,339,32 W0w! Th@t's @m@zing! => 56,17,88,2,596,33,85,434,1214,78,33,8537,2 H0w 4re y0u t0d@y? => 41,17,88,577,250,340,17,86,246,17,69,33,90,32 I l0ve t0 tr@vel @r0und the w0rld. => 42,298,17,306,246,17,492,33,652,1214,83,17,1504,253,259,17,83,392,15 Wh@t's y0ur f@v0rite m0vie? => 3152,33,85,434,340,17,321,269,33,87,17,3852,278,17,25858,32 The cat is sleeping on the mat. => 510,5798,310,14343,327,253,1111,15 I need to buy some groceries for dinner. => 42,878,281,4489,690,45160,447,323,8955,15 The sun is shining brightly in the sky. => 510,5101,310,28115,43925,275,253,8467,15 She is reading a book in the park. => 2993,310,4361,247,1984,275,253,5603,15 We went for a walk on the beach yesterday. => 1231,2427,323,247,2940,327,253,11600,11066,15 He plays the guitar like a pro. => 1328,7120,253,12609,751,247,354,15 They are going to the movies tonight. => 3726,403,1469,281,253,11321,11608,15 The flowers are blooming in the garden. 
=> 510,12405,403,30601,272,275,253,10329,15 I enjoy listening to classical music. => 42,4264,11298,281,8946,3440,15 We need to buy groceries for the week. => 1231,878,281,4489,45160,447,323,253,2129,15 The dog is chasing its tail in circles. => 510,4370,310,31702,697,8105,275,14240,15 She is wearing a beautiful red dress. => 2993,310,9398,247,5389,2502,7619,15 He is a talented actor in Hollywood. => 1328,310,247,21220,12353,275,14759,15 The children are playing in the playground. => 510,2151,403,4882,275,253,41008,15 I'm going to visit my grandparents this weekend. => 42,1353,1469,281,4143,619,37186,436,8849,15 The coffee tastes bitter without sugar. => 510,8574,27491,17123,1293,8618,15 They are planning a surprise party for her. => 3726,403,7219,247,9326,3128,323,617,15 She sings like an angel on stage. => 2993,44718,751,271,23087,327,3924,15 We should take a vacation to relax. => 1231,943,1379,247,18125,281,7921,15 He is studying medicine at the university. => 1328,310,12392,9921,387,253,9835,15 The rain is pouring heavily outside. => 510,9313,310,31226,11306,3345,15 I enjoy watching romantic movies. => 42,4264,7487,18109,11321,15 They are celebrating their anniversary today. => 3726,403,28765,616,19054,3063,15 She dances gracefully to the music. => 2993,47078,14426,2920,281,253,3440,15 He is an excellent basketball player. => 1328,310,271,7126,14648,4760,15 The baby is sleeping soundly in the crib. => 510,6858,310,14343,3590,314,275,253,260,725,15 I need to finish my homework before dinner. => 42,878,281,8416,619,32110,1078,8955,15 They are organizing a charity event next month. => 3726,403,26169,247,19489,2362,1735,1770,15 She is cooking a delicious meal for us. => 2993,310,12398,247,17319,11484,323,441,15 We should go hiking in the mountains. => 1231,943,564,33061,275,253,14700,15 The car broke down on the way to work. => 510,1113,9377,1066,327,253,1039,281,789,15 He loves playing video games in his free time. => 1328,14528,4882,3492,3958,275,521,1959,673,15 The birds are chirping in the trees. => 510,11260,403,36494,14650,275,253,7139,15 I want to learn how to play the piano. => 42,971,281,3037,849,281,1132,253,18542,15 They are building a new shopping mall in the city. => 3726,403,3652,247,747,12701,28974,275,253,2846,15 She is writing a novel in her spare time. => 2993,310,4028,247,4460,275,617,18345,673,15 We are going to the zoo this Saturday. => 1231,403,1469,281,253,41089,436,7814,15 The cake looks delicious with chocolate frosting. => 510,15221,4453,17319,342,14354,34724,272,15 He is a talented painter who sells his artwork. => 1328,310,247,21220,27343,665,27924,521,28227,15 The students are studying for their exams. => 510,3484,403,12392,323,616,34666,15 I enjoy swimming in the ocean. => 42,4264,17120,275,253,12927,15 They are renovating their house. => 3726,403,30074,839,616,2419,15 She is practicing yoga to stay healthy. => 2993,310,25815,25551,281,3297,5875,15 We should plant flowers in the garden. => 1231,943,4444,12405,275,253,10329,15 The traffic is heavy during rush hour. => 510,7137,310,5536,1309,16949,4964,15 He is a skilled chef who creates amazing dishes. => 1328,310,247,18024,26540,665,10513,8644,17114,15 The baby is crawling on the floor. => 510,6858,310,44922,327,253,5254,15 I need to buy a new pair of shoes. => 42,878,281,4489,247,747,4667,273,12682,15 They are going on a road trip across the country. => 3726,403,1469,327,247,3971,7408,2439,253,2586,15 She is playing the piano beautifully. 
=> 2993,310,4882,253,18542,27839,15 We are going to a concert tomorrow night. => 1231,403,1469,281,247,12699,10873,2360,15 The cake tastes delicious with vanilla frosting. => 510,15221,27491,17319,342,26724,34724,272,15 He is a dedicated teacher who inspires his students. => 1328,310,247,9940,9732,665,6381,2731,521,3484,15 The students are participating in a science fair. => 510,3484,403,15299,275,247,5859,4344,15 I enjoy hiking in the mountains. => 42,4264,33061,275,253,14700,15 They are organizing a beach cleanup next weekend. => 3726,403,26169,247,11600,34709,1735,8849,15 She is taking photographs of nature. => 2993,310,3192,15928,273,3753,15 We should try a new restaurant in town. => 1231,943,1611,247,747,10301,275,3874,15 The traffic is moving slowly on the highway. => 510,7137,310,4886,7808,327,253,17657,15 He is a talented singer with a beautiful voice. => 1328,310,247,21220,16057,342,247,5389,4318,15 The baby is laughing and giggling. => 510,6858,310,17053,285,41542,1981,15 I need to do laundry and wash my clothes. => 42,878,281,513,29023,285,14841,619,10015,15 They are planning a trip to Europe. => 3726,403,7219,247,7408,281,3060,15 She is learning how to play the guitar. => 2993,310,4715,849,281,1132,253,12609,15 We are going to a museum this Sunday. => 1231,403,1469,281,247,16064,436,6926,15 The coffee smells amazing in the morning. => 510,8574,34247,8644,275,253,4131,15 He is a hardworking farmer who grows crops. => 1328,310,247,1892,21107,24718,665,17202,19492,15 The students are presenting their research projects. => 510,3484,403,15250,616,2561,6493,15 I enjoy playing soccer with my friends. => 42,4264,4882,20391,342,619,3858,15 They are volunteering at a local shelter. => 3726,403,10057,2158,387,247,1980,17824,15 She is practicing martial arts for self-defense. => 2993,310,25815,29731,14635,323,1881,14,29337,15 We should try a new recipe for dinner. => 1231,943,1611,247,747,13612,323,8955,15 The traffic is congest => 510,7137,310,25801 The sun is shining brightly today. => 510,5101,310,28115,43925,3063,15 I enjoy reading books in my free time. => 42,4264,4361,5098,275,619,1959,673,15 She plays the piano beautifully. => 2993,7120,253,18542,27839,15 The cat chased the mouse around the room. => 510,5798,40754,253,6521,1475,253,2316,15 I love eating pizza with extra cheese. => 42,2389,9123,22534,342,4465,12173,15 He always wears a hat wherever he goes. => 1328,1900,31394,247,7856,20312,344,4566,15 The flowers in the garden are blooming. => 510,12405,275,253,10329,403,30601,272,15 She danced gracefully on the stage. => 2993,39860,14426,2920,327,253,3924,15 The dog barked loudly in the park. => 510,4370,21939,264,31311,275,253,5603,15 We went swimming in the ocean yesterday. => 1231,2427,17120,275,253,12927,11066,15 He speaks fluent French and Spanish. => 1328,16544,2938,290,5112,285,9883,15 The train arrived at the station on time. => 510,6194,7244,387,253,4660,327,673,15 She cooked a delicious meal for her family. => 2993,18621,247,17319,11484,323,617,2021,15 ggml-org-ggml-3678254/examples/prompts/polyglot-ko.txt000066400000000000000000000002561512524704700227320ustar00rootroot00000000000000이것은 테스트 이다. => 12271,296,6474,28037,17 걱정할 필요 없다. => 18311,482,1062,550,267,17 버그는 언젠가 고쳐진다. => 6904,272,8575,10381,1765,17 ggml-org-ggml-3678254/examples/prompts/replit.txt000066400000000000000000000235771512524704700217640ustar00rootroot00000000000000Hello World! => 6466,147,2317,350 I can't believe it's already Friday!" 
=> 286,512,172,185,13392,393,172,155,3239,147,29249,8537 The URL for the website is https://www.example.com." => 505,5635,250,170,11745,235,147,303,262,552,148,811,148,241,148,161 "She said, 'I love to travel.'" => 161,10386,4089,150,206,286,8440,194,147,12363,148,172,161 'The temperature is 25.5°C.' => 172,505,147,9502,235,147,20022,8516,228,148,172 "Let's meet at 2:30 p.m. in the park." => 161,8997,172,155,17120,536,147,162,5245,147,207,148,204,148,219,170,147,17664,148,161 The book costs $19.99 => 505,147,2277,17494,236,166,11824 "John's favorite color is blue." => 161,7475,172,155,147,11105,147,349,235,17046,148,161 Th@nk y0u f0r y0ur h3lp! => 6309,240,9019,147,237,159,247,147,202,159,223,147,237,159,2458,147,226,171,3899,350 C@n I g3t a c0ffee, pl3@se? => 228,240,211,398,147,267,171,185,216,147,196,159,13360,163,150,147,1287,171,240,155,163,272 W0w! Th@t's @m@zing! => 450,159,274,350,147,6309,240,185,172,155,268,204,240,301,248,350 H0w 4re y0u t0d@y? => 304,159,274,320,440,147,237,159,247,147,185,159,182,240,237,272 I l0ve t0 tr@vel @r0und the w0rld. => 286,997,159,1290,147,185,159,147,490,240,3893,268,223,159,3981,170,147,274,159,223,2833,148 Wh@t's y0ur f@v0rite m0vie? => 450,226,240,185,172,155,147,237,159,2458,147,202,240,252,159,5961,163,147,204,159,24373,272 The cat is sleeping on the mat. => 505,147,1604,235,147,3987,248,347,170,147,1297,148 I need to buy some groceries for dinner. => 286,1645,194,147,8068,1499,147,10022,1037,10023,250,147,182,2749,148 The sun is shining brightly in the sky. => 505,147,5852,235,147,7304,2967,147,215,649,391,219,170,147,7310,148 She is reading a book in the park. => 10386,235,9838,216,147,2277,219,170,147,17664,148 We went for a walk on the beach yesterday. => 3250,10825,250,216,147,8156,347,170,294,5371,147,28830,148 He plays the guitar like a pro. => 5301,7084,155,170,147,4604,2214,1425,216,3474,148 They are going to the movies tonight. => 18815,429,6552,194,170,147,15877,194,7907,148 The flowers are blooming in the garden. => 505,147,22953,155,429,147,10411,2799,248,219,170,147,22140,148 I enjoy listening to classical music. => 286,23162,15876,248,194,239,4251,147,7395,148 We need to buy groceries for the week. => 3250,1645,194,147,8068,147,10022,1037,10023,250,170,9238,148 The dog is chasing its tail in circles. => 505,147,6540,235,147,196,916,248,1602,147,5129,219,147,4095,155,148 She is wearing a beautiful red dress. => 10386,235,147,16427,248,216,147,23447,147,1160,147,14592,148 He is a talented actor in Hollywood. => 5301,235,216,147,29750,246,147,5112,219,147,16924,391,10477,148 The children are playing in the playground. => 505,7934,429,7084,248,219,170,7084,12055,148 I'm going to visit my grandparents this weekend. => 286,172,204,6552,194,9939,1247,147,11806,12019,291,9238,314,148 The coffee tastes bitter without sugar. => 505,147,21526,147,20931,155,5145,1430,1988,147,28759,148 They are planning a surprise party for her. => 18815,429,147,23661,216,147,29240,147,7344,250,1869,148 She sings like an angel on stage. => 10386,147,155,6502,1425,426,147,26028,347,12685,148 We should take a vacation to relax. => 3250,936,4654,216,147,15388,946,194,1998,2744,148 He is studying medicine at the university. => 5301,235,7959,248,147,20742,1668,536,170,147,8025,148 The rain is pouring heavily outside. => 505,147,6885,235,5306,248,1189,5451,391,8096,148 I enjoy watching romantic movies. => 286,23162,147,3355,248,147,26080,4140,147,15877,148 They are celebrating their anniversary today. 
=> 18815,429,147,30000,5841,1669,147,24734,5464,1770,13386,148 She dances gracefully to the music. => 10386,147,182,1626,155,147,267,8771,8001,194,170,147,7395,148 He is an excellent basketball player. => 5301,235,426,147,12300,675,185,147,26646,5132,6294,148 The baby is sleeping soundly in the crib. => 505,147,23597,235,147,3987,248,12642,391,219,170,147,7696,215,148 I need to finish my homework before dinner. => 286,1645,194,147,6717,1247,147,1071,2722,2643,147,182,2749,148 They are organizing a charity event next month. => 18815,429,147,16442,248,216,1054,1511,1663,2399,12821,148 She is cooking a delicious meal for us. => 10386,235,147,20453,248,216,3936,23455,147,26658,250,147,539,148 We should go hiking in the mountains. => 3250,936,4242,147,2254,5357,219,170,147,204,18028,155,148 The car broke down on the way to work. => 505,7553,147,510,10036,4288,347,170,3699,194,1916,148 He loves playing video games in his free time. => 5301,8440,155,7084,248,8722,147,11281,219,1439,4002,801,148 The birds are chirping in the trees. => 505,147,13043,155,429,147,3904,223,4639,219,170,5311,155,148 I want to learn how to play the piano. => 286,1857,194,14167,2496,194,7084,170,147,207,23635,148 They are building a new shopping mall in the city. => 18815,429,11038,216,277,147,22184,147,204,609,219,170,147,2416,148 She is writing a novel in her spare time. => 10386,235,3242,216,147,25814,219,1869,6772,2382,801,148 We are going to the zoo this Saturday. => 3250,429,6552,194,170,147,25101,291,147,31426,148 The cake looks delicious with chocolate frosting. => 505,147,24422,16303,3936,23455,312,147,5619,533,2239,147,202,3973,3431,148 He is a talented painter who sells his artwork. => 5301,235,216,147,29750,246,147,9226,279,2888,13004,155,1439,12234,2722,148 The students are studying for their exams. => 505,15707,429,7959,248,250,1669,147,12398,155,148 I enjoy swimming in the ocean. => 286,23162,147,4729,8528,248,219,170,147,26193,148 They are renovating their house. => 18815,429,991,10724,3643,1669,13788,148 She is practicing yoga to stay healthy. => 10386,235,147,18453,248,147,5063,1186,194,15344,147,28550,148 We should plant flowers in the garden. => 3250,936,147,9212,147,22953,155,219,170,147,22140,148 The traffic is heavy during rush hour. => 505,147,11097,235,147,22232,4340,147,22319,147,5686,148 He is a skilled chef who creates amazing dishes. => 5301,235,216,147,8891,246,9784,202,2888,13720,147,28880,147,23852,383,148 The baby is crawling on the floor. => 505,147,23597,235,147,22120,248,347,170,147,5895,148 I need to buy a new pair of shoes. => 286,1645,194,147,8068,216,277,12632,210,147,155,21953,155,148 They are going on a road trip across the country. => 18815,429,6552,347,216,147,6362,147,11395,9762,170,11305,148 She is playing the piano beautifully. => 10386,235,7084,248,170,147,207,23635,147,23447,391,148 We are going to a concert tomorrow night. => 3250,429,6552,194,216,1710,4391,29524,12716,148 The cake tastes delicious with vanilla frosting. => 505,147,24422,147,20931,155,3936,23455,312,5535,7476,147,202,3973,3431,148 He is a dedicated teacher who inspires his students. => 5301,235,216,326,8298,3460,147,9675,2888,147,28801,155,1439,15707,148 The students are participating in a science fair. => 505,15707,429,147,30961,3643,219,216,147,10587,147,7636,148 I enjoy hiking in the mountains. => 286,23162,147,2254,5357,219,170,147,204,18028,155,148 They are organizing a beach cleanup next weekend. => 18815,429,147,16442,248,216,294,5371,147,10401,2399,9238,314,148 She is taking photographs of nature. 
=> 10386,235,147,12345,147,4709,1547,155,210,147,211,8603,148 We should try a new restaurant in town. => 3250,936,147,746,216,277,147,11007,219,147,10200,148 The traffic is moving slowly on the highway. => 505,147,11097,235,147,8601,147,9880,391,347,170,5976,3330,148 He is a talented singer with a beautiful voice. => 5301,235,216,147,29750,246,147,155,248,279,312,216,147,23447,147,9316,148 The baby is laughing and giggling. => 505,147,23597,235,147,23066,248,221,147,2341,3631,2869,148 I need to do laundry and wash my clothes. => 286,1645,194,543,960,3981,2154,221,147,27589,1247,147,22141,383,148 They are planning a trip to Europe. => 18815,429,147,23661,216,147,11395,194,13131,148 She is learning how to play the guitar. => 10386,235,11754,2496,194,7084,170,147,4604,2214,148 We are going to a museum this Sunday. => 3250,429,6552,194,216,147,204,433,1177,291,147,29111,148 The coffee smells amazing in the morning. => 505,147,21526,31454,155,147,28880,219,170,20701,148 He is a hardworking farmer who grows crops. => 5301,235,216,8524,14992,147,16679,279,2888,147,6044,155,147,8650,155,148 The students are presenting their research projects. => 505,15707,429,5130,248,1669,13217,14235,148 I enjoy playing soccer with my friends. => 286,23162,7084,248,147,9351,5318,312,1247,147,5347,155,148 They are volunteering at a local shelter. => 18815,429,147,5238,7478,163,12798,536,216,2491,2905,1359,279,148 She is practicing martial arts for self-defense. => 10386,235,147,18453,248,147,3261,185,4381,12234,155,250,623,153,29896,148 We should try a new recipe for dinner. => 3250,936,147,746,216,277,147,9851,250,147,182,2749,148 The traffic is congest => 505,147,11097,235,1710,14169 The sun is shining brightly today. => 505,147,5852,235,147,7304,2967,147,215,649,391,13386,148 I enjoy reading books in my free time. => 286,23162,9838,147,9670,219,1247,4002,801,148 She plays the piano beautifully. => 10386,7084,155,170,147,207,23635,147,23447,391,148 The cat chased the mouse around the room. => 505,147,1604,147,196,916,246,170,12551,6890,170,9654,148 I love eating pizza with extra cheese. => 286,8440,147,163,3643,147,207,8403,312,8230,9784,383,163,148 He always wears a hat wherever he goes. => 5301,5418,147,16427,155,216,147,4879,2171,2433,1189,16177,148 The flowers in the garden are blooming. => 505,147,22953,155,219,170,147,22140,429,147,10411,2799,248,148 She danced gracefully on the stage. => 10386,13378,12408,147,267,8771,8001,347,170,12685,148 The dog barked loudly in the park. => 505,147,6540,147,973,293,246,147,30182,391,219,170,147,17664,148 We went swimming in the ocean yesterday. => 3250,10825,147,4729,8528,248,219,170,147,26193,147,28830,148 He speaks fluent French and Spanish. => 5301,147,13285,155,147,21677,147,254,17590,221,147,31519,148 The train arrived at the station on time. => 505,147,872,147,20712,182,536,170,147,7184,347,801,148 She cooked a delicious meal for her family. => 10386,147,20453,246,216,3936,23455,147,26658,250,1869,147,2002,148 ggml-org-ggml-3678254/examples/prompts/starcoder.txt000066400000000000000000000216561512524704700224470ustar00rootroot00000000000000Hello World! => 8279,10896,19 I can't believe it's already Friday!" => 59,883,1330,13710,561,1182,3425,506,25674,11555 The URL for the website is https://www.example.com." => 1318,3834,436,322,9575,438,1678,555,1499,32,2763,32,508,3107 "She said, 'I love to travel.'" => 20,25387,9884,30,330,59,14290,372,25283,29329 'The temperature is 25.5°C.' => 25,1318,13587,438,225,36,39,32,39,23767,53,4564 "Let's meet at 2:30 p.m. 
in the park." => 20,9809,1182,18450,821,225,36,44,37,34,298,32,95,32,328,322,880,93,3107 The book costs $19.99 => 1318,7618,25950,398,35,43,32,43,43 "John's favorite color is blue." => 20,19693,1182,27448,1963,438,10087,3107 Th@nk y0u f0r y0ur h3lp! => 1027,50,19877,533,34,103,296,34,100,533,34,305,420,37,1915,19 C@n I g3t a c0ffee, pl3@se? => 53,50,96,439,485,37,102,312,281,34,21298,30,1278,37,50,277,49 W0w! Th@t's @m@zing! => 73,34,105,19,947,50,102,1182,477,95,50,26768,19 H0w 4re y0u t0d@y? => 58,34,105,225,38,268,533,34,103,273,34,86,50,107,49 I l0ve t0 tr@vel @r0und the w0rld. => 59,456,34,587,273,34,554,50,1203,477,100,34,642,322,341,34,100,1381,32 Wh@t's y0ur f@v0rite m0vie? => 2444,50,102,1182,533,34,305,296,50,104,34,1049,345,34,104,1075,49 The cat is sleeping on the mat. => 1318,10501,438,9368,299,544,322,2491,32 I need to buy some groceries for dinner. => 59,1849,372,16968,1629,20234,85,6958,436,343,3369,32 The sun is shining brightly in the sky. => 1318,15323,438,787,19068,38231,631,328,322,26718,32 She is reading a book in the park. => 25387,438,9175,312,7618,328,322,880,93,32 We went for a walk on the beach yesterday. => 3122,14236,436,312,13503,544,322,526,867,39485,32 He plays the guitar like a pro. => 1331,41271,322,3932,19931,2124,312,534,32 They are going to the movies tonight. => 31805,884,6783,372,322,27889,26076,694,32 The flowers are blooming in the garden. => 1318,7290,483,884,323,18466,299,328,322,485,22461,32 I enjoy listening to classical music. => 59,31567,20498,372,443,1578,17522,32 We need to buy groceries for the week. => 3122,1849,372,16968,20234,85,6958,436,322,8209,32 The dog is chasing its tail in circles. => 1318,27435,438,663,9949,2819,13203,328,46428,32 She is wearing a beautiful red dress. => 25387,438,996,6992,312,36493,3346,343,714,32 He is a talented actor in Hollywood. => 1331,438,312,273,9556,318,16038,328,48228,631,21118,32 The children are playing in the playground. => 1318,5713,884,19788,328,322,4654,1749,32 I'm going to visit my grandparents this weekend. => 59,3464,6783,372,7725,1672,33162,19277,458,40618,32 The coffee tastes bitter without sugar. => 1318,36917,273,633,307,3493,391,2876,309,18628,32 They are planning a surprise party for her. => 31805,884,26116,312,6178,9251,15270,436,7791,32 She sings like an angel on stage. => 25387,309,2052,2124,600,600,17691,544,10019,32 We should take a vacation to relax. => 3122,1395,4818,312,29164,367,372,41972,32 He is studying medicine at the university. => 1331,438,14866,299,32388,482,821,322,707,9190,32 The rain is pouring heavily outside. => 1318,36987,438,9202,299,46003,2801,11127,32 I enjoy watching romantic movies. => 59,31567,37652,26045,7268,27889,32 They are celebrating their anniversary today. => 31805,884,48278,839,1741,3623,23921,5810,672,11610,32 She dances gracefully to the music. => 25387,343,3151,31376,4938,372,322,17522,32 He is an excellent basketball player. => 1331,438,600,39203,48400,11653,4362,32 The baby is sleeping soundly in the crib. => 1318,323,17156,438,9368,299,9934,631,328,322,281,7972,32 I need to finish my homework before dinner. => 59,1849,372,11361,1672,6765,1007,2670,343,3369,32 They are organizing a charity event next month. => 31805,884,10558,6183,312,1351,543,1692,2354,6811,32 She is cooking a delicious meal for us. => 25387,438,23682,299,312,409,406,2406,597,279,436,1770,32 We should go hiking in the mountains. => 3122,1395,1983,420,1546,299,328,322,10874,1907,32 The car broke down on the way to work. 
=> 1318,6346,43289,2835,544,322,3352,372,1389,32 He loves playing video games in his free time. => 1331,598,4954,19788,6027,19705,328,6697,3741,1133,32 The birds are chirping in the trees. => 1318,8424,3210,884,663,476,7075,328,322,23453,32 I want to learn how to play the piano. => 59,2637,372,7350,2624,372,4654,322,298,25757,32 They are building a new shopping mall in the city. => 31805,884,9038,312,537,40692,345,464,328,322,11297,32 She is writing a novel in her spare time. => 25387,438,4127,312,32913,328,7791,1869,586,1133,32 We are going to the zoo this Saturday. => 3122,884,6783,372,322,1288,604,458,358,30288,32 The cake looks delicious with chocolate frosting. => 1318,281,1062,7780,409,406,2406,623,10408,27589,296,20932,299,32 He is a talented painter who sells his artwork. => 1331,438,312,273,9556,318,42300,6560,10800,101,6697,5549,1007,32 The students are studying for their exams. => 1318,16512,884,14866,299,436,3623,538,1462,32 I enjoy swimming in the ocean. => 59,31567,2535,449,6714,328,322,337,18857,32 They are renovating their house. => 31805,884,316,15007,1741,3623,17075,32 She is practicing yoga to stay healthy. => 25387,438,11808,11636,533,40067,372,20005,44538,32 We should plant flowers in the garden. => 3122,1395,26795,7290,483,328,322,485,22461,32 The traffic is heavy during rush hour. => 1318,16391,438,32389,5929,540,1372,12021,32 He is a skilled chef who creates amazing dishes. => 1331,438,312,3001,12088,44051,6560,9585,36986,1214,4279,32 The baby is crawling on the floor. => 1318,323,17156,438,281,1294,2920,544,322,17648,32 I need to buy a new pair of shoes. => 59,1849,372,16968,312,537,6092,432,787,37764,32 They are going on a road trip across the country. => 31805,884,6783,544,312,24122,19337,10160,322,10769,32 She is playing the piano beautifully. => 25387,438,19788,322,298,25757,526,4846,325,514,107,32 We are going to a concert tomorrow night. => 3122,884,6783,372,312,457,6989,31841,19212,32 The cake tastes delicious with vanilla frosting. => 1318,281,1062,273,633,307,409,406,2406,623,44653,296,20932,299,32 He is a dedicated teacher who inspires his students. => 1331,438,312,23112,30877,6560,26194,8017,6697,16512,32 The students are participating in a science fair. => 1318,16512,884,24623,1741,328,312,27536,19375,32 I enjoy hiking in the mountains. => 59,31567,420,1546,299,328,322,10874,1907,32 They are organizing a beach cleanup next weekend. => 31805,884,10558,6183,312,526,867,13144,2354,40618,32 She is taking photographs of nature. => 25387,438,15137,15110,23626,432,24406,32 We should try a new restaurant in town. => 3122,1395,1596,312,537,43719,328,38212,32 The traffic is moving slowly on the highway. => 1318,16391,438,14089,12899,631,544,322,3857,3073,32 He is a talented singer with a beautiful voice. => 1331,438,312,273,9556,318,309,10118,623,312,36493,20309,32 The baby is laughing and giggling. => 1318,323,17156,438,2317,2943,299,461,485,365,36088,32 I need to do laundry and wash my clothes. => 59,1849,372,745,2317,642,994,461,341,917,1672,7375,46948,32 They are planning a trip to Europe. => 31805,884,26116,312,19337,372,27268,32 She is learning how to play the guitar. => 25387,438,9608,2624,372,4654,322,3932,19931,32 We are going to a museum this Sunday. => 3122,884,6783,372,312,345,539,378,458,358,28036,32 The coffee smells amazing in the morning. => 1318,36917,309,42153,101,36986,328,322,33768,32 He is a hardworking farmer who grows crops. => 1331,438,312,6784,13578,9019,2302,6560,485,2138,25170,1069,32 The students are presenting their research projects. 
=> 1318,16512,884,5024,299,3623,13234,8528,32 I enjoy playing soccer with my friends. => 59,31567,19788,22682,10035,623,1672,22523,32 They are volunteering at a local shelter. => 31805,884,3920,45585,8637,821,312,2196,309,2542,391,32 She is practicing martial arts for self-defense. => 25387,438,11808,11636,345,502,564,5549,101,436,630,31,43694,32 We should try a new recipe for dinner. => 3122,1395,1596,312,537,15233,436,343,3369,32 The traffic is congest => 1318,16391,438,457,2776 The sun is shining brightly today. => 1318,15323,438,787,19068,38231,631,11610,32 I enjoy reading books in my free time. => 59,31567,9175,21739,328,1672,3741,1133,32 She plays the piano beautifully. => 25387,41271,322,298,25757,526,4846,325,514,107,32 The cat chased the mouse around the room. => 1318,10501,663,16109,322,8459,6835,322,8355,32 I love eating pizza with extra cheese. => 59,14290,484,1741,47630,623,6717,8277,30315,32 He always wears a hat wherever he goes. => 1331,5182,996,4177,312,25793,2154,424,938,13107,32 The flowers in the garden are blooming. => 1318,7290,483,328,322,485,22461,884,323,18466,299,32 She danced gracefully on the stage. => 25387,343,6087,31376,4938,544,322,10019,32 The dog barked loudly in the park. => 1318,27435,323,1087,318,598,836,631,328,322,880,93,32 We went swimming in the ocean yesterday. => 3122,14236,2535,449,6714,328,322,337,18857,39485,32 He speaks fluent French and Spanish. => 1331,24498,101,38055,43652,461,14911,1708,32 The train arrived at the station on time. => 1318,5683,2099,32114,821,322,18662,544,1133,32 She cooked a delicious meal for her family. => 25387,23682,318,312,409,406,2406,597,279,436,7791,13872,32 ggml-org-ggml-3678254/examples/prompts/test-cases.txt000066400000000000000000000117461512524704700225330ustar00rootroot00000000000000# test case format # : English: Hello World! English: I can't believe it's already Friday!" English: The URL for the website is https://www.example.com." English: "She said, 'I love to travel.'" English: 'The temperature is 25.5°C.' English: "Let's meet at 2:30 p.m. in the park." English: The book costs $19.99 English: "John's favorite color is blue." English: Th@nk y0u f0r y0ur h3lp! English: C@n I g3t a c0ffee, pl3@se? English: W0w! Th@t's @m@zing! English: H0w 4re y0u t0d@y? English: I l0ve t0 tr@vel @r0und the w0rld. English: Wh@t's y0ur f@v0rite m0vie? English: The cat is sleeping on the mat. English: I need to buy some groceries for dinner. English: The sun is shining brightly in the sky. English: She is reading a book in the park. English: We went for a walk on the beach yesterday. English: He plays the guitar like a pro. English: They are going to the movies tonight. English: The flowers are blooming in the garden. English: I enjoy listening to classical music. English: We need to buy groceries for the week. English: The dog is chasing its tail in circles. English: She is wearing a beautiful red dress. English: He is a talented actor in Hollywood. English: The children are playing in the playground. English: I'm going to visit my grandparents this weekend. English: The coffee tastes bitter without sugar. English: They are planning a surprise party for her. English: She sings like an angel on stage. English: We should take a vacation to relax. English: He is studying medicine at the university. English: The rain is pouring heavily outside. English: I enjoy watching romantic movies. English: They are celebrating their anniversary today. English: She dances gracefully to the music. 
English: He is an excellent basketball player. English: The baby is sleeping soundly in the crib. English: I need to finish my homework before dinner. English: They are organizing a charity event next month. English: She is cooking a delicious meal for us. English: We should go hiking in the mountains. English: The car broke down on the way to work. English: He loves playing video games in his free time. English: The birds are chirping in the trees. English: I want to learn how to play the piano. English: They are building a new shopping mall in the city. English: She is writing a novel in her spare time. English: We are going to the zoo this Saturday. English: The cake looks delicious with chocolate frosting. English: He is a talented painter who sells his artwork. English: The students are studying for their exams. English: I enjoy swimming in the ocean. English: They are renovating their house. English: She is practicing yoga to stay healthy. English: We should plant flowers in the garden. English: The traffic is heavy during rush hour. English: He is a skilled chef who creates amazing dishes. English: The baby is crawling on the floor. English: I need to buy a new pair of shoes. English: They are going on a road trip across the country. English: She is playing the piano beautifully. English: We are going to a concert tomorrow night. English: The cake tastes delicious with vanilla frosting. English: He is a dedicated teacher who inspires his students. English: The students are participating in a science fair. English: I enjoy hiking in the mountains. English: They are organizing a beach cleanup next weekend. English: She is taking photographs of nature. English: We should try a new restaurant in town. English: The traffic is moving slowly on the highway. English: He is a talented singer with a beautiful voice. English: The baby is laughing and giggling. English: I need to do laundry and wash my clothes. English: They are planning a trip to Europe. English: She is learning how to play the guitar. English: We are going to a museum this Sunday. English: The coffee smells amazing in the morning. English: He is a hardworking farmer who grows crops. English: The students are presenting their research projects. English: I enjoy playing soccer with my friends. English: They are volunteering at a local shelter. English: She is practicing martial arts for self-defense. English: We should try a new recipe for dinner. English: The traffic is congest English: The sun is shining brightly today. English: I enjoy reading books in my free time. English: She plays the piano beautifully. English: The cat chased the mouse around the room. English: I love eating pizza with extra cheese. English: He always wears a hat wherever he goes. English: The flowers in the garden are blooming. English: She danced gracefully on the stage. English: The dog barked loudly in the park. English: We went swimming in the ocean yesterday. English: He speaks fluent French and Spanish. English: The train arrived at the station on time. English: She cooked a delicious meal for her family. Korean: 이것은 테스트 이다. Korean: 걱정할 필요 없다. Korean: 버그는 언젠가 고쳐진다. Japanese: 明日の天気はどうですか。 Chinese: 请问洗手间在哪里? Emoji: I'm feeling 😄 today! 
Unicode: ◑ ▢ ▣ ◱ggml-org-ggml-3678254/examples/prompts/tokenize_huggingface.py000066400000000000000000000063251512524704700244450ustar00rootroot00000000000000import os from transformers import AutoTokenizer os.environ['TOKENIZERS_PARALLELISM'] = "false" list_repo_hf = ["databricks/dolly-v2-3b", # dolly-v2 (3b, 7b, 12b models share the same tokenizer) "gpt2", # gpt-2 (gpt2-xl, gpt2-large share the same tokenizer) "uer/gpt2-chinese-cluecorpussmall", # gpt-2-chinese "EleutherAI/gpt-j-6b", # gpt-j "EleutherAI/gpt-neox-20b", # gpt-neox "EleutherAI/polyglot-ko-1.3b", # gpt-neox (polyglot-ko 5.8b and 12.8b share the same tokenizer") "rinna/japanese-gpt-neox-3.6b", # gpt-neox # mpt-7b (uses gpt-neox-20b tokenizer) "replit/replit-code-v1-3b", # replit "bigcode/starcoder", # starcoder (huggingface-cli login required) "openai/whisper-tiny" # whisper (base, large, large-v2 share the same tokenizer) ] repo2ggml = {"databricks/dolly-v2-3b" : "dolly-v2", "gpt2" : "gpt-2", "uer/gpt2-chinese-cluecorpussmall" : "gpt-2-chinese", "EleutherAI/gpt-j-6b" : "gpt-j", "EleutherAI/gpt-neox-20b" : "gpt-neox", "EleutherAI/polyglot-ko-1.3b" : "polyglot-ko", "rinna/japanese-gpt-neox-3.6b" : "gpt-neox-japanese", "replit/replit-code-v1-3b" : "replit", "bigcode/starcoder" : "starcoder", "openai/whisper-tiny" : "whisper"} repo2language = {"databricks/dolly-v2-3b" : "english", "gpt2" : "english", "uer/gpt2-chinese-cluecorpussmall" : "chinese", "EleutherAI/gpt-j-6b" : "english", "EleutherAI/gpt-neox-20b" : "english", "EleutherAI/polyglot-ko-1.3b" : "korean", "rinna/japanese-gpt-neox-3.6b" : "japanese", "replit/replit-code-v1-3b" : "english", "bigcode/starcoder" : "english", "openai/whisper-tiny" : "english"} delimeter = ": " test_sentences = [] with open("test-cases.txt", "r") as f: lines = [l.rstrip() for l in f.readlines()] for l in lines: if delimeter in l: language = l[:l.index(delimeter)] sentence = l[l.index(delimeter) + len(delimeter):] test_sentences.append((language.lower(), sentence)) for repo in list_repo_hf: target_language = repo2language[repo] tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True) tokens_hf = [] for language, sentence in test_sentences: if language == target_language: tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentence)) tokens_hf.append((sentence, tokens)) save_txt = repo2ggml[repo] + ".txt" with open(save_txt, "w") as f: f.writelines([sentence + " => " + ",".join(str(t) for t in tokens) + "\n" for sentence, tokens in tokens_hf]) ggml-org-ggml-3678254/examples/prompts/whisper.txt000066400000000000000000000210271512524704700221320ustar00rootroot00000000000000Hello World! => 15947,3937,0 I can't believe it's already Friday!" => 40,393,380,1697,309,311,1217,6984,2963 The URL for the website is https://www.example.com." => 2278,12905,337,220,3322,3144,307,34426,21492,17919,13,3121,335,781,13,1112,889 "She said, 'I love to travel.'" => 1,9526,848,11,922,40,959,220,1353,220,17227,779,28763 'The temperature is 25.5°C.' => 6,2278,220,18275,610,1503,307,3552,13,20,11782,34,4443 "Let's meet at 2:30 p.m. in the park." => 1,8373,311,1677,412,568,25,3446,280,13,76,13,294,220,3322,3884,889 The book costs $19.99 => 2278,1446,5497,1848,3405,13,8494 "John's favorite color is blue." => 1,16938,311,2954,2017,307,3344,889 Th@nk y0u f0r y0ur h3lp! => 2434,31,77,74,288,15,84,283,15,81,288,15,374,276,18,75,79,0 C@n I g3t a c0ffee, pl3@se? => 34,31,77,286,290,18,83,257,269,15,4617,11,499,18,31,405,30 W0w! Th@t's @m@zing! 
=> 54,15,86,0,334,31,83,311,10428,76,31,8781,0 H0w 4re y0u t0d@y? => 39,15,86,1017,265,288,15,84,220,83,15,67,31,88,30 I l0ve t0 tr@vel @r0und the w0rld. => 40,287,15,303,220,83,15,220,6903,31,779,10428,81,15,997,220,3322,261,15,81,348,13 Wh@t's y0ur f@v0rite m0vie? => 2471,31,83,311,288,15,374,283,31,85,15,35002,275,15,12702,30 The cat is sleeping on the mat. => 2278,3857,307,8296,322,220,3322,3803,13 I need to buy some groceries for dinner. => 40,643,220,1353,2256,512,31391,337,6148,13 The sun is shining brightly in the sky. => 2278,3295,307,18269,47418,294,220,3322,5443,13 She is reading a book in the park. => 9526,307,3760,257,1446,294,220,3322,3884,13 We went for a walk on the beach yesterday. => 4360,1437,337,257,1792,322,220,3322,7534,5186,13 He plays the guitar like a pro. => 5205,5749,220,3322,7531,411,257,447,13 They are going to the movies tonight. => 8829,366,516,220,1353,220,3322,6233,220,1756,397,13 The flowers are blooming in the garden. => 2278,8085,366,45294,294,220,3322,7431,13 I enjoy listening to classical music. => 40,2103,4764,220,1353,13735,1318,13 We need to buy groceries for the week. => 4360,643,220,1353,2256,31391,337,220,3322,1243,13 The dog is chasing its tail in circles. => 2278,3000,307,17876,1080,220,14430,294,13040,13 She is wearing a beautiful red dress. => 9526,307,4769,257,2238,2182,5231,13 He is a talented actor in Hollywood. => 5205,307,257,220,32831,6003,8747,294,11628,13 The children are playing in the playground. => 2278,2227,366,2433,294,220,3322,24646,13 I'm going to visit my grandparents this weekend. => 40,478,516,220,1353,3441,452,21876,220,11176,6711,13 The coffee tastes bitter without sugar. => 2278,4982,220,83,40246,13871,1553,5076,13 They are planning a surprise party for her. => 8829,366,5038,257,6365,3595,337,720,13 She sings like an angel on stage. => 9526,23250,411,364,14250,322,3233,13 We should take a vacation to relax. => 4360,820,220,27612,257,12830,220,1353,5789,13 He is studying medicine at the university. => 5205,307,7601,7195,412,220,3322,5454,13 The rain is pouring heavily outside. => 2278,4830,307,20450,10950,2380,13 I enjoy watching romantic movies. => 40,2103,1976,13590,6233,13 They are celebrating their anniversary today. => 8829,366,15252,220,3322,347,12962,220,83,378,320,13 She dances gracefully to the music. => 9526,28322,10042,2277,220,1353,220,3322,1318,13 He is an excellent basketball player. => 5205,307,364,7103,11767,4256,13 The baby is sleeping soundly in the crib. => 2278,3186,307,8296,1626,356,294,220,3322,47163,13 I need to finish my homework before dinner. => 40,643,220,1353,2413,452,14578,949,6148,13 They are organizing a charity event next month. => 8829,366,17608,257,16863,2280,958,1618,13 She is cooking a delicious meal for us. => 9526,307,6361,257,4809,6791,337,505,13 We should go hiking in the mountains. => 4360,820,352,23784,294,220,3322,10233,13 The car broke down on the way to work. => 2278,1032,6902,760,322,220,3322,636,220,1353,589,13 He loves playing video games in his free time. => 5205,6752,2433,960,2813,294,702,1737,220,3766,13 The birds are chirping in the trees. => 2278,9009,366,36682,294,220,3322,220,3599,279,13 I want to learn how to play the piano. => 40,528,220,1353,1466,577,220,1353,862,220,3322,9211,13 They are building a new shopping mall in the city. => 8829,366,2390,257,777,8688,16026,294,220,3322,2307,13 She is writing a novel in her spare time. => 9526,307,3579,257,7613,294,720,13798,220,3766,13 We are going to the zoo this Saturday. 
=> 4360,366,516,220,1353,220,3322,25347,220,11176,8803,13 The cake looks delicious with chocolate frosting. => 2278,5908,1542,4809,365,6215,37048,13 He is a talented painter who sells his artwork. => 5205,307,257,220,32831,6003,26619,567,20897,702,15829,13 The students are studying for their exams. => 2278,1731,366,7601,337,220,3322,347,20514,13 I enjoy swimming in the ocean. => 40,2103,11989,294,220,3322,7810,13 They are renovating their house. => 8829,366,18845,990,220,3322,347,1782,13 She is practicing yoga to stay healthy. => 9526,307,11350,15128,220,1353,1754,4627,13 We should plant flowers in the garden. => 4360,820,3709,8085,294,220,3322,7431,13 The traffic is heavy during rush hour. => 2278,220,17227,3341,307,4676,1830,9300,1773,13 He is a skilled chef who creates amazing dishes. => 5205,307,257,19690,10530,567,7829,2243,10814,13 The baby is crawling on the floor. => 2278,3186,307,32979,322,220,3322,4123,13 I need to buy a new pair of shoes. => 40,643,220,1353,2256,257,777,6119,295,6654,13 They are going on a road trip across the country. => 8829,366,516,322,257,3060,220,83,8400,2108,220,3322,1941,13 She is playing the piano beautifully. => 9526,307,2433,220,3322,9211,16525,13 We are going to a concert tomorrow night. => 4360,366,516,220,1353,257,8543,220,83,298,3162,1818,13 The cake tastes delicious with vanilla frosting. => 2278,5908,220,83,40246,4809,365,17528,37048,13 He is a dedicated teacher who inspires his students. => 5205,307,257,8374,220,975,4062,567,32566,702,1731,13 The students are participating in a science fair. => 2278,1731,366,13950,294,257,3497,3143,13 I enjoy hiking in the mountains. => 40,2103,23784,294,220,3322,10233,13 They are organizing a beach cleanup next weekend. => 8829,366,17608,257,7534,40991,958,6711,13 She is taking photographs of nature. => 9526,307,220,48625,17649,295,3687,13 We should try a new restaurant in town. => 4360,820,220,83,627,257,777,6383,294,220,30401,13 The traffic is moving slowly on the highway. => 2278,220,17227,3341,307,2684,5692,322,220,3322,17205,13 He is a talented singer with a beautiful voice. => 5205,307,257,220,32831,6003,11564,365,257,2238,3177,13 The baby is laughing and giggling. => 2278,3186,307,5059,293,290,24542,13 I need to do laundry and wash my clothes. => 40,643,220,1353,360,19811,293,5675,452,5534,13 They are planning a trip to Europe. => 8829,366,5038,257,220,83,8400,220,1353,3315,13 She is learning how to play the guitar. => 9526,307,2539,577,220,1353,862,220,3322,7531,13 We are going to a museum this Sunday. => 4360,366,516,220,1353,257,8441,220,11176,7776,13 The coffee smells amazing in the morning. => 2278,4982,10036,2243,294,220,3322,2446,13 He is a hardworking farmer who grows crops. => 5205,307,257,1152,22475,17891,567,13156,16829,13 The students are presenting their research projects. => 2278,1731,366,15578,220,3322,347,2132,4455,13 I enjoy playing soccer with my friends. => 40,2103,2433,15469,365,452,1855,13 They are volunteering at a local shelter. => 8829,366,33237,412,257,2654,13341,13 She is practicing martial arts for self-defense. => 9526,307,11350,20755,8609,337,2698,12,49268,13 We should try a new recipe for dinner. => 4360,820,220,83,627,257,777,6782,337,6148,13 The traffic is congest => 2278,220,17227,3341,307,31871 The sun is shining brightly today. => 2278,3295,307,18269,47418,220,83,378,320,13 I enjoy reading books in my free time. => 40,2103,3760,3642,294,452,1737,220,3766,13 She plays the piano beautifully. => 9526,5749,220,3322,9211,16525,13 The cat chased the mouse around the room. 
=> 2278,3857,33091,220,3322,9719,926,220,3322,1808,13 I love eating pizza with extra cheese. => 40,959,3936,8298,365,2857,5399,13 He always wears a hat wherever he goes. => 5205,1009,20877,257,2385,8660,415,1709,13 The flowers in the garden are blooming. => 2278,8085,294,220,3322,7431,366,45294,13 She danced gracefully on the stage. => 9526,32909,10042,2277,322,220,3322,3233,13 The dog barked loudly in the park. => 2278,3000,16202,292,22958,294,220,3322,3884,13 We went swimming in the ocean yesterday. => 4360,1437,11989,294,220,3322,7810,5186,13 He speaks fluent French and Spanish. => 5205,10789,40799,5522,293,8058,13 The train arrived at the station on time. => 2278,220,83,7146,6678,412,220,3322,5214,322,220,3766,13 She cooked a delicious meal for her family. => 9526,9267,257,4809,6791,337,720,1605,13 ggml-org-ggml-3678254/examples/python/000077500000000000000000000000001512524704700175235ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/python/README.md000066400000000000000000000113151512524704700210030ustar00rootroot00000000000000# Simple autogenerated Python bindings for ggml This folder contains: - Scripts to generate full Python bindings from ggml headers (+ stubs for autocompletion in IDEs) - Some barebones utils (see [ggml/utils.py](./ggml/utils.py)): - `ggml.utils.init` builds a context that's freed automatically when the pointer gets GC'd - `ggml.utils.copy` **copies between same-shaped tensors (numpy or ggml), w/ automatic (de/re)quantization** - `ggml.utils.numpy` returns a numpy view over a ggml tensor; if it's quantized, it returns a copy (requires `allow_copy=True`) - Very basic examples (anyone wants to port [llama2.c](https://github.com/karpathy/llama2.c)?) Provided you set `GGML_LIBRARY=.../path/to/libggml_shared.so` (see instructions below), it's trivial to do some operations on quantized tensors: ```python # Make sure libllama.so is in your [DY]LD_LIBRARY_PATH, or set GGML_LIBRARY=.../libggml_shared.so from ggml import lib, ffi from ggml.utils import init, copy, numpy import numpy as np ctx = init(mem_size=12*1024*1024) n = 256 n_threads = 4 a = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n) b = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n) # Can't both be quantized sum = lib.ggml_add(ctx, a, b) # all zeroes for now. Will be quantized too! gf = ffi.new('struct ggml_cgraph*') lib.ggml_build_forward_expand(gf, sum) copy(np.array([i for i in range(n)], np.float32), a) copy(np.array([i*100 for i in range(n)], np.float32), b) lib.ggml_graph_compute_with_ctx(ctx, gf, n_threads) print(numpy(a, allow_copy=True)) # 0. 1.0439453 2.0878906 3.131836 4.1757812 5.2197266. ... print(numpy(b)) # 0. 100. 200. 300. 400. 500. ... print(numpy(sum, allow_copy=True)) # 0. 105.4375 210.875 316.3125 421.75 527.1875 ... ``` ### Prerequisites You'll need a shared library of ggml to use the bindings. 
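Once the shared library is built and `GGML_LIBRARY` points at it (see the build steps below), a quick smoke test that the bindings can locate and load it might look like the following minimal sketch (the `ggml_cpu_has_*` calls are used only because they take no arguments; any cheap call would do):

```python
# Minimal load check for the bindings.
# Assumes GGML_LIBRARY (or [DY]LD_LIBRARY_PATH) already points at libggml_shared.so / libllama.so.
from ggml import lib

# If the import above succeeded, the shared library was found and its symbols resolved;
# these no-argument queries just confirm we can actually call into it.
print("AVX :", lib.ggml_cpu_has_avx())
print("NEON:", lib.ggml_cpu_has_neon())
print("BLAS:", lib.ggml_cpu_has_blas())
```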
#### Build libggml_shared.so or libllama.so As of this writing the best is to use [ggerganov/llama.cpp](https://github.com/ggerganov/llama.cpp)'s generated `libggml_shared.so` or `libllama.so`, which you can build as follows: ```bash git clone https://github.com/ggerganov/llama.cpp # On a CUDA-enabled system add -DLLAMA_CUDA=1 # On a Mac add -DLLAMA_METAL=1 cmake llama.cpp \ -B llama_build \ -DCMAKE_C_FLAGS=-Ofast \ -DLLAMA_NATIVE=1 \ -DLLAMA_LTO=1 \ -DBUILD_SHARED_LIBS=1 \ -DLLAMA_MPI=1 \ -DLLAMA_BUILD_TESTS=0 \ -DLLAMA_BUILD_EXAMPLES=0 ( cd llama_build && make -j ) # On Mac, this will be libggml_shared.dylib instead export GGML_LIBRARY=$PWD/llama_build/libggml_shared.so # Alternatively, you can just copy it to your system's lib dir, e.g /usr/local/lib ``` #### (Optional) Regenerate the bindings and stubs If you added or changed any signatures of the C API, you'll want to regenerate the bindings ([ggml/cffi.py](./ggml/cffi.py)) and stubs ([ggml/__init__.pyi](./ggml/__init__.pyi)). Luckily it's a one-liner using [regenerate.py](./regenerate.py): ```bash pip install -q cffi python regenerate.py ``` By default it assumes `llama.cpp` was cloned in ../../../llama.cpp (alongside the ggml folder). You can override this with: ```bash C_INCLUDE_DIR=$LLAMA_CPP_DIR python regenerate.py ``` You can also edit [api.h](./api.h) to control which files should be included in the generated bindings (defaults to `llama.cpp/ggml*.h`) In fact, if you wanted to only generate bindings for the current version of the `ggml` repo itself (instead of `llama.cpp`; you'd loose support for k-quants), you could run: ```bash API=../../include/ggml.h python regenerate.py ``` ## Develop Run tests: ```bash pytest ``` ### Alternatives This example's goal is to showcase [cffi](https://cffi.readthedocs.io/)-generated bindings that are trivial to use and update, but there are already alternatives in the wild: - https://github.com/abetlen/ggml-python: these bindings seem to be hand-written and use [ctypes](https://docs.python.org/3/library/ctypes.html). It has [high-quality API reference docs](https://ggml-python.readthedocs.io/en/latest/api-reference/#ggml.ggml) that can be used with these bindings too, but it doesn't expose Metal, CUDA, MPI or OpenCL calls, doesn't support transparent (de/re)quantization like this example does (see [ggml.utils](./ggml/utils.py) module), and won't pick up your local changes. - https://github.com/abetlen/llama-cpp-python: these expose the C++ `llama.cpp` interface, which this example cannot easily be extended to support (`cffi` only generates bindings of C libraries) - [pybind11](https://github.com/pybind/pybind11) and [nanobind](https://github.com/wjakob/nanobind) are two alternatives to cffi that support binding C++ libraries, but it doesn't seem either of them have an automatic generator (writing bindings is rather time-consuming). ggml-org-ggml-3678254/examples/python/api.h000066400000000000000000000006331512524704700204470ustar00rootroot00000000000000/* List here all the headers you want to expose in the Python bindings, then run `python regenerate.py` (see details in README.md) */ #include "ggml.h" #include "ggml-metal.h" #include "ggml-opencl.h" // Headers below are currently only present in the llama.cpp repository, comment them out if you don't have them. 
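// Note: if you only have the standalone ggml repo and no llama.cpp checkout, you can comment out
// everything except the `#include "ggml.h"` line above, or, as README.md notes, run
// `API=../../include/ggml.h python regenerate.py` to generate bindings for the current ggml repo only.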
#include "k_quants.h" #include "ggml-alloc.h" #include "ggml-cuda.h" #include "ggml-mpi.h"ggml-org-ggml-3678254/examples/python/example_add_quant.py000066400000000000000000000015251512524704700235530ustar00rootroot00000000000000from ggml import lib, ffi from ggml.utils import init, copy, numpy import numpy as np ctx = init(mem_size=12*1024*1024) # automatically freed when pointer is GC'd n = 256 n_threads = 4 a = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n) b = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n) # can't both be quantized sum = lib.ggml_add(ctx, a, b) # all zeroes for now. Will be quantized too! # See cffi's doc on how to allocate native memory: it's very simple! # https://cffi.readthedocs.io/en/latest/ref.html#ffi-interface gf = ffi.new('struct ggml_cgraph*') lib.ggml_build_forward_expand(gf, sum) copy(np.array([i for i in range(n)], np.float32), a) copy(np.array([i*100 for i in range(n)], np.float32), b) lib.ggml_graph_compute_with_ctx(ctx, gf, n_threads) print(numpy(a, allow_copy=True)) print(numpy(b)) print(numpy(sum, allow_copy=True))ggml-org-ggml-3678254/examples/python/example_test_all_quants.py000066400000000000000000000036321512524704700250160ustar00rootroot00000000000000from ggml import ffi, lib from ggml.utils import init, numpy, copy import numpy as np from math import pi, cos, sin, ceil import matplotlib.pyplot as plt ctx = init(mem_size=100*1024*1024) # Will be auto-GC'd n = 256 orig = np.array([ [ cos(j * 2 * pi / n) * (sin(i * 2 * pi / n)) for j in range(n) ] for i in range(n) ], np.float32) orig_tensor = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, n, n) copy(orig, orig_tensor) quants = [ type for type in range(lib.GGML_TYPE_COUNT) if lib.ggml_is_quantized(type) and type not in [lib.GGML_TYPE_Q8_1, lib.GGML_TYPE_Q8_K] # Apparently not supported ] # quants = [lib.GGML_TYPE_Q2_K] # Test a single one def get_name(type): name = lib.ggml_type_name(type) return ffi.string(name).decode('utf-8') if name else '?' quants.sort(key=get_name) quants.insert(0, None) print(quants) ncols=4 nrows = ceil(len(quants) / ncols) plt.figure(figsize=(ncols * 5, nrows * 5), layout='tight') for i, type in enumerate(quants): plt.subplot(nrows, ncols, i + 1) try: if type == None: plt.title('Original') plt.imshow(orig) else: quantized_tensor = lib.ggml_new_tensor_2d(ctx, type, n, n) copy(orig_tensor, quantized_tensor) quantized = numpy(quantized_tensor, allow_copy=True) d = quantized - orig results = { "l2": np.linalg.norm(d, 2), "linf": np.linalg.norm(d, np.inf), "compression": round(lib.ggml_nbytes(orig_tensor) / lib.ggml_nbytes(quantized_tensor), 1) } name = get_name(type) print(f'{name}: {results}') plt.title(f'{name} ({results["compression"]}x smaller)') plt.imshow(quantized, interpolation='nearest') except Exception as e: print(f'Error: {e}') plt.show()ggml-org-ggml-3678254/examples/python/ggml/000077500000000000000000000000001512524704700204515ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/python/ggml/__init__.py000066400000000000000000000035721512524704700225710ustar00rootroot00000000000000""" Python bindings for the ggml library. 
Usage example: from ggml import lib, ffi from ggml.utils import init, copy, numpy import numpy as np ctx = init(mem_size=10*1024*1024) n = 1024 n_threads = 4 a = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n) b = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n) sum = lib.ggml_add(ctx, a, b) gf = ffi.new('struct ggml_cgraph*') lib.ggml_build_forward_expand(gf, sum) copy(np.array([i for i in range(n)], np.float32), a) copy(np.array([i*100 for i in range(n)], np.float32), b) lib.ggml_graph_compute_with_ctx(ctx, gf, n_threads) print(numpy(sum, allow_copy=True)) See https://cffi.readthedocs.io/en/latest/cdef.html for more on cffi. """ try: from ggml.cffi import ffi as ffi except ImportError as e: raise ImportError(f"Couldn't find ggml bindings ({e}). Run `python regenerate.py` or check your PYTHONPATH.") import os, platform __exact_library = os.environ.get("GGML_LIBRARY") if __exact_library: __candidates = [__exact_library] elif platform.system() == "Windows": __candidates = ["ggml_shared.dll", "llama.dll"] else: __candidates = ["libggml_shared.so", "libllama.so"] if platform.system() == "Darwin": __candidates += ["libggml_shared.dylib", "libllama.dylib"] for i, name in enumerate(__candidates): try: # This is where all the functions, enums and constants are defined lib = ffi.dlopen(name) except OSError: if i < len(__candidates) - 1: continue raise OSError(f"Couldn't find ggml's shared library (tried names: {__candidates}). Add its directory to DYLD_LIBRARY_PATH (on Mac) or LD_LIBRARY_PATH, or define GGML_LIBRARY.") # This contains the cffi helpers such as new, cast, string, etc. # https://cffi.readthedocs.io/en/latest/ref.html#ffi-interface ffi = ffi ggml-org-ggml-3678254/examples/python/ggml/__init__.pyi000066400000000000000000002701451512524704700227440ustar00rootroot00000000000000# auto-generated file import ggml.ffi as ffi import numpy as np class lib: @property def GGML_BACKEND_CPU(self) -> int: ... @property def GGML_BACKEND_GPU(self) -> int: ... @property def GGML_BACKEND_GPU_SPLIT(self) -> int: ... @property def GGML_FTYPE_ALL_F32(self) -> int: ... @property def GGML_FTYPE_MOSTLY_F16(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q2_K(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q3_K(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q4_0(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q4_1(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q4_1_SOME_F16(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q4_K(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q5_0(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q5_1(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q5_K(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q6_K(self) -> int: ... @property def GGML_FTYPE_MOSTLY_Q8_0(self) -> int: ... @property def GGML_FTYPE_UNKNOWN(self) -> int: ... @property def GGML_LINESEARCH_BACKTRACKING_ARMIJO(self) -> int: ... @property def GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE(self) -> int: ... @property def GGML_LINESEARCH_BACKTRACKING_WOLFE(self) -> int: ... @property def GGML_LINESEARCH_DEFAULT(self) -> int: ... @property def GGML_LINESEARCH_FAIL(self) -> int: ... @property def GGML_LINESEARCH_INVALID_PARAMETERS(self) -> int: ... @property def GGML_LINESEARCH_MAXIMUM_ITERATIONS(self) -> int: ... @property def GGML_LINESEARCH_MAXIMUM_STEP(self) -> int: ... @property def GGML_LINESEARCH_MINIMUM_STEP(self) -> int: ... @property def GGML_OBJECT_GRAPH(self) -> int: ... @property def GGML_OBJECT_TENSOR(self) -> int: ... @property def GGML_OBJECT_WORK_BUFFER(self) -> int: ... 
@property def GGML_OPT_ADAM(self) -> int: ... @property def GGML_OPT_DID_NOT_CONVERGE(self) -> int: ... @property def GGML_OPT_FAIL(self) -> int: ... @property def GGML_OPT_INVALID_WOLFE(self) -> int: ... @property def GGML_OPT_LBFGS(self) -> int: ... @property def GGML_OPT_NO_CONTEXT(self) -> int: ... @property def GGML_OPT_OK(self) -> int: ... @property def GGML_OP_ACC(self) -> int: ... @property def GGML_OP_ADD(self) -> int: ... @property def GGML_OP_ADD1(self) -> int: ... @property def GGML_OP_ALIBI(self) -> int: ... @property def GGML_OP_ARGMAX(self) -> int: ... @property def GGML_OP_CLAMP(self) -> int: ... @property def GGML_OP_CONT(self) -> int: ... @property def GGML_OP_CONV_1D(self) -> int: ... @property def GGML_OP_CONV_2D(self) -> int: ... @property def GGML_OP_COUNT(self) -> int: ... @property def GGML_OP_CPY(self) -> int: ... @property def GGML_OP_CROSS_ENTROPY_LOSS(self) -> int: ... @property def GGML_OP_CROSS_ENTROPY_LOSS_BACK(self) -> int: ... @property def GGML_OP_DIAG(self) -> int: ... @property def GGML_OP_DIAG_MASK_INF(self) -> int: ... @property def GGML_OP_DIAG_MASK_ZERO(self) -> int: ... @property def GGML_OP_DIV(self) -> int: ... @property def GGML_OP_DUP(self) -> int: ... @property def GGML_OP_FLASH_ATTN(self) -> int: ... @property def GGML_OP_FLASH_ATTN_BACK(self) -> int: ... @property def GGML_OP_FLASH_FF(self) -> int: ... @property def GGML_OP_GET_ROWS(self) -> int: ... @property def GGML_OP_GET_ROWS_BACK(self) -> int: ... @property def GGML_OP_LOG(self) -> int: ... @property def GGML_OP_MAP_BINARY(self) -> int: ... @property def GGML_OP_MAP_CUSTOM1(self) -> int: ... @property def GGML_OP_MAP_CUSTOM1_F32(self) -> int: ... @property def GGML_OP_MAP_CUSTOM2(self) -> int: ... @property def GGML_OP_MAP_CUSTOM2_F32(self) -> int: ... @property def GGML_OP_MAP_CUSTOM3(self) -> int: ... @property def GGML_OP_MAP_CUSTOM3_F32(self) -> int: ... @property def GGML_OP_MAP_UNARY(self) -> int: ... @property def GGML_OP_MEAN(self) -> int: ... @property def GGML_OP_MUL(self) -> int: ... @property def GGML_OP_MUL_MAT(self) -> int: ... @property def GGML_OP_NONE(self) -> int: ... @property def GGML_OP_NORM(self) -> int: ... @property def GGML_OP_OUT_PROD(self) -> int: ... @property def GGML_OP_PERMUTE(self) -> int: ... @property def GGML_OP_POOL_1D(self) -> int: ... @property def GGML_OP_POOL_2D(self) -> int: ... @property def GGML_OP_POOL_AVG(self) -> int: ... @property def GGML_OP_POOL_COUNT(self) -> int: ... @property def GGML_OP_POOL_MAX(self) -> int: ... @property def GGML_OP_REPEAT(self) -> int: ... @property def GGML_OP_REPEAT_BACK(self) -> int: ... @property def GGML_OP_RESHAPE(self) -> int: ... @property def GGML_OP_RMS_NORM(self) -> int: ... @property def GGML_OP_RMS_NORM_BACK(self) -> int: ... @property def GGML_OP_ROPE(self) -> int: ... @property def GGML_OP_ROPE_BACK(self) -> int: ... @property def GGML_OP_SCALE(self) -> int: ... @property def GGML_OP_SET(self) -> int: ... @property def GGML_OP_SILU_BACK(self) -> int: ... @property def GGML_OP_SOFT_MAX(self) -> int: ... @property def GGML_OP_SOFT_MAX_BACK(self) -> int: ... @property def GGML_OP_SQR(self) -> int: ... @property def GGML_OP_SQRT(self) -> int: ... @property def GGML_OP_SUB(self) -> int: ... @property def GGML_OP_SUM(self) -> int: ... @property def GGML_OP_SUM_ROWS(self) -> int: ... @property def GGML_OP_TRANSPOSE(self) -> int: ... @property def GGML_OP_UNARY(self) -> int: ... @property def GGML_OP_VIEW(self) -> int: ... @property def GGML_OP_WIN_PART(self) -> int: ... 
@property def GGML_OP_WIN_UNPART(self) -> int: ... @property def GGML_TASK_COMPUTE(self) -> int: ... @property def GGML_TASK_FINALIZE(self) -> int: ... @property def GGML_TASK_INIT(self) -> int: ... @property def GGML_TYPE_COUNT(self) -> int: ... @property def GGML_TYPE_F16(self) -> int: ... @property def GGML_TYPE_F32(self) -> int: ... @property def GGML_TYPE_I16(self) -> int: ... @property def GGML_TYPE_I32(self) -> int: ... @property def GGML_TYPE_I8(self) -> int: ... @property def GGML_TYPE_Q2_K(self) -> int: ... @property def GGML_TYPE_Q3_K(self) -> int: ... @property def GGML_TYPE_Q4_0(self) -> int: ... @property def GGML_TYPE_Q4_1(self) -> int: ... @property def GGML_TYPE_Q4_K(self) -> int: ... @property def GGML_TYPE_Q5_0(self) -> int: ... @property def GGML_TYPE_Q5_1(self) -> int: ... @property def GGML_TYPE_Q5_K(self) -> int: ... @property def GGML_TYPE_Q6_K(self) -> int: ... @property def GGML_TYPE_Q8_0(self) -> int: ... @property def GGML_TYPE_Q8_1(self) -> int: ... @property def GGML_TYPE_Q8_K(self) -> int: ... @property def GGML_UNARY_OP_ABS(self) -> int: ... @property def GGML_UNARY_OP_ELU(self) -> int: ... @property def GGML_UNARY_OP_GELU(self) -> int: ... @property def GGML_UNARY_OP_GELU_QUICK(self) -> int: ... @property def GGML_UNARY_OP_NEG(self) -> int: ... @property def GGML_UNARY_OP_RELU(self) -> int: ... @property def GGML_UNARY_OP_SGN(self) -> int: ... @property def GGML_UNARY_OP_SILU(self) -> int: ... @property def GGML_UNARY_OP_STEP(self) -> int: ... @property def GGML_UNARY_OP_TANH(self) -> int: ... @property def GGUF_TYPE_ARRAY(self) -> int: ... @property def GGUF_TYPE_BOOL(self) -> int: ... @property def GGUF_TYPE_COUNT(self) -> int: ... @property def GGUF_TYPE_FLOAT32(self) -> int: ... @property def GGUF_TYPE_INT16(self) -> int: ... @property def GGUF_TYPE_INT32(self) -> int: ... @property def GGUF_TYPE_INT8(self) -> int: ... @property def GGUF_TYPE_STRING(self) -> int: ... @property def GGUF_TYPE_UINT16(self) -> int: ... @property def GGUF_TYPE_UINT32(self) -> int: ... @property def GGUF_TYPE_UINT8(self) -> int: ... def abort_callback(data: ffi.CData) -> bool: """ abort ggml_graph_compute when true bool (*abort_callback)(void * data); """ ... def dequantize_row_q2_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """ Dequantization void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k); """ ... def dequantize_row_q3_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);""" ... def dequantize_row_q4_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);""" ... def dequantize_row_q5_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);""" ... def dequantize_row_q6_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);""" ... def dequantize_row_q8_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);""" ... def ggml_abs(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_abs( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_abs_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_abs_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... 
def ggml_acc(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, nb1: int, nb2: int, nb3: int, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_acc( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); """ ... def ggml_acc_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, nb1: int, nb2: int, nb3: int, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_acc_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); """ ... def ggml_add(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_add( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_add1(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_add1( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_add1_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_add1_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_add_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_add_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_alibi(ctx: ffi.CData, a: ffi.CData, n_past: int, n_head: int, bias_max: float) -> ffi.CData: """ alibi position embedding in-place, returns view(a) struct ggml_tensor * ggml_alibi( struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_head, float bias_max); """ ... def ggml_allocr_alloc(alloc: ffi.CData, tensor: ffi.CData) -> None: """GGML_API void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor);""" ... def ggml_allocr_alloc_graph(alloc: ffi.CData, graph: ffi.CData) -> int: """GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);""" ... def ggml_allocr_free(alloc: ffi.CData) -> None: """GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);""" ... def ggml_allocr_is_measure(alloc: ffi.CData) -> bool: """GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);""" ... def ggml_allocr_new(data: ffi.CData, size: int, alignment: int) -> ffi.CData: """GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);""" ... def ggml_allocr_new_measure(alignment: int) -> ffi.CData: """GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);""" ... def ggml_allocr_reset(alloc: ffi.CData) -> None: """GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc);""" ... def ggml_allocr_set_parse_seq(alloc: ffi.CData, list: ffi.CData, n: int) -> None: """ tell the allocator to parse nodes following the order described in the list you should call this if your graph are optimized to execute out-of-order GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n); """ ... def ggml_are_same_shape(t0: ffi.CData, t1: ffi.CData) -> bool: """ GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);""" ... def ggml_argmax(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ argmax along rows GGML_API struct ggml_tensor * ggml_argmax( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_blck_size(type: int) -> int: """ GGML_API int ggml_blck_size (enum ggml_type type);""" ... 
def ggml_build_backward(ctx: ffi.CData, gf: ffi.CData, keep: bool) -> ffi.CData: """ GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);""" ... def ggml_build_forward(tensor: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);""" ... def ggml_build_forward_ctx(ctx: ffi.CData, tensor: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor);""" ... def ggml_build_forward_expand(cgraph: ffi.CData, tensor: ffi.CData) -> None: """ GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);""" ... def ggml_cl_can_mul_mat(src0: ffi.CData, src1: ffi.CData, dst: ffi.CData) -> bool: """bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);""" ... def ggml_cl_free_data(tensor: ffi.CData) -> None: """void ggml_cl_free_data(const struct ggml_tensor* tensor);""" ... def ggml_cl_host_free(ptr: ffi.CData) -> None: """void ggml_cl_host_free(void * ptr);""" ... def ggml_cl_host_malloc(size: int) -> ffi.CData: """void * ggml_cl_host_malloc(size_t size);""" ... def ggml_cl_init() -> None: """void ggml_cl_init(void);""" ... def ggml_cl_mul(src0: ffi.CData, src1: ffi.CData, dst: ffi.CData) -> None: """void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);""" ... def ggml_cl_mul_mat(src0: ffi.CData, src1: ffi.CData, dst: ffi.CData, wdata: ffi.CData, wsize: int) -> None: """void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);""" ... def ggml_cl_mul_mat_get_wsize(src0: ffi.CData, src1: ffi.CData, dst: ffi.CData) -> int: """size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);""" ... def ggml_cl_transform_tensor(data: ffi.CData, tensor: ffi.CData) -> None: """void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);""" ... def ggml_clamp(ctx: ffi.CData, a: ffi.CData, min: float, max: float) -> ffi.CData: """ clamp in-place, returns view(a) struct ggml_tensor * ggml_clamp( struct ggml_context * ctx, struct ggml_tensor * a, float min, float max); """ ... def ggml_cont(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ make contiguous GGML_API struct ggml_tensor * ggml_cont( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_conv_1d(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, s0: int, p0: int, d0: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, // stride int p0, // padding int d0); // dilation """ ... def ggml_conv_1d_ph(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, s: int, d: int) -> ffi.CData: """ conv_1d with padding = half alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d) GGML_API struct ggml_tensor * ggml_conv_1d_ph( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s, int d); """ ... def ggml_conv_2d(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, s0: int, s1: int, p0: int, p1: int, d0: int, d1: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_conv_2d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int s1, int p0, int p1, int d0, int d1); """ ... 
def ggml_cpu_has_arm_fma() -> int: """ GGML_API int ggml_cpu_has_arm_fma (void);""" ... def ggml_cpu_has_avx() -> int: """ GGML_API int ggml_cpu_has_avx (void);""" ... def ggml_cpu_has_avx2() -> int: """ GGML_API int ggml_cpu_has_avx2 (void);""" ... def ggml_cpu_has_avx512() -> int: """ GGML_API int ggml_cpu_has_avx512 (void);""" ... def ggml_cpu_has_avx512_vbmi() -> int: """ GGML_API int ggml_cpu_has_avx512_vbmi(void);""" ... def ggml_cpu_has_avx512_vnni() -> int: """ GGML_API int ggml_cpu_has_avx512_vnni(void);""" ... def ggml_cpu_has_blas() -> int: """ GGML_API int ggml_cpu_has_blas (void);""" ... def ggml_cpu_has_clblast() -> int: """ GGML_API int ggml_cpu_has_clblast (void);""" ... def ggml_cpu_has_cuda() -> int: """ GGML_API int ggml_cpu_has_cuda (void);""" ... def ggml_cpu_has_f16c() -> int: """ GGML_API int ggml_cpu_has_f16c (void);""" ... def ggml_cpu_has_fma() -> int: """ GGML_API int ggml_cpu_has_fma (void);""" ... def ggml_cpu_has_fp16_va() -> int: """ GGML_API int ggml_cpu_has_fp16_va (void);""" ... def ggml_cpu_has_gpublas() -> int: """ GGML_API int ggml_cpu_has_gpublas (void);""" ... def ggml_cpu_has_neon() -> int: """ GGML_API int ggml_cpu_has_neon (void);""" ... def ggml_cpu_has_sse3() -> int: """ GGML_API int ggml_cpu_has_sse3 (void);""" ... def ggml_cpu_has_vsx() -> int: """ GGML_API int ggml_cpu_has_vsx (void);""" ... def ggml_cpu_has_wasm_simd() -> int: """ GGML_API int ggml_cpu_has_wasm_simd (void);""" ... def ggml_cpy(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ a -> b, return view(b) GGML_API struct ggml_tensor * ggml_cpy( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_cross_entropy_loss(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_cross_entropy_loss( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_cross_entropy_loss_back(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c); """ ... def ggml_cuda_assign_buffers(tensor: ffi.CData) -> None: """GGML_API void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);""" ... def ggml_cuda_assign_buffers_force_inplace(tensor: ffi.CData) -> None: """GGML_API void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);""" ... def ggml_cuda_assign_buffers_no_scratch(tensor: ffi.CData) -> None: """GGML_API void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);""" ... def ggml_cuda_can_mul_mat(src0: ffi.CData, src1: ffi.CData, dst: ffi.CData) -> bool: """GGML_API bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);""" ... def ggml_cuda_compute_forward(params: ffi.CData, tensor: ffi.CData) -> bool: """GGML_API bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);""" ... def ggml_cuda_free_data(tensor: ffi.CData) -> None: """GGML_API void ggml_cuda_free_data(struct ggml_tensor * tensor);""" ... def ggml_cuda_free_scratch() -> None: """GGML_API void ggml_cuda_free_scratch(void);""" ... def ggml_cuda_get_device_count() -> int: """GGML_API int ggml_cuda_get_device_count(void);""" ... 
def ggml_cuda_get_device_description(device: int, description: ffi.CData, description_size: int) -> None: """GGML_API void ggml_cuda_get_device_description(int device, char * description, size_t description_size);""" ... def ggml_cuda_host_free(ptr: ffi.CData) -> None: """GGML_API void ggml_cuda_host_free(void * ptr);""" ... def ggml_cuda_host_malloc(size: int) -> ffi.CData: """GGML_API void * ggml_cuda_host_malloc(size_t size);""" ... def ggml_cuda_set_main_device(main_device: int) -> None: """GGML_API void ggml_cuda_set_main_device(int main_device);""" ... def ggml_cuda_set_mul_mat_q(mul_mat_q: bool) -> None: """GGML_API void ggml_cuda_set_mul_mat_q(bool mul_mat_q);""" ... def ggml_cuda_set_scratch_size(scratch_size: int) -> None: """GGML_API void ggml_cuda_set_scratch_size(size_t scratch_size);""" ... def ggml_cuda_set_tensor_split(tensor_split: ffi.CData) -> None: """GGML_API void ggml_cuda_set_tensor_split(const float * tensor_split);""" ... def ggml_cuda_transform_tensor(data: ffi.CData, tensor: ffi.CData) -> None: """GGML_API void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);""" ... def ggml_cycles() -> int: """ GGML_API int64_t ggml_cycles(void);""" ... def ggml_cycles_per_ms() -> int: """ GGML_API int64_t ggml_cycles_per_ms(void);""" ... def ggml_diag(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_diag( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_diag_mask_inf(ctx: ffi.CData, a: ffi.CData, n_past: int) -> ffi.CData: """ set elements above the diagonal to -INF GGML_API struct ggml_tensor * ggml_diag_mask_inf( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); """ ... def ggml_diag_mask_inf_inplace(ctx: ffi.CData, a: ffi.CData, n_past: int) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); """ ... def ggml_diag_mask_zero(ctx: ffi.CData, a: ffi.CData, n_past: int) -> ffi.CData: """ set elements above the diagonal to 0 GGML_API struct ggml_tensor * ggml_diag_mask_zero( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); """ ... def ggml_diag_mask_zero_inplace(ctx: ffi.CData, a: ffi.CData, n_past: int) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); """ ... def ggml_div(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_div( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_div_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_div_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_dup(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_dup( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_dup_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_dup_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_dup_tensor(ctx: ffi.CData, src: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);""" ... def ggml_element_size(tensor: ffi.CData) -> int: """ GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);""" ... 
def ggml_elu(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_elu( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_elu_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_elu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_flash_attn(ctx: ffi.CData, q: ffi.CData, k: ffi.CData, v: ffi.CData, masked: bool) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_flash_attn( struct ggml_context * ctx, struct ggml_tensor * q, struct ggml_tensor * k, struct ggml_tensor * v, bool masked); """ ... def ggml_flash_attn_back(ctx: ffi.CData, q: ffi.CData, k: ffi.CData, v: ffi.CData, d: ffi.CData, masked: bool) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_flash_attn_back( struct ggml_context * ctx, struct ggml_tensor * q, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * d, bool masked); """ ... def ggml_flash_ff(ctx: ffi.CData, a: ffi.CData, b0: ffi.CData, b1: ffi.CData, c0: ffi.CData, c1: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_flash_ff( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b0, struct ggml_tensor * b1, struct ggml_tensor * c0, struct ggml_tensor * c1); """ ... def ggml_format_name(tensor: ffi.CData, fmt: ffi.CData, *args2) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_format_name( struct ggml_tensor * tensor, const char * fmt, ...);""" ... def ggml_fp16_to_fp32(x: np.float16) -> float: """ convert FP16 <-> FP32 GGML_API float ggml_fp16_to_fp32(ggml_fp16_t x); """ ... def ggml_fp16_to_fp32_row(x: ffi.CData, y: ffi.CData, n: int) -> None: """ GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);""" ... def ggml_fp32_to_fp16(x: float) -> np.float16: """ GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);""" ... def ggml_fp32_to_fp16_row(x: ffi.CData, y: ffi.CData, n: int) -> None: """ GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);""" ... def ggml_free(ctx: ffi.CData) -> None: """ GGML_API void ggml_free(struct ggml_context * ctx);""" ... def ggml_ftype_to_ggml_type(ftype: int) -> int: """ TODO: temporary until model loading of ggml examples is refactored GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype); """ ... def ggml_gelu(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ TODO: double-check this computation is correct GGML_API struct ggml_tensor * ggml_gelu( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_gelu_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_gelu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_gelu_quick(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_gelu_quick( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_gelu_quick_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_gelu_quick_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_get_data(tensor: ffi.CData) -> ffi.CData: """ GGML_API void * ggml_get_data (const struct ggml_tensor * tensor);""" ... def ggml_get_data_f32(tensor: ffi.CData) -> ffi.CData: """ GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);""" ... def ggml_get_f32_1d(tensor: ffi.CData, i: int) -> float: """ GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);""" ... 
def ggml_get_i32_1d(tensor: ffi.CData, i: int) -> int: """ GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);""" ... def ggml_get_max_tensor_size(ctx: ffi.CData) -> int: """ GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);""" ... def ggml_get_mem_buffer(ctx: ffi.CData) -> ffi.CData: """ GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx);""" ... def ggml_get_mem_size(ctx: ffi.CData) -> int: """ GGML_API size_t ggml_get_mem_size (const struct ggml_context * ctx);""" ... def ggml_get_name(tensor: ffi.CData) -> ffi.CData: """ GGML_API const char * ggml_get_name (const struct ggml_tensor * tensor);""" ... def ggml_get_no_alloc(ctx: ffi.CData) -> bool: """ GGML_API bool ggml_get_no_alloc(struct ggml_context * ctx);""" ... def ggml_get_rows(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_get_rows( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_get_rows_back(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_get_rows_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c); """ ... def ggml_get_tensor(ctx: ffi.CData, name: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);""" ... def ggml_get_unary_op(tensor: ffi.CData) -> int: """ GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);""" ... def ggml_graph_compute(cgraph: ffi.CData, cplan: ffi.CData) -> int: """ GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);""" ... def ggml_graph_compute_with_ctx(ctx: ffi.CData, cgraph: ffi.CData, n_threads: int) -> None: """ same as ggml_graph_compute() but the work data is allocated as a part of the context note: the drawback of this API is that you must have ensured that the context has enough memory for the work data GGML_API void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads); """ ... def ggml_graph_dump_dot(gb: ffi.CData, gf: ffi.CData, filename: ffi.CData) -> None: """ dump the graph into a file using the dot format GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename); """ ... def ggml_graph_get_tensor(cgraph: ffi.CData, name: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);""" ... def ggml_graph_overhead() -> int: """ GGML_API size_t ggml_graph_overhead(void);""" ... def ggml_graph_plan(cgraph: ffi.CData, n_threads: int) -> ffi.CData: """ ggml_graph_plan() has to be called before ggml_graph_compute() when plan.work_size > 0, caller must allocate memory for plan.work_data GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/); """ ... def ggml_graph_print(cgraph: ffi.CData) -> None: """ print info and performance information for the graph GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph); """ ... def ggml_graph_reset(cgraph: ffi.CData) -> None: """ GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph);""" ... def ggml_init(params: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);""" ... def ggml_init_cuda() -> None: """GGML_API void ggml_init_cuda(void);""" ... 
def ggml_internal_get_type_traits(type: int) -> ffi.CData: """ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);""" ... def ggml_is_contiguous(tensor: ffi.CData) -> bool: """ GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);""" ... def ggml_is_numa() -> bool: """ GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node""" ... def ggml_is_permuted(tensor: ffi.CData) -> bool: """ GGML_API bool ggml_is_permuted (const struct ggml_tensor * tensor);""" ... def ggml_is_quantized(type: int) -> bool: """ GGML_API bool ggml_is_quantized(enum ggml_type type);""" ... def ggml_is_transposed(tensor: ffi.CData) -> bool: """ GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor);""" ... def ggml_log(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_log( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_log_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_log_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_map_binary_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_binary_op_f32_t fun), "use ggml_map_custom2 instead"); """ ... def ggml_map_binary_inplace_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_binary_op_f32_t fun), "use ggml_map_custom2_inplace instead"); """ ... def ggml_map_custom1(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData, n_tasks: int, userdata: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_map_custom1( struct ggml_context * ctx, struct ggml_tensor * a, ggml_custom1_op_t fun, int n_tasks, void * userdata); """ ... def ggml_map_custom1_f32(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32( struct ggml_context * ctx, struct ggml_tensor * a, ggml_custom1_op_f32_t fun), "use ggml_map_custom1 instead"); """ ... def ggml_map_custom1_inplace(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData, n_tasks: int, userdata: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_map_custom1_inplace( struct ggml_context * ctx, struct ggml_tensor * a, ggml_custom1_op_t fun, int n_tasks, void * userdata); """ ... def ggml_map_custom1_inplace_f32(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32( struct ggml_context * ctx, struct ggml_tensor * a, ggml_custom1_op_f32_t fun), "use ggml_map_custom1_inplace instead"); """ ... def ggml_map_custom2(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData, n_tasks: int, userdata: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_map_custom2( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_custom2_op_t fun, int n_tasks, void * userdata); """ ... def ggml_map_custom2_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_custom2_op_f32_t fun), "use ggml_map_custom2 instead"); """ ... 
def ggml_map_custom2_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData, n_tasks: int, userdata: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_map_custom2_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_custom2_op_t fun, int n_tasks, void * userdata); """ ... def ggml_map_custom2_inplace_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_custom2_op_f32_t fun), "use ggml_map_custom2_inplace instead"); """ ... def ggml_map_custom3(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData, fun: ffi.CData, n_tasks: int, userdata: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_map_custom3( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, ggml_custom3_op_t fun, int n_tasks, void * userdata); """ ... def ggml_map_custom3_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, ggml_custom3_op_f32_t fun), "use ggml_map_custom3 instead"); """ ... def ggml_map_custom3_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData, fun: ffi.CData, n_tasks: int, userdata: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_map_custom3_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, ggml_custom3_op_t fun, int n_tasks, void * userdata); """ ... def ggml_map_custom3_inplace_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, ggml_custom3_op_f32_t fun), "use ggml_map_custom3_inplace instead"); """ ... def ggml_map_unary_f32(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32( struct ggml_context * ctx, struct ggml_tensor * a, ggml_unary_op_f32_t fun), "use ggml_map_custom1 instead"); """ ... def ggml_map_unary_inplace_f32(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData) -> ffi.CData: """ GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32( struct ggml_context * ctx, struct ggml_tensor * a, ggml_unary_op_f32_t fun), "use ggml_map_custom1_inplace instead"); """ ... def ggml_mean(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ mean along rows GGML_API struct ggml_tensor * ggml_mean( struct ggml_context * ctx, struct ggml_tensor * a); """ ... 
def ggml_metal_add_buffer(ctx: ffi.CData, name: ffi.CData, data: ffi.CData, size: int, max_size: int) -> bool: """ creates a mapping between a host memory buffer and a device memory buffer - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute - the mapping is used during computation to determine the arguments of the compute kernels - you don't need to keep the host memory buffer allocated as it is never accessed by Metal - max_size specifies the maximum size of a tensor and is used to create shared views such that it is guaranteed that the tensor will fit in at least one of the views bool ggml_metal_add_buffer( struct ggml_metal_context * ctx, const char * name, void * data, size_t size, size_t max_size); """ ... def ggml_metal_free(ctx: ffi.CData) -> None: """void ggml_metal_free(struct ggml_metal_context * ctx);""" ... def ggml_metal_get_concur_list(ctx: ffi.CData) -> ffi.CData: """ output the concur_list for ggml_alloc int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx); """ ... def ggml_metal_get_tensor(ctx: ffi.CData, t: ffi.CData) -> None: """ get data from the device into host memory void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t); """ ... def ggml_metal_graph_compute(ctx: ffi.CData, gf: ffi.CData) -> None: """ same as ggml_graph_compute but uses Metal creates gf->n_threads command buffers in parallel void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf); """ ... def ggml_metal_graph_find_concurrency(ctx: ffi.CData, gf: ffi.CData, check_mem: bool) -> None: """ try to find operations that can be run concurrently in the graph you should run it again if the topology of your graph changes void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf, bool check_mem); """ ... def ggml_metal_host_free(data: ffi.CData) -> None: """void ggml_metal_host_free (void * data);""" ... def ggml_metal_host_malloc(n: int) -> ffi.CData: """void * ggml_metal_host_malloc(size_t n);""" ... def ggml_metal_if_optimized(ctx: ffi.CData) -> int: """ if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized int ggml_metal_if_optimized(struct ggml_metal_context * ctx); """ ... def ggml_metal_init(n_cb: int) -> ffi.CData: """ number of command buffers to use struct ggml_metal_context * ggml_metal_init(int n_cb); """ ... def ggml_metal_set_n_cb(ctx: ffi.CData, n_cb: int) -> None: """ set the number of command buffers to use void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb); """ ... def ggml_metal_set_tensor(ctx: ffi.CData, t: ffi.CData) -> None: """ set data from host memory into the device void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t); """ ... def ggml_mpi_backend_free() -> None: """void ggml_mpi_backend_free(void);""" ... def ggml_mpi_backend_init() -> None: """void ggml_mpi_backend_init(void);""" ... def ggml_mpi_eval_init(ctx_mpi: ffi.CData, n_tokens: ffi.CData, n_past: ffi.CData, n_threads: ffi.CData) -> None: """ void ggml_mpi_eval_init( struct ggml_mpi_context * ctx_mpi, int * n_tokens, int * n_past, int * n_threads); """ ... def ggml_mpi_free(ctx: ffi.CData) -> None: """void ggml_mpi_free(struct ggml_mpi_context * ctx);""" ... def ggml_mpi_graph_compute_post(ctx_mpi: ffi.CData, gf: ffi.CData, n_layers: int) -> None: """ void ggml_mpi_graph_compute_post( struct ggml_mpi_context * ctx_mpi, struct ggml_cgraph * gf, int n_layers); """ ... 
def ggml_mpi_graph_compute_pre(ctx_mpi: ffi.CData, gf: ffi.CData, n_layers: int) -> None: """ void ggml_mpi_graph_compute_pre( struct ggml_mpi_context * ctx_mpi, struct ggml_cgraph * gf, int n_layers); """ ... def ggml_mpi_init() -> ffi.CData: """struct ggml_mpi_context * ggml_mpi_init(void);""" ... def ggml_mpi_rank(ctx: ffi.CData) -> int: """int ggml_mpi_rank(struct ggml_mpi_context * ctx);""" ... def ggml_mul(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_mul( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_mul_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_mul_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_mul_mat(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ A: n columns, m rows B: n columns, p rows (i.e. we transpose it internally) result is m columns, p rows GGML_API struct ggml_tensor * ggml_mul_mat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_nbytes(tensor: ffi.CData) -> int: """ GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor);""" ... def ggml_nbytes_pad(tensor: ffi.CData) -> int: """ GGML_API size_t ggml_nbytes_pad (const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN""" ... def ggml_nbytes_split(tensor: ffi.CData, nrows_split: int) -> int: """ GGML_API size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split);""" ... def ggml_neg(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_neg( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_neg_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_neg_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_nelements(tensor: ffi.CData) -> int: """ GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor);""" ... def ggml_new_f32(ctx: ffi.CData, value: float) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);""" ... def ggml_new_graph(ctx: ffi.CData) -> ffi.CData: """ graph allocation in a context GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); """ ... def ggml_new_i32(ctx: ffi.CData, value: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);""" ... def ggml_new_tensor(ctx: ffi.CData, type: int, n_dims: int, ne: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_tensor( struct ggml_context * ctx, enum ggml_type type, int n_dims, const int64_t *ne); """ ... def ggml_new_tensor_1d(ctx: ffi.CData, type: int, ne0: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_tensor_1d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0); """ ... def ggml_new_tensor_2d(ctx: ffi.CData, type: int, ne0: int, ne1: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_tensor_2d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1); """ ... def ggml_new_tensor_3d(ctx: ffi.CData, type: int, ne0: int, ne1: int, ne2: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_tensor_3d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2); """ ... 
def ggml_new_tensor_4d(ctx: ffi.CData, type: int, ne0: int, ne1: int, ne2: int, ne3: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_new_tensor_4d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); """ ... def ggml_norm(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ normalize along rows TODO: eps is hardcoded to 1e-5 for now GGML_API struct ggml_tensor * ggml_norm( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_norm_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_norm_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_nrows(tensor: ffi.CData) -> int: """ GGML_API int64_t ggml_nrows (const struct ggml_tensor * tensor);""" ... def ggml_numa_init() -> None: """ GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems""" ... def ggml_op_name(op: int) -> ffi.CData: """ GGML_API const char * ggml_op_name (enum ggml_op op);""" ... def ggml_op_symbol(op: int) -> ffi.CData: """ GGML_API const char * ggml_op_symbol(enum ggml_op op);""" ... def ggml_opt(ctx: ffi.CData, params: ffi.CData, f: ffi.CData) -> int: """ optimize the function defined by the tensor f GGML_API enum ggml_opt_result ggml_opt( struct ggml_context * ctx, struct ggml_opt_params params, struct ggml_tensor * f); """ ... def ggml_opt_default_params(type: int) -> ffi.CData: """ GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);""" ... def ggml_opt_init(ctx: ffi.CData, opt: ffi.CData, params: ffi.CData, nx: int) -> None: """ initialize optimizer context GGML_API void ggml_opt_init( struct ggml_context * ctx, struct ggml_opt_context * opt, struct ggml_opt_params params, int64_t nx); """ ... def ggml_opt_resume(ctx: ffi.CData, opt: ffi.CData, f: ffi.CData) -> int: """ continue optimizing the function defined by the tensor f GGML_API enum ggml_opt_result ggml_opt_resume( struct ggml_context * ctx, struct ggml_opt_context * opt, struct ggml_tensor * f); """ ... def ggml_opt_resume_g(ctx: ffi.CData, opt: ffi.CData, f: ffi.CData, gf: ffi.CData, gb: ffi.CData) -> int: """ continue optimizing the function defined by the tensor f GGML_API enum ggml_opt_result ggml_opt_resume_g( struct ggml_context * ctx, struct ggml_opt_context * opt, struct ggml_tensor * f, struct ggml_cgraph * gf, struct ggml_cgraph * gb); """ ... def ggml_out_prod(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ A: m columns, n rows, B: p columns, n rows, result is m columns, p rows GGML_API struct ggml_tensor * ggml_out_prod( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_permute(ctx: ffi.CData, a: ffi.CData, axis0: int, axis1: int, axis2: int, axis3: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_permute( struct ggml_context * ctx, struct ggml_tensor * a, int axis0, int axis1, int axis2, int axis3); """ ... def ggml_pool_1d(ctx: ffi.CData, a: ffi.CData, op: int, k0: int, s0: int, p0: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_pool_1d( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_op_pool op, int k0, // kernel size int s0, // stride int p0); // padding """ ... def ggml_pool_2d(ctx: ffi.CData, a: ffi.CData, op: int, k0: int, k1: int, s0: int, s1: int, p0: int, p1: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_pool_2d( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_op_pool op, int k0, int k1, int s0, int s1, int p0, int p1); """ ... 
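# Minimal optimization sketch for ggml_opt / ggml_opt_default_params documented above:
# minimize f(t) = (t - 3)^2 over the parameter tensor t. It uses ggml_set_param, ggml_sub,
# ggml_sqr and ggml_sum from elsewhere in these stubs; the module layout (`ggml.lib` /
# `ggml.ffi`) and the memory size are assumptions of this sketch, not a prescribed recipe.
def _example_opt_scalar():
    from ggml import lib, ffi
    params = ffi.new("struct ggml_init_params*", {"mem_size": 16 * 1024 * 1024,
                                                  "mem_buffer": ffi.NULL,
                                                  "no_alloc": False})
    ctx = lib.ggml_init(params[0])
    t = lib.ggml_new_f32(ctx, 0.0)
    lib.ggml_set_param(ctx, t)                          # mark t as an optimizable parameter
    c = lib.ggml_new_f32(ctx, 3.0)
    f = lib.ggml_sum(ctx, lib.ggml_sqr(ctx, lib.ggml_sub(ctx, t, c)))  # scalar loss
    opt_params = lib.ggml_opt_default_params(lib.GGML_OPT_ADAM)
    result = lib.ggml_opt(ctx, opt_params, f)           # enum ggml_opt_result
    value = lib.ggml_get_f32_1d(t, 0)                   # should approach 3.0
    lib.ggml_free(ctx)
    return result, value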
def ggml_print_object(obj: ffi.CData) -> None: """ GGML_API void ggml_print_object (const struct ggml_object * obj);""" ... def ggml_print_objects(ctx: ffi.CData) -> None: """ GGML_API void ggml_print_objects(const struct ggml_context * ctx);""" ... def ggml_quantize_chunk(type: int, src: ffi.CData, dst: ffi.CData, start: int, n: int, hist: ffi.CData) -> int: """ GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);""" ... def ggml_quantize_q2_K(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """ Quantization with histogram collection size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist); """ ... def ggml_quantize_q3_K(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q4_0(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """ GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q4_1(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """ GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q4_K(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q5_0(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """ GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q5_1(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """ GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q5_K(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q6_K(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_quantize_q8_0(src: ffi.CData, dst: ffi.CData, n: int, k: int, hist: ffi.CData) -> int: """ GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);""" ... def ggml_relu(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_relu( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_relu_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_relu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_repeat(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ if a is the same shape as b, and a is not parameter, return a otherwise, return a new tensor: repeat(a) to fit in b GGML_API struct ggml_tensor * ggml_repeat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_repeat_back(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_repeat_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... 
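# Quantization sketch for ggml_quantize_chunk / ggml_quantize_q4_0 documented above. The
# histogram argument collects per-quant-value counts; a 16-entry int64_t array (one slot per
# 4-bit value) is the size conventionally passed by callers of this API. Q4_0 quantizes
# blocks of 32 floats, so n is kept a multiple of 32. Module layout (`ggml.lib` /
# `ggml.ffi`) is assumed.
def _example_quantize_q4_0(n=256):
    from ggml import lib, ffi
    src = ffi.new("float[]", [float(i % 7) - 3.0 for i in range(n)])
    dst = ffi.new("char[]", 4 * n)            # generously sized output buffer
    hist = ffi.new("int64_t[]", 16)
    n_bytes = lib.ggml_quantize_chunk(lib.GGML_TYPE_Q4_0, src, dst, 0, n, hist)
    return n_bytes                             # number of bytes written into dst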
def ggml_reshape(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ return view(a), b specifies the new shape TODO: when we start computing gradient, make a copy instead of view GGML_API struct ggml_tensor * ggml_reshape( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_reshape_1d(ctx: ffi.CData, a: ffi.CData, ne0: int) -> ffi.CData: """ return view(a) TODO: when we start computing gradient, make a copy instead of view GGML_API struct ggml_tensor * ggml_reshape_1d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0); """ ... def ggml_reshape_2d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_reshape_2d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1); """ ... def ggml_reshape_3d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int, ne2: int) -> ffi.CData: """ return view(a) TODO: when we start computing gradient, make a copy instead of view GGML_API struct ggml_tensor * ggml_reshape_3d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2); """ ... def ggml_reshape_4d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int, ne2: int, ne3: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_reshape_4d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); """ ... def ggml_rms_norm(ctx: ffi.CData, a: ffi.CData, eps: float) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_rms_norm( struct ggml_context * ctx, struct ggml_tensor * a, float eps); """ ... def ggml_rms_norm_back(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ a - x b - dy TODO: update with configurable eps GGML_API struct ggml_tensor * ggml_rms_norm_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_rms_norm_inplace(ctx: ffi.CData, a: ffi.CData, eps: float) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_rms_norm_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float eps); """ ... def ggml_rope(ctx: ffi.CData, a: ffi.CData, n_past: int, n_dims: int, mode: int, n_ctx: int) -> ffi.CData: """ rotary position embedding if mode & 1 == 1, skip n_past elements if mode & 2 == 1, GPT-NeoX style if mode & 4 == 1, ChatGLM style TODO: avoid creating a new tensor every time GGML_API struct ggml_tensor * ggml_rope( struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode, int n_ctx); """ ... def ggml_rope_back(ctx: ffi.CData, a: ffi.CData, n_past: int, n_dims: int, mode: int, n_ctx: int) -> ffi.CData: """ rotary position embedding backward, i.e compute dx from dy a - dy GGML_API struct ggml_tensor * ggml_rope_back( struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode, int n_ctx); """ ... def ggml_rope_custom(ctx: ffi.CData, a: ffi.CData, n_past: int, n_dims: int, mode: int, n_ctx: int, freq_base: float, freq_scale: float) -> ffi.CData: """ custom RoPE GGML_API struct ggml_tensor * ggml_rope_custom( struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode, int n_ctx, float freq_base, float freq_scale); """ ... 
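# RoPE sketch for ggml_rope / ggml_rope_custom documented above. Per the comments, mode is a
# bit field: bit 0 skips the first n_past elements, bit 1 selects the GPT-NeoX style
# rotation, bit 2 the ChatGLM style; ggml_rope_custom additionally takes freq_base /
# freq_scale. The tensor shape and the module layout (`ggml.lib` / `ggml.ffi`) are
# illustrative assumptions of this sketch.
def _example_rope(n_tokens=8, n_head=4, head_dim=16, n_past=0, n_ctx=128):
    from ggml import lib, ffi
    params = ffi.new("struct ggml_init_params*", {"mem_size": 4 * 1024 * 1024,
                                                  "mem_buffer": ffi.NULL,
                                                  "no_alloc": False})
    ctx = lib.ggml_init(params[0])
    q = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F32, head_dim, n_head, n_tokens)
    q_rope   = lib.ggml_rope(ctx, q, n_past, head_dim, 0, n_ctx)    # original style
    q_neox   = lib.ggml_rope(ctx, q, n_past, head_dim, 2, n_ctx)    # GPT-NeoX style
    q_scaled = lib.ggml_rope_custom(ctx, q, n_past, head_dim, 0, n_ctx,
                                    10000.0, 0.5)                   # custom base / scale
    lib.ggml_free(ctx)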
def ggml_rope_custom_inplace(ctx: ffi.CData, a: ffi.CData, n_past: int, n_dims: int, mode: int, n_ctx: int, freq_base: float, freq_scale: float) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_custom_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode, int n_ctx, float freq_base, float freq_scale); """ ... def ggml_rope_inplace(ctx: ffi.CData, a: ffi.CData, n_past: int, n_dims: int, mode: int, n_ctx: int) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode, int n_ctx); """ ... def ggml_scale(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_scale( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_scale_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_scale_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_set(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, nb1: int, nb2: int, nb3: int, offset: int) -> ffi.CData: """ b -> view(a,offset,nb1,nb2,3), return modified a GGML_API struct ggml_tensor * ggml_set( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); """ ... def ggml_set_1d(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_set_1d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t offset); """ ... def ggml_set_1d_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_set_1d_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t offset); """ ... def ggml_set_2d(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, nb1: int, offset: int) -> ffi.CData: """ b -> view(a,offset,nb1,nb2,3), return modified a GGML_API struct ggml_tensor * ggml_set_2d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t offset); """ ... def ggml_set_2d_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, nb1: int, offset: int) -> ffi.CData: """ b -> view(a,offset,nb1,nb2,3), return view(a) GGML_API struct ggml_tensor * ggml_set_2d_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t offset); """ ... def ggml_set_f32(tensor: ffi.CData, value: float) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);""" ... def ggml_set_f32_1d(tensor: ffi.CData, i: int, value: float) -> None: """ GGML_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);""" ... def ggml_set_i32(tensor: ffi.CData, value: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);""" ... def ggml_set_i32_1d(tensor: ffi.CData, i: int, value: int) -> None: """ GGML_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);""" ... 
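# Element-access sketch for ggml_set_f32 / ggml_set_f32_1d documented above, paired with
# ggml_get_f32_1d declared elsewhere in these stubs. Module layout (`ggml.lib` /
# `ggml.ffi`) is assumed.
def _example_element_access():
    from ggml import lib, ffi
    params = ffi.new("struct ggml_init_params*", {"mem_size": 1 * 1024 * 1024,
                                                  "mem_buffer": ffi.NULL,
                                                  "no_alloc": False})
    ctx = lib.ggml_init(params[0])
    x = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 4)
    lib.ggml_set_f32(x, 1.5)                  # fill every element
    lib.ggml_set_f32_1d(x, 2, 7.0)            # overwrite element 2
    values = [lib.ggml_get_f32_1d(x, i) for i in range(4)]
    assert values == [1.5, 1.5, 7.0, 1.5]
    lib.ggml_free(ctx)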
def ggml_set_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, nb1: int, nb2: int, nb3: int, offset: int) -> ffi.CData: """ b -> view(a,offset,nb1,nb2,3), return view(a) GGML_API struct ggml_tensor * ggml_set_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); """ ... def ggml_set_name(tensor: ffi.CData, name: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_set_name ( struct ggml_tensor * tensor, const char * name);""" ... def ggml_set_no_alloc(ctx: ffi.CData, no_alloc: bool) -> None: """ GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);""" ... def ggml_set_param(ctx: ffi.CData, tensor: ffi.CData) -> None: """ GGML_API void ggml_set_param( struct ggml_context * ctx, struct ggml_tensor * tensor); """ ... def ggml_set_scratch(ctx: ffi.CData, scratch: ffi.CData) -> int: """ GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);""" ... def ggml_set_zero(tensor: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);""" ... def ggml_sgn(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sgn( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_sgn_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sgn_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_silu(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_silu( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_silu_back(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ a - x b - dy GGML_API struct ggml_tensor * ggml_silu_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_silu_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_silu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_soft_max(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_soft_max( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_soft_max_back(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_soft_max_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_soft_max_back_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_soft_max_back_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_soft_max_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ in-place, returns view(a) GGML_API struct ggml_tensor * ggml_soft_max_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_sqr(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sqr( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_sqr_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sqr_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_sqrt(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sqrt( struct ggml_context * ctx, struct ggml_tensor * a); """ ... 
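# Sketch of the naming helper and the in-place convention documented above: the *_inplace
# variants return a view of their input instead of allocating a new tensor. Reading the
# name back through the tensor's `name` field assumes the ggml_tensor struct layout exposed
# by these bindings; module layout (`ggml.lib` / `ggml.ffi`) is assumed.
def _example_names_and_inplace():
    from ggml import lib, ffi
    params = ffi.new("struct ggml_init_params*", {"mem_size": 1 * 1024 * 1024,
                                                  "mem_buffer": ffi.NULL,
                                                  "no_alloc": False})
    ctx = lib.ggml_init(params[0])
    x = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 8)
    lib.ggml_set_name(x, b"logits")
    assert ffi.string(x.name) == b"logits"
    y = lib.ggml_soft_max(ctx, x)             # new result tensor
    z = lib.ggml_soft_max_inplace(ctx, x)     # view of x, written in place on compute
    lib.ggml_free(ctx)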
def ggml_sqrt_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sqrt_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_step(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_step( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_step_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_step_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_sub(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sub( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_sub_inplace(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_sub_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); """ ... def ggml_sum(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ return scalar GGML_API struct ggml_tensor * ggml_sum( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_sum_rows(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ sums along rows, with input shape [a,b,c,d] return shape [1,b,c,d] GGML_API struct ggml_tensor * ggml_sum_rows( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_tanh(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_tanh( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_tanh_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_tanh_inplace( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_tensor_overhead() -> int: """ use this to compute the memory overhead of a tensor GGML_API size_t ggml_tensor_overhead(void); """ ... def ggml_time_init() -> None: """ GGML_API void ggml_time_init(void); // call this once at the beginning of the program""" ... def ggml_time_ms() -> int: """ GGML_API int64_t ggml_time_ms(void);""" ... def ggml_time_us() -> int: """ GGML_API int64_t ggml_time_us(void);""" ... def ggml_transpose(ctx: ffi.CData, a: ffi.CData) -> ffi.CData: """ alias for ggml_permute(ctx, a, 1, 0, 2, 3) GGML_API struct ggml_tensor * ggml_transpose( struct ggml_context * ctx, struct ggml_tensor * a); """ ... def ggml_type_name(type: int) -> ffi.CData: """ GGML_API const char * ggml_type_name(enum ggml_type type);""" ... def ggml_type_size(type: int) -> int: """ GGML_API size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block""" ... def ggml_type_sizef(type: int) -> float: """ GGML_API float ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float""" ... def ggml_unary(ctx: ffi.CData, a: ffi.CData, op: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_unary( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_unary_op op); """ ... def ggml_unary_inplace(ctx: ffi.CData, a: ffi.CData, op: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_unary_inplace( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_unary_op op); """ ... def ggml_used_mem(ctx: ffi.CData) -> int: """ GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);""" ... def ggml_vec_dot_q2_K_q8_K(n: int, s: ffi.CData, vx: ffi.CData, vy: ffi.CData) -> None: """ Dot product void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy); """ ... 
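# Memory-accounting sketch for ggml_tensor_overhead / ggml_nbytes / ggml_used_mem documented
# above. A common pattern is to size a context as (per-tensor overhead + data bytes) summed
# over the tensors it will hold. Module layout (`ggml.lib` / `ggml.ffi`) is assumed.
def _example_memory_accounting():
    from ggml import lib, ffi
    params = ffi.new("struct ggml_init_params*", {"mem_size": 1 * 1024 * 1024,
                                                  "mem_buffer": ffi.NULL,
                                                  "no_alloc": False})
    ctx = lib.ggml_init(params[0])
    t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, 64, 64)
    per_tensor = lib.ggml_tensor_overhead() + lib.ggml_nbytes(t)
    used = lib.ggml_used_mem(ctx)             # bytes consumed so far in this context
    lib.ggml_free(ctx)
    return per_tensor, used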
def ggml_vec_dot_q3_K_q8_K(n: int, s: ffi.CData, vx: ffi.CData, vy: ffi.CData) -> None: """void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);""" ... def ggml_vec_dot_q4_K_q8_K(n: int, s: ffi.CData, vx: ffi.CData, vy: ffi.CData) -> None: """void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);""" ... def ggml_vec_dot_q5_K_q8_K(n: int, s: ffi.CData, vx: ffi.CData, vy: ffi.CData) -> None: """void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);""" ... def ggml_vec_dot_q6_K_q8_K(n: int, s: ffi.CData, vx: ffi.CData, vy: ffi.CData) -> None: """void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);""" ... def ggml_view_1d(ctx: ffi.CData, a: ffi.CData, ne0: int, offset: int) -> ffi.CData: """ offset in bytes GGML_API struct ggml_tensor * ggml_view_1d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, size_t offset); """ ... def ggml_view_2d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int, nb1: int, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_view_2d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, size_t nb1, // row stride in bytes size_t offset); """ ... def ggml_view_3d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int, ne2: int, nb1: int, nb2: int, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_view_3d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, size_t nb1, // row stride in bytes size_t nb2, // slice stride in bytes size_t offset); """ ... def ggml_view_4d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int, ne2: int, ne3: int, nb1: int, nb2: int, nb3: int, offset: int) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_view_4d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, size_t nb1, // row stride in bytes size_t nb2, // slice stride in bytes size_t nb3, size_t offset); """ ... def ggml_view_tensor(ctx: ffi.CData, src: ffi.CData) -> ffi.CData: """ GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);""" ... def ggml_win_part(ctx: ffi.CData, a: ffi.CData, w: int) -> ffi.CData: """ partition into non-overlapping windows with padding if needed example: a: 768 64 64 1 w: 14 res: 768 14 14 25 used in sam GGML_API struct ggml_tensor * ggml_win_part( struct ggml_context * ctx, struct ggml_tensor * a, int w); """ ... def ggml_win_unpart(ctx: ffi.CData, a: ffi.CData, w0: int, h0: int, w: int) -> ffi.CData: """ reverse of ggml_win_part used in sam GGML_API struct ggml_tensor * ggml_win_unpart( struct ggml_context * ctx, struct ggml_tensor * a, int w0, int h0, int w); """ ... def gguf_add_tensor(ctx: ffi.CData, tensor: ffi.CData) -> None: """ manage tensor info GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); """ ... def gguf_find_key(ctx: ffi.CData, key: ffi.CData) -> int: """ GGML_API int gguf_find_key(struct gguf_context * ctx, const char * key);""" ... def gguf_find_tensor(ctx: ffi.CData, name: ffi.CData) -> int: """ GGML_API int gguf_find_tensor (struct gguf_context * ctx, const char * name);""" ... def gguf_free(ctx: ffi.CData) -> None: """ GGML_API void gguf_free(struct gguf_context * ctx);""" ... 
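# View sketch for ggml_view_2d documented above: offsets and row strides are in bytes, so a
# contiguous slice of rows [r0, r0 + nrows) of an F32 matrix reuses the parent's nb[1] as
# both the row stride and the per-row offset unit. Module layout (`ggml.lib` / `ggml.ffi`)
# is assumed.
def _example_row_slice(ne0=8, ne1=4, r0=1, nrows=2):
    from ggml import lib, ffi
    params = ffi.new("struct ggml_init_params*", {"mem_size": 1 * 1024 * 1024,
                                                  "mem_buffer": ffi.NULL,
                                                  "no_alloc": False})
    ctx = lib.ggml_init(params[0])
    a = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, ne0, ne1)
    rows = lib.ggml_view_2d(ctx, a, ne0, nrows, a.nb[1], r0 * a.nb[1])
    assert (rows.ne[0], rows.ne[1]) == (ne0, nrows)
    lib.ggml_free(ctx)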
def gguf_get_alignment(ctx: ffi.CData) -> int: """ GGML_API size_t gguf_get_alignment (struct gguf_context * ctx);""" ... def gguf_get_arr_data(ctx: ffi.CData, i: int) -> ffi.CData: """ GGML_API const void * gguf_get_arr_data(struct gguf_context * ctx, int i);""" ... def gguf_get_arr_n(ctx: ffi.CData, i: int) -> int: """ GGML_API int gguf_get_arr_n (struct gguf_context * ctx, int i);""" ... def gguf_get_arr_str(ctx: ffi.CData, key_id: int, i: int) -> ffi.CData: """ GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i);""" ... def gguf_get_arr_type(ctx: ffi.CData, i: int) -> int: """ GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i);""" ... def gguf_get_data(ctx: ffi.CData) -> ffi.CData: """ GGML_API void * gguf_get_data (struct gguf_context * ctx);""" ... def gguf_get_data_offset(ctx: ffi.CData) -> int: """ GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx);""" ... def gguf_get_key(ctx: ffi.CData, i: int) -> ffi.CData: """ GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i);""" ... def gguf_get_kv_type(ctx: ffi.CData, i: int) -> int: """ GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i);""" ... def gguf_get_meta_data(ctx: ffi.CData, data: ffi.CData) -> None: """ GGML_API void gguf_get_meta_data(struct gguf_context * ctx, void * data);""" ... def gguf_get_meta_size(ctx: ffi.CData) -> int: """ get the size in bytes of the meta data (header, kv pairs, tensor info) including padding GGML_API size_t gguf_get_meta_size(struct gguf_context * ctx); """ ... def gguf_get_n_kv(ctx: ffi.CData) -> int: """ GGML_API int gguf_get_n_kv(struct gguf_context * ctx);""" ... def gguf_get_n_tensors(ctx: ffi.CData) -> int: """ GGML_API int gguf_get_n_tensors (struct gguf_context * ctx);""" ... def gguf_get_tensor_name(ctx: ffi.CData, i: int) -> ffi.CData: """ GGML_API char * gguf_get_tensor_name (struct gguf_context * ctx, int i);""" ... def gguf_get_tensor_offset(ctx: ffi.CData, i: int) -> int: """ GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i);""" ... def gguf_get_val_bool(ctx: ffi.CData, i: int) -> bool: """ GGML_API bool gguf_get_val_bool(struct gguf_context * ctx, int i);""" ... def gguf_get_val_f32(ctx: ffi.CData, i: int) -> float: """ GGML_API float gguf_get_val_f32 (struct gguf_context * ctx, int i);""" ... def gguf_get_val_i16(ctx: ffi.CData, i: int) -> int: """ GGML_API int16_t gguf_get_val_i16 (struct gguf_context * ctx, int i);""" ... def gguf_get_val_i32(ctx: ffi.CData, i: int) -> int: """ GGML_API int32_t gguf_get_val_i32 (struct gguf_context * ctx, int i);""" ... def gguf_get_val_i8(ctx: ffi.CData, i: int) -> int: """ GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i);""" ... def gguf_get_val_str(ctx: ffi.CData, i: int) -> ffi.CData: """ GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i);""" ... def gguf_get_val_u16(ctx: ffi.CData, i: int) -> int: """ GGML_API uint16_t gguf_get_val_u16 (struct gguf_context * ctx, int i);""" ... def gguf_get_val_u32(ctx: ffi.CData, i: int) -> int: """ GGML_API uint32_t gguf_get_val_u32 (struct gguf_context * ctx, int i);""" ... def gguf_get_val_u8(ctx: ffi.CData, i: int) -> int: """ results are undefined if the wrong type is used for the key GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i); """ ... def gguf_get_version(ctx: ffi.CData) -> int: """ GGML_API int gguf_get_version (struct gguf_context * ctx);""" ... 
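# Read sketch for the gguf getters documented above: open a file with gguf_init_from_file
# and walk the KV pairs, checking each value's type before reading it. The gguf_init_params
# fields (no_alloc, ctx) and the file name are assumptions of this sketch; module layout
# (`ggml.lib` / `ggml.ffi`) is assumed.
def _example_gguf_read(path=b"model.gguf"):
    from ggml import lib, ffi
    iparams = ffi.new("struct gguf_init_params*", {"no_alloc": True, "ctx": ffi.NULL})
    gctx = lib.gguf_init_from_file(path, iparams[0])
    kvs = {}
    for i in range(lib.gguf_get_n_kv(gctx)):
        key = ffi.string(lib.gguf_get_key(gctx, i)).decode()
        typ = lib.gguf_get_kv_type(gctx, i)
        if typ == lib.GGUF_TYPE_UINT32:
            kvs[key] = lib.gguf_get_val_u32(gctx, i)
        elif typ == lib.GGUF_TYPE_STRING:
            kvs[key] = ffi.string(lib.gguf_get_val_str(gctx, i)).decode()
        else:
            kvs[key] = ffi.string(lib.gguf_type_name(typ)).decode()
    lib.gguf_free(gctx)
    return kvs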
def gguf_init_empty() -> ffi.CData: """ GGML_API struct gguf_context * gguf_init_empty(void);""" ... def gguf_init_from_file(fname: ffi.CData, params: ffi.CData) -> ffi.CData: """ GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);""" ... def gguf_set_arr_data(ctx: ffi.CData, key: ffi.CData, type: int, data: ffi.CData, n: int) -> None: """ GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);""" ... def gguf_set_arr_str(ctx: ffi.CData, key: ffi.CData, data: ffi.CData, n: int) -> None: """ GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);""" ... def gguf_set_kv(ctx: ffi.CData, src: ffi.CData) -> None: """ set or add KV pairs from another context GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src); """ ... def gguf_set_tensor_data(ctx: ffi.CData, name: ffi.CData, data: ffi.CData, size: int) -> None: """ GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);""" ... def gguf_set_tensor_type(ctx: ffi.CData, name: ffi.CData, type: int) -> None: """ GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);""" ... def gguf_set_val_bool(ctx: ffi.CData, key: ffi.CData, val: bool) -> None: """ GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val);""" ... def gguf_set_val_f32(ctx: ffi.CData, key: ffi.CData, val: float) -> None: """ GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val);""" ... def gguf_set_val_i16(ctx: ffi.CData, key: ffi.CData, val: int) -> None: """ GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val);""" ... def gguf_set_val_i32(ctx: ffi.CData, key: ffi.CData, val: int) -> None: """ GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val);""" ... def gguf_set_val_i8(ctx: ffi.CData, key: ffi.CData, val: int) -> None: """ GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val);""" ... def gguf_set_val_str(ctx: ffi.CData, key: ffi.CData, val: ffi.CData) -> None: """ GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);""" ... def gguf_set_val_u16(ctx: ffi.CData, key: ffi.CData, val: int) -> None: """ GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);""" ... def gguf_set_val_u32(ctx: ffi.CData, key: ffi.CData, val: int) -> None: """ GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);""" ... def gguf_set_val_u8(ctx: ffi.CData, key: ffi.CData, val: int) -> None: """ overrides existing values or adds a new one GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); """ ... def gguf_type_name(type: int) -> ffi.CData: """ GGML_API const char * gguf_type_name(enum gguf_type type);""" ... def gguf_write_to_file(ctx: ffi.CData, fname: ffi.CData, only_meta: bool) -> None: """ write the entire context to a binary file GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta); """ ... def quantize_row_q2_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);""" ... 
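# Write sketch for the gguf setters documented above: build an in-memory gguf context, add a
# few KV pairs, then write it out. With only_meta=True only the header, KV pairs and tensor
# info are written (no tensor data). The key and file names are illustrative; module layout
# (`ggml.lib` / `ggml.ffi`) is assumed.
def _example_gguf_write(path=b"demo.gguf"):
    from ggml import lib, ffi
    gctx = lib.gguf_init_empty()
    lib.gguf_set_val_str(gctx, b"general.name", b"demo")
    lib.gguf_set_val_u32(gctx, b"demo.version", 1)
    lib.gguf_set_val_bool(gctx, b"demo.flag", True)
    lib.gguf_write_to_file(gctx, path, True)   # only_meta=True
    lib.gguf_free(gctx)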
def quantize_row_q2_K_reference(x: ffi.CData, y: ffi.CData, k: int) -> None: """ Quantization void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k); """ ... def quantize_row_q3_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);""" ... def quantize_row_q3_K_reference(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);""" ... def quantize_row_q4_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);""" ... def quantize_row_q4_K_reference(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);""" ... def quantize_row_q5_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);""" ... def quantize_row_q5_K_reference(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);""" ... def quantize_row_q6_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);""" ... def quantize_row_q6_K_reference(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);""" ... def quantize_row_q8_K(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);""" ... def quantize_row_q8_K_reference(x: ffi.CData, y: ffi.CData, k: int) -> None: """void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);""" ... 
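# Row-quantization sketch for the quantize_row_* helpers documented above. The K-quant
# formats operate on super-blocks of 256 values in the default build, so k is kept a
# multiple of 256; the output buffer below is deliberately oversized because this sketch
# treats the block layout as opaque. Module layout (`ggml.lib` / `ggml.ffi`) is assumed.
def _example_quantize_row_q8_K(k=256):
    from ggml import lib, ffi
    x = ffi.new("float[]", [((i * 37) % 101) / 100.0 - 0.5 for i in range(k)])
    y = ffi.new("char[]", 8 * k)               # opaque block_q8_K storage, oversized on purpose
    lib.quantize_row_q8_K(x, y, k)
    return bytes(ffi.buffer(y))[:64]           # first bytes of the quantized blocks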
ggml-org-ggml-3678254/examples/python/ggml/cffi.py000066400000000000000000001444621512524704700217450ustar00rootroot00000000000000# auto-generated file import _cffi_backend ffi = _cffi_backend.FFI('ggml.cffi', _version = 0x2601, _types = b'\x00\x00\xB6\x0D\x00\x00\x09\x0B\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x04\x2F\x03\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x04\x31\x03\x00\x04\x3D\x03\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x04\x32\x03\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x04\x34\x03\x00\x03\xFE\x03\x00\x04\x53\x03\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x04\x3D\x03\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x04\x3E\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x00\xB6\x0D\x00\x00\x00\x0F\x00\x02\xD0\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x0F\x0D\x00\x00\x04\x0B\x00\x00\x00\x0F\x00\x00\x0F\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x0F\x0D\x00\x00\x0B\x0B\x00\x00\x00\x0F\x00\x00\x0F\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\x0F\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x0F\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x16\x0D\x00\x00\x0B\x11\x00\x04\x38\x03\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x16\x0D\x00\x00\x0B\x11\x00\x00\x44\x11\x00\x00\x08\x11\x00\x04\x30\x03\x00\x00\x4B\x11\x00\x00\x00\x0F\x00\x04\x16\x0D\x00\x00\x0B\x11\x00\x00\x20\x09\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x0B\x00\x00\x00\x0F\x00\x01\x14\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\x34\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x02\x7E\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\xF4\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\xF4\x0D\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\xF4\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\xF4\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x04\x18\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x02\xE9\x0D\x00\x00\x0E\x11\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x4B\x11\x00\x04\x33\x03\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x0E\x11\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x04\x35\x03\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x21\x11\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x22\x0D\x00\x00\x00\x0F\x00\x00\xDB\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\xDB\x0D\x00\x00\x00\x0F\x00\x03\xB0\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x03\xB5\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x04\x0D\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x04\x0D\x00\x00\x10\x11\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x4B\x0D\x00\x00\x0B\x11\x00\x00\x00\x0F\x00\x00\x4B\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x30\x0D\x00\x00\x0F\x11\x00\x00\x0B\x03\x00\x00\xB0\x11\x00\x00\x00\x0F\x00\x04\x30\x0D\x00\x00\x0B\x11\x00\x00\x4B\x11\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x04\x30\x0D\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x0B\x0D\x00\x00\x1B\x09\x00\x00\x00\x0F\x00\x04\x33\x0D\x00\x00\x4B\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x0E\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x7F\x0D\x00\x00\x00\x0F\x00\x00\x50\x0D\x00\x00\x07\x0B\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x4B\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x0
0\x00\x0B\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x01\x11\x00\x00\x07\x01\x00\x00\xDB\x03\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x01\x11\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x01\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x01\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x01\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x05\x0B\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x01\x01\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0A\x0B\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0D\x01\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x0D\x01\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0B\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x0B\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x
00\x08\x11\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x03\x5C\x03\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x03\x62\x03\x00\x00\x07\x01\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x02\xD8\x03\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x03\x4F\x03\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x08\x11\x00\x03\x54\x03\x00\x00\x07\x01\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x02\xD3\x03\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x03\x44\x03\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x03\x48\x03\x00\x00\x07\x01\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x0B\x11\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x08\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x08\x11\x00\x00\x0F\x11\x00\x00\x01\x0F\x00\x00\x08\x0D\x00\x00\x08\x11\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x08\x0D\x00\x00\x08\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x21\x0D\x00\x00\x0F\x11\x00\x00\x24\x09\x00\x00\x00\x0F\x00\x00\x21\x0D\x00\x00\x00\x0F\x00\x03\xBA\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x03\xBF\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x01\x11\x00\x00\xF4\x03\x00\x00\x10\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\xDB\x03\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x02\x35\x11\x00\x00\x10\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x02\x39\x11\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x04\x11\x00\x00\x4B\x11\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x0B\x11\x00\x00\x21\x09\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x04\x32\x03\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x21\x11\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x11\x0D\x00\x00\x00\x0F\x00\x00\x6C\x0D\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x6C\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x10\x0D\x00\x02\x4B\x11\x00\x00\x00\x0F\x00\x00\x10\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x00\x10\x0D\x00\x00\x21\x11\x00\x00\x00\x0F\x00\x00\x10\x0D\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x02\xE1\x0D\x00\x00\x21\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x03\xF8\x03\x00\x00\xF4\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x03\xF9\x03\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x03\xFA\x03\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\
x53\x0D\x00\x03\xFB\x03\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x03\xFC\x03\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x03\xFD\x03\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0F\x11\x00\x00\x0F\x11\x00\x00\x07\x01\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x03\xF8\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x03\xF9\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x03\xFA\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x03\xFB\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x03\xFC\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x03\xFD\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x00\x6C\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x35\x11\x00\x00\x10\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x07\x01\x00\x03\xFE\x03\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x07\x01\x00\x02\x7E\x11\x00\x02\x35\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x07\x01\x00\x02\x7E\x11\x00\x02\x35\x11\x00\x02\x35\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x07\x01\x00\x02\x7E\x11\x00\x04\x53\x03\x00\x02\xE1\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x04\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x04\x11\x00\x00\x22\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x04\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x4B\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x4B\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x04\x30\x03\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\xF8\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\xF8\x11\x00\x02\xF8\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0B\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0B\x11\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0B\x11\x00\x00\x4B\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0B\x11\x00\x00\x44\x11\x00\x00\x50\x11\x00\x00\x0B\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0B\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\x4B\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0E\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0E\x11\x00\x00\x4B\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0E\x11\x00\x00\x4B\x11\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0E\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x7F\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x7F\x11\x00\x02\xE9\x11\x00\x02\xE9\x11\x00\x02\xE9\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x7F\x11\x00\x00\x4B\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x04\x37\x03\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x08\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x10
\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x08\x11\x00\x00\x10\x11\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x01\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x0F\x03\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x0F\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x34\x11\x00\x02\xE1\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x05\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x03\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x04\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x0F\x11\x00\x02\xE1\x11\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x15\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x21\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x21\x11\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x0A\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x6C\x03\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x10\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x10\x11\x00\x00\x08\x11\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x02\xE1\x11\x00\x02\x7E\x11\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x04\x53\x0D\x00\x00\x00\x0F\x00\x00\x24\x03\x00\x00\x0D\x09\x00\x00\x0E\x09\x00\x00\x0F\x09\x00\x00\x10\x09\x00\x00\x11\x09\x00\x00\x12\x09\x00\x00\x13\x09\x00\x00\x14\x09\x00\x00\x04\x09\x00\x00\x05\x09\x00\x00\x06\x09\x00\x00\x07\x09\x00\x00\x08\x09\x00\x00\x09\x09\x00\x00\x0A\x09\x00\x00\x02\x01\x00\x03\xFE\x05\x00\x00\x00\x80\x00\x03\xFE\x05\x00\x00\x00\x10\x00\x03\xFE\x05\x00\x00\x00\xC0\x00\x03\xFE\x05\x00\x00\x00\x25\x00\x03\xFE\x05\x00\x00\x00\x28\x00\x03\xFE\x05\x00\x00\x00\x04\x00\x03\xFE\x05\x00\x00\x00\x38\x00\x03\xFE\x05\x00\x00\x00\x40\x00\x03\xFE\x05\x00\x00\x1F\xF0\x00\x03\xFE\x05\x00\x00\x00\x08\x00\x00\x00\x0B\x00\x00\x02\x0B\x00\x00\x03\x0B\x00\x00\x06\x0B\x00\x00\x08\x0B\x00\x00\x0B\x09\x00\x00\x22\x05\x00\x00\x10\x00\x00\x00\x22\x05\x00\x00\x00\x08\x00\x00\x0F\x01\x00\x00\xDB\x05\x00\x00\x00\x04\x00\x00\x09\x01\x00\x03\xB0\x05\x00\x00\x00\x10\x00\x03\xB5\x05\x00\x00\x00\x10\x00\x03\xB5\x05\x00\x00\x01\x00\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x09\x00\x00\x03\x09\x00\x04\x2C\x03\x00\x00\x0C\x09\x00\x04\x2E\x03\x00\x00\x15\x09\x00\x00\x16\x09\x00\x00\x17\x09\x00\x00\x18\x09\x00\x00\x19\x09\x00\x00\x1A\x09\x00\x00\x1C\x09\x00\x00\x1D\x09\x00\x04\x37\x03\x00\x00\x1E\x09\x00\x00\x1F\x09\x00\x00\x08\x05\x00\x00\x10\x00\x00\x00\x08\x05\x00\x00\x00\x06\x00\x00\x22\x09\x00\x00\x23\x09\x00\x03\xBA\x03\x00\x03\xBA\x05\x00\x00\x00\x80\x00\x03\xBA\x05\x00\x00\x00\x0C\x00\x03\xBA\x05\x00\x00\x00\x10\x00\x03\xBA\x05\x00\x00\x00\x2
0\x00\x03\xBA\x05\x00\x00\x00\x40\x00\x00\x0C\x01\x00\x00\x11\x05\x00\x00\x00\x04\x00\x00\x10\x05\x00\x00\x20\x51\x00\x02\xC6\x03\x00\x02\xDE\x03\x00\x03\xE0\x03\x00\x03\xE7\x03\x00\x00\x00\x01', _globals = (b'\xFF\xFF\xFF\x0BGGML_BACKEND_CPU',0,b'\xFF\xFF\xFF\x0BGGML_BACKEND_GPU',10,b'\xFF\xFF\xFF\x0BGGML_BACKEND_GPU_SPLIT',20,b'\xFF\xFF\xFF\x0BGGML_FTYPE_ALL_F32',0,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_F16',1,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q2_K',10,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q3_K',11,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q4_0',2,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q4_1',3,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q4_1_SOME_F16',4,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q4_K',12,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q5_0',8,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q5_1',9,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q5_K',13,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q6_K',14,b'\xFF\xFF\xFF\x0BGGML_FTYPE_MOSTLY_Q8_0',7,b'\xFF\xFF\xFF\x0BGGML_FTYPE_UNKNOWN',-1,b'\xFF\xFF\xFF\x1FGGML_GRAPH_SIZE',164520,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_BACKTRACKING_ARMIJO',0,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE',2,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_BACKTRACKING_WOLFE',1,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_DEFAULT',1,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_FAIL',-128,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_INVALID_PARAMETERS',-124,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_MAXIMUM_ITERATIONS',-125,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_MAXIMUM_STEP',-126,b'\xFF\xFF\xFF\x0BGGML_LINESEARCH_MINIMUM_STEP',-127,b'\xFF\xFF\xFF\x0BGGML_OBJECT_GRAPH',1,b'\xFF\xFF\xFF\x1FGGML_OBJECT_SIZE',32,b'\xFF\xFF\xFF\x0BGGML_OBJECT_TENSOR',0,b'\xFF\xFF\xFF\x0BGGML_OBJECT_WORK_BUFFER',2,b'\xFF\xFF\xFF\x0BGGML_OPT_ADAM',0,b'\xFF\xFF\xFF\x0BGGML_OPT_DID_NOT_CONVERGE',1,b'\xFF\xFF\xFF\x0BGGML_OPT_FAIL',4,b'\xFF\xFF\xFF\x0BGGML_OPT_INVALID_WOLFE',3,b'\xFF\xFF\xFF\x0BGGML_OPT_LBFGS',1,b'\xFF\xFF\xFF\x0BGGML_OPT_NO_CONTEXT',2,b'\xFF\xFF\xFF\x0BGGML_OPT_OK',0,b'\xFF\xFF\xFF\x0BGGML_OP_ACC',4,b'\xFF\xFF\xFF\x0BGGML_OP_ADD',2,b'\xFF\xFF\xFF\x0BGGML_OP_ADD1',3,b'\xFF\xFF\xFF\x0BGGML_OP_ALIBI',40,b'\xFF\xFF\xFF\x0BGGML_OP_ARGMAX',14,b'\xFF\xFF\xFF\x0BGGML_OP_CLAMP',41,b'\xFF\xFF\xFF\x0BGGML_OP_CONT',26,b'\xFF\xFF\xFF\x0BGGML_OP_CONV_1D',42,b'\xFF\xFF\xFF\x0BGGML_OP_CONV_2D',43,b'\xFF\xFF\xFF\x0BGGML_OP_COUNT',62,b'\xFF\xFF\xFF\x0BGGML_OP_CPY',25,b'\xFF\xFF\xFF\x0BGGML_OP_CROSS_ENTROPY_LOSS',60,b'\xFF\xFF\xFF\x0BGGML_OP_CROSS_ENTROPY_LOSS_BACK',61,b'\xFF\xFF\xFF\x0BGGML_OP_DIAG',33,b'\xFF\xFF\xFF\x0BGGML_OP_DIAG_MASK_INF',34,b'\xFF\xFF\xFF\x0BGGML_OP_DIAG_MASK_ZERO',35,b'\xFF\xFF\xFF\x0BGGML_OP_DIV',7,b'\xFF\xFF\xFF\x0BGGML_OP_DUP',1,b'\xFF\xFF\xFF\x0BGGML_OP_FLASH_ATTN',46,b'\xFF\xFF\xFF\x0BGGML_OP_FLASH_ATTN_BACK',48,b'\xFF\xFF\xFF\x0BGGML_OP_FLASH_FF',47,b'\xFF\xFF\xFF\x0BGGML_OP_GET_ROWS',31,b'\xFF\xFF\xFF\x0BGGML_OP_GET_ROWS_BACK',32,b'\xFF\xFF\xFF\x0BGGML_OP_LOG',10,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_BINARY',53,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_CUSTOM1',57,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_CUSTOM1_F32',54,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_CUSTOM2',58,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_CUSTOM2_F32',55,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_CUSTOM3',59,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_CUSTOM3_F32',56,b'\xFF\xFF\xFF\x0BGGML_OP_MAP_UNARY',52,b'\xFF\xFF\xFF\x0BGGML_OP_MEAN',13,b'\xFF\xFF\xFF\x0BGGML_OP_MUL',6,b'\xFF\xFF\xFF\x0BGGML_OP_MUL_MAT',21,b'\xFF\xFF\xFF\x0BGGML_OP_NONE',0,b'\xFF\xFF\xFF\x0BGGML_OP_NORM',18,b'\xFF\xFF\xFF\x0BGGML_OP_OUT_PROD',22,b'\xFF\xFF\xFF\x0BGGML_OP_PERMUTE',29,b'\xFF\xFF\xFF\x0BGGML_OP_POOL_1D',44,b'\xFF\xFF\xFF\x0BGGML_OP_POOL_2D',45,b'\xFF\xFF\xFF\x0BGGML_OP_POOL_AVG',1,
b'\xFF\xFF\xFF\x0BGGML_OP_POOL_COUNT',2,b'\xFF\xFF\xFF\x0BGGML_OP_POOL_MAX',0,b'\xFF\xFF\xFF\x0BGGML_OP_REPEAT',15,b'\xFF\xFF\xFF\x0BGGML_OP_REPEAT_BACK',16,b'\xFF\xFF\xFF\x0BGGML_OP_RESHAPE',27,b'\xFF\xFF\xFF\x0BGGML_OP_RMS_NORM',19,b'\xFF\xFF\xFF\x0BGGML_OP_RMS_NORM_BACK',20,b'\xFF\xFF\xFF\x0BGGML_OP_ROPE',38,b'\xFF\xFF\xFF\x0BGGML_OP_ROPE_BACK',39,b'\xFF\xFF\xFF\x0BGGML_OP_SCALE',23,b'\xFF\xFF\xFF\x0BGGML_OP_SET',24,b'\xFF\xFF\xFF\x0BGGML_OP_SILU_BACK',17,b'\xFF\xFF\xFF\x0BGGML_OP_SOFT_MAX',36,b'\xFF\xFF\xFF\x0BGGML_OP_SOFT_MAX_BACK',37,b'\xFF\xFF\xFF\x0BGGML_OP_SQR',8,b'\xFF\xFF\xFF\x0BGGML_OP_SQRT',9,b'\xFF\xFF\xFF\x0BGGML_OP_SUB',5,b'\xFF\xFF\xFF\x0BGGML_OP_SUM',11,b'\xFF\xFF\xFF\x0BGGML_OP_SUM_ROWS',12,b'\xFF\xFF\xFF\x0BGGML_OP_TRANSPOSE',30,b'\xFF\xFF\xFF\x0BGGML_OP_UNARY',51,b'\xFF\xFF\xFF\x0BGGML_OP_VIEW',28,b'\xFF\xFF\xFF\x0BGGML_OP_WIN_PART',49,b'\xFF\xFF\xFF\x0BGGML_OP_WIN_UNPART',50,b'\xFF\xFF\xFF\x0BGGML_TASK_COMPUTE',1,b'\xFF\xFF\xFF\x0BGGML_TASK_FINALIZE',2,b'\xFF\xFF\xFF\x0BGGML_TASK_INIT',0,b'\xFF\xFF\xFF\x1FGGML_TENSOR_SIZE',288,b'\xFF\xFF\xFF\x0BGGML_TYPE_COUNT',19,b'\xFF\xFF\xFF\x0BGGML_TYPE_F16',1,b'\xFF\xFF\xFF\x0BGGML_TYPE_F32',0,b'\xFF\xFF\xFF\x0BGGML_TYPE_I16',17,b'\xFF\xFF\xFF\x0BGGML_TYPE_I32',18,b'\xFF\xFF\xFF\x0BGGML_TYPE_I8',16,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q2_K',10,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q3_K',11,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q4_0',2,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q4_1',3,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q4_K',12,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q5_0',6,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q5_1',7,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q5_K',13,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q6_K',14,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q8_0',8,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q8_1',9,b'\xFF\xFF\xFF\x0BGGML_TYPE_Q8_K',15,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_ABS',0,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_ELU',5,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_GELU',7,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_GELU_QUICK',8,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_NEG',2,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_RELU',6,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_SGN',1,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_SILU',9,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_STEP',3,b'\xFF\xFF\xFF\x0BGGML_UNARY_OP_TANH',4,b'\xFF\xFF\xFF\x0BGGUF_TYPE_ARRAY',9,b'\xFF\xFF\xFF\x0BGGUF_TYPE_BOOL',7,b'\xFF\xFF\xFF\x0BGGUF_TYPE_COUNT',10,b'\xFF\xFF\xFF\x0BGGUF_TYPE_FLOAT32',6,b'\xFF\xFF\xFF\x0BGGUF_TYPE_INT16',3,b'\xFF\xFF\xFF\x0BGGUF_TYPE_INT32',5,b'\xFF\xFF\xFF\x0BGGUF_TYPE_INT8',1,b'\xFF\xFF\xFF\x0BGGUF_TYPE_STRING',8,b'\xFF\xFF\xFF\x0BGGUF_TYPE_UINT16',2,b'\xFF\xFF\xFF\x0BGGUF_TYPE_UINT32',4,b'\xFF\xFF\xFF\x0BGGUF_TYPE_UINT8',0,b'\x00\x02\x9A\x23__assert_rtn',0,b'\x00\x02\x7C\x23dequantize_row_q2_K',0,b'\x00\x02\x81\x23dequantize_row_q3_K',0,b'\x00\x02\x86\x23dequantize_row_q4_K',0,b'\x00\x02\x8B\x23dequantize_row_q5_K',0,b'\x00\x02\x90\x23dequantize_row_q6_K',0,b'\x00\x02\x95\x23dequantize_row_q8_K',0,b'\x00\x00\xFA\x23ggml_abs',0,b'\x00\x00\xFA\x23ggml_abs_inplace',0,b'\x00\x01\xDD\x23ggml_acc',0,b'\x00\x01\xDD\x23ggml_acc_inplace',0,b'\x00\x01\x84\x23ggml_add',0,b'\x00\x01\x84\x23ggml_add1',0,b'\x00\x01\x84\x23ggml_add1_inplace',0,b'\x00\x01\x84\x23ggml_add_inplace',0,b'\x00\x01\x26\x23ggml_alibi',0,b'\x00\x02\xEC\x23ggml_allocr_alloc',0,b'\x00\x02\x42\x23ggml_allocr_alloc_graph',0,b'\x00\x02\xE4\x23ggml_allocr_free',0,b'\x00\x00\x03\x23ggml_allocr_is_measure',0,b'\x00\x00\xA2\x23ggml_allocr_new',0,b'\x00\x00\x9F\x23ggml_allocr_new_measure',0,b'\x00\x02\xE4\x23ggml_allocr_reset',0,b'\x00\x02\xE7\x23ggml_allocr_set_parse_seq',0,b'\x00\x00\x17\x23ggml_are_same_shape',0,b'\x00\x00\xFA\x23ggml_argmax',0,b'\x00\x00\x74\x23ggml_blck_si
ze',0,b'\x00\x00\xB3\x23ggml_build_backward',0,b'\x00\x00\xB8\x23ggml_build_forward',0,b'\x00\x00\xAA\x23ggml_build_forward_ctx',0,b'\x00\x02\xF3\x23ggml_build_forward_expand',0,b'\x00\x00\x1B\x23ggml_cl_can_mul_mat',0,b'\x00\x03\x6B\x23ggml_cl_free_data',0,b'\x00\x03\xE0\x23ggml_cl_host_free',0,b'\x00\x02\x72\x23ggml_cl_host_malloc',0,b'\x00\x03\xEC\x23ggml_cl_init',0,b'\x00\x03\x78\x23ggml_cl_mul',0,b'\x00\x03\x7D\x23ggml_cl_mul_mat',0,b'\x00\x02\x54\x23ggml_cl_mul_mat_get_wsize',0,b'\x00\x03\xE3\x23ggml_cl_transform_tensor',0,b'\x00\x01\x1B\x23ggml_clamp',0,b'\x00\x00\xFA\x23ggml_cont',0,b'\x00\x01\x90\x23ggml_conv_1d',0,b'\x00\x01\x89\x23ggml_conv_1d_ph',0,b'\x00\x01\x98\x23ggml_conv_2d',0,b'\x00\x00\x90\x23ggml_cpu_has_arm_fma',0,b'\x00\x00\x90\x23ggml_cpu_has_avx',0,b'\x00\x00\x90\x23ggml_cpu_has_avx2',0,b'\x00\x00\x90\x23ggml_cpu_has_avx512',0,b'\x00\x00\x90\x23ggml_cpu_has_avx512_vbmi',0,b'\x00\x00\x90\x23ggml_cpu_has_avx512_vnni',0,b'\x00\x00\x90\x23ggml_cpu_has_blas',0,b'\x00\x00\x90\x23ggml_cpu_has_clblast',0,b'\x00\x00\x90\x23ggml_cpu_has_cublas',0,b'\x00\x00\x90\x23ggml_cpu_has_f16c',0,b'\x00\x00\x90\x23ggml_cpu_has_fma',0,b'\x00\x00\x90\x23ggml_cpu_has_fp16_va',0,b'\x00\x00\x90\x23ggml_cpu_has_gpublas',0,b'\x00\x00\x90\x23ggml_cpu_has_neon',0,b'\x00\x00\x90\x23ggml_cpu_has_sse3',0,b'\x00\x00\x90\x23ggml_cpu_has_vsx',0,b'\x00\x00\x90\x23ggml_cpu_has_wasm_simd',0,b'\x00\x01\x84\x23ggml_cpy',0,b'\x00\x01\x84\x23ggml_cross_entropy_loss',0,b'\x00\x01\xA3\x23ggml_cross_entropy_loss_back',0,b'\x00\x03\x41\x23ggml_cuda_assign_buffers',0,b'\x00\x03\x41\x23ggml_cuda_assign_buffers_force_inplace',0,b'\x00\x03\x41\x23ggml_cuda_assign_buffers_no_scratch',0,b'\x00\x00\x1B\x23ggml_cuda_can_mul_mat',0,b'\x00\x00\x06\x23ggml_cuda_compute_forward',0,b'\x00\x03\x41\x23ggml_cuda_free_data',0,b'\x00\x03\xEC\x23ggml_cuda_free_scratch',0,b'\x00\x00\x90\x23ggml_cuda_get_device_count',0,b'\x00\x02\xCE\x23ggml_cuda_get_device_description',0,b'\x00\x03\xE0\x23ggml_cuda_host_free',0,b'\x00\x02\x72\x23ggml_cuda_host_malloc',0,b'\x00\x02\xCB\x23ggml_cuda_set_main_device',0,b'\x00\x02\x79\x23ggml_cuda_set_mul_mat_q',0,b'\x00\x03\xD8\x23ggml_cuda_set_scratch_size',0,b'\x00\x02\xA0\x23ggml_cuda_set_tensor_split',0,b'\x00\x03\xE3\x23ggml_cuda_transform_tensor',0,b'\x00\x00\x95\x23ggml_cycles',0,b'\x00\x00\x95\x23ggml_cycles_per_ms',0,b'\x00\x00\xFA\x23ggml_diag',0,b'\x00\x01\x21\x23ggml_diag_mask_inf',0,b'\x00\x01\x21\x23ggml_diag_mask_inf_inplace',0,b'\x00\x01\x21\x23ggml_diag_mask_zero',0,b'\x00\x01\x21\x23ggml_diag_mask_zero_inplace',0,b'\x00\x01\x84\x23ggml_div',0,b'\x00\x01\x84\x23ggml_div_inplace',0,b'\x00\x00\xFA\x23ggml_dup',0,b'\x00\x00\xFA\x23ggml_dup_inplace',0,b'\x00\x02\x0B\x23ggml_dup_tensor',0,b'\x00\x02\x4D\x23ggml_element_size',0,b'\x00\x00\xFA\x23ggml_elu',0,b'\x00\x00\xFA\x23ggml_elu_inplace',0,b'\x00\x01\xA9\x23ggml_flash_attn',0,b'\x00\x01\xB0\x23ggml_flash_attn_back',0,b'\x00\x01\xB8\x23ggml_flash_ff',0,b'\x00\x02\x16\x23ggml_format_name',0,b'\x00\x00\x6B\x23ggml_fp16_to_fp32',0,b'\x00\x03\xDB\x23ggml_fp16_to_fp32_row',0,b'\x00\x02\x62\x23ggml_fp32_to_fp16',0,b'\x00\x02\xC1\x23ggml_fp32_to_fp16_row',0,b'\x00\x03\x03\x23ggml_free',0,b'\x00\x00\x53\x23ggml_ftype_to_ggml_type',0,b'\x00\x00\xFA\x23ggml_gelu',0,b'\x00\x00\xFA\x23ggml_gelu_inplace',0,b'\x00\x00\xFA\x23ggml_gelu_quick',0,b'\x00\x00\xFA\x23ggml_gelu_quick_inplace',0,b'\x00\x02\x6C\x23ggml_get_data',0,b'\x00\x00\x5D\x23ggml_get_data_f32',0,b'\x00\x00\x63\x23ggml_get_f32_1d',0,b'\x00\x00\x81\x23ggml_get_i32_1d',0,b'\x00\x02\x4A\x23
ggml_get_max_tensor_size',0,b'\x00\x02\x69\x23ggml_get_mem_buffer',0,b'\x00\x02\x4A\x23ggml_get_mem_size',0,b'\x00\x00\x36\x23ggml_get_name',0,b'\x00\x00\x0A\x23ggml_get_no_alloc',0,b'\x00\x01\x84\x23ggml_get_rows',0,b'\x00\x01\xA3\x23ggml_get_rows_back',0,b'\x00\x00\xCE\x23ggml_get_tensor',0,b'\x00\x00\x56\x23ggml_get_unary_op',0,b'\x00\x00\x77\x23ggml_graph_compute',0,b'\x00\x03\x0A\x23ggml_graph_compute_with_ctx',0,b'\x00\x02\xFE\x23ggml_graph_dump_dot',0,b'\x00\x02\xFA\x23ggml_graph_export',0,b'\x00\x00\xCA\x23ggml_graph_get_tensor',0,b'\x00\x00\xAE\x23ggml_graph_import',0,b'\x00\x02\x60\x23ggml_graph_overhead',0,b'\x00\x00\xBE\x23ggml_graph_plan',0,b'\x00\x02\xF7\x23ggml_graph_print',0,b'\x00\x02\xF0\x23ggml_graph_reset',0,b'\x00\x00\xBB\x23ggml_init',0,b'\x00\x03\xEC\x23ggml_init_cublas',0,b'\x00\x00\x6E\x23ggml_internal_get_type_traits',0,b'\x00\x00\x14\x23ggml_is_contiguous',0,b'\x00\x00\x27\x23ggml_is_numa',0,b'\x00\x00\x14\x23ggml_is_permuted',0,b'\x00\x00\x00\x23ggml_is_quantized',0,b'\x00\x00\x14\x23ggml_is_transposed',0,b'\x00\x00\xFA\x23ggml_log',0,b'\x00\x00\xFA\x23ggml_log_inplace',0,b'\x00\x01\xE6\x23ggml_map_binary_f32',0,b'\x00\x01\xE6\x23ggml_map_binary_inplace_f32',0,b'\x00\x02\x04\x23ggml_map_custom1',0,b'\x00\x01\xFF\x23ggml_map_custom1_f32',0,b'\x00\x02\x04\x23ggml_map_custom1_inplace',0,b'\x00\x01\xFF\x23ggml_map_custom1_inplace_f32',0,b'\x00\x01\xF2\x23ggml_map_custom2',0,b'\x00\x01\xEC\x23ggml_map_custom2_f32',0,b'\x00\x01\xF2\x23ggml_map_custom2_inplace',0,b'\x00\x01\xEC\x23ggml_map_custom2_inplace_f32',0,b'\x00\x01\xC7\x23ggml_map_custom3',0,b'\x00\x01\xC0\x23ggml_map_custom3_f32',0,b'\x00\x01\xC7\x23ggml_map_custom3_inplace',0,b'\x00\x01\xC0\x23ggml_map_custom3_inplace_f32',0,b'\x00\x01\xFA\x23ggml_map_unary_f32',0,b'\x00\x01\xFA\x23ggml_map_unary_inplace_f32',0,b'\x00\x00\xFA\x23ggml_mean',0,b'\x00\x00\x0D\x23ggml_metal_add_buffer',0,b'\x00\x03\x1C\x23ggml_metal_free',0,b'\x00\x00\x71\x23ggml_metal_get_concur_list',0,b'\x00\x03\x2C\x23ggml_metal_get_tensor',0,b'\x00\x03\x23\x23ggml_metal_graph_compute',0,b'\x00\x03\x27\x23ggml_metal_graph_find_concurrency',0,b'\x00\x03\xE0\x23ggml_metal_host_free',0,b'\x00\x02\x72\x23ggml_metal_host_malloc',0,b'\x00\x00\x7B\x23ggml_metal_if_optimized',0,b'\x00\x00\xC2\x23ggml_metal_init',0,b'\x00\x03\x1F\x23ggml_metal_set_n_cb',0,b'\x00\x03\x2C\x23ggml_metal_set_tensor',0,b'\x00\x03\xEC\x23ggml_mpi_backend_free',0,b'\x00\x03\xEC\x23ggml_mpi_backend_init',0,b'\x00\x03\x33\x23ggml_mpi_eval_init',0,b'\x00\x03\x30\x23ggml_mpi_free',0,b'\x00\x03\x39\x23ggml_mpi_graph_compute_post',0,b'\x00\x03\x39\x23ggml_mpi_graph_compute_pre',0,b'\x00\x00\xC5\x23ggml_mpi_init',0,b'\x00\x00\x7E\x23ggml_mpi_rank',0,b'\x00\x01\x84\x23ggml_mul',0,b'\x00\x01\x84\x23ggml_mul_inplace',0,b'\x00\x01\x84\x23ggml_mul_mat',0,b'\x00\x02\x4D\x23ggml_nbytes',0,b'\x00\x02\x4D\x23ggml_nbytes_pad',0,b'\x00\x02\x50\x23ggml_nbytes_split',0,b'\x00\x00\xFA\x23ggml_neg',0,b'\x00\x00\xFA\x23ggml_neg_inplace',0,b'\x00\x00\x92\x23ggml_nelements',0,b'\x00\x00\xF2\x23ggml_new_f32',0,b'\x00\x00\xA7\x23ggml_new_graph',0,b'\x00\x00\xF6\x23ggml_new_i32',0,b'\x00\x00\xD2\x23ggml_new_tensor',0,b'\x00\x00\xD8\x23ggml_new_tensor_1d',0,b'\x00\x00\xDD\x23ggml_new_tensor_2d',0,b'\x00\x00\xE3\x23ggml_new_tensor_3d',0,b'\x00\x00\xEA\x23ggml_new_tensor_4d',0,b'\x00\x00\xFA\x23ggml_norm',0,b'\x00\x00\xFA\x23ggml_norm_inplace',0,b'\x00\x00\x92\x23ggml_nrows',0,b'\x00\x03\xEC\x23ggml_numa_init',0,b'\x00\x00\x2D\x23ggml_op_name',0,b'\x00\x00\x2D\x23ggml_op_symbol',0,b'\x00\x00\x4E\x23ggml_op
t',0,b'\x00\x00\xC7\x23ggml_opt_default_params',0,b'\x00\x03\x0F\x23ggml_opt_init',0,b'\x00\x00\x42\x23ggml_opt_resume',0,b'\x00\x00\x47\x23ggml_opt_resume_g',0,b'\x00\x01\x84\x23ggml_out_prod',0,b'\x00\x01\x34\x23ggml_permute',0,b'\x00\x00\xFE\x23ggml_pool_1d',0,b'\x00\x01\x06\x23ggml_pool_2d',0,b'\x00\x03\x3E\x23ggml_print_object',0,b'\x00\x03\x19\x23ggml_print_objects',0,b'\x00\x02\x33\x23ggml_quantize_chunk',0,b'\x00\x02\x3B\x23ggml_quantize_q2_K',0,b'\x00\x02\x3B\x23ggml_quantize_q3_K',0,b'\x00\x02\x3B\x23ggml_quantize_q4_0',0,b'\x00\x02\x3B\x23ggml_quantize_q4_1',0,b'\x00\x02\x3B\x23ggml_quantize_q4_K',0,b'\x00\x02\x3B\x23ggml_quantize_q5_0',0,b'\x00\x02\x3B\x23ggml_quantize_q5_1',0,b'\x00\x02\x3B\x23ggml_quantize_q5_K',0,b'\x00\x02\x3B\x23ggml_quantize_q6_K',0,b'\x00\x02\x3B\x23ggml_quantize_q8_0',0,b'\x00\x00\xFA\x23ggml_relu',0,b'\x00\x00\xFA\x23ggml_relu_inplace',0,b'\x00\x01\x84\x23ggml_repeat',0,b'\x00\x01\x84\x23ggml_repeat_back',0,b'\x00\x01\x84\x23ggml_reshape',0,b'\x00\x01\x46\x23ggml_reshape_1d',0,b'\x00\x01\x4B\x23ggml_reshape_2d',0,b'\x00\x01\x51\x23ggml_reshape_3d',0,b'\x00\x01\x58\x23ggml_reshape_4d',0,b'\x00\x01\x16\x23ggml_rms_norm',0,b'\x00\x01\x84\x23ggml_rms_norm_back',0,b'\x00\x01\x16\x23ggml_rms_norm_inplace',0,b'\x00\x01\x34\x23ggml_rope',0,b'\x00\x01\x34\x23ggml_rope_back',0,b'\x00\x01\x3C\x23ggml_rope_custom',0,b'\x00\x01\x3C\x23ggml_rope_custom_inplace',0,b'\x00\x01\x34\x23ggml_rope_inplace',0,b'\x00\x01\x84\x23ggml_scale',0,b'\x00\x01\x84\x23ggml_scale_inplace',0,b'\x00\x01\xDD\x23ggml_set',0,b'\x00\x01\xD0\x23ggml_set_1d',0,b'\x00\x01\xD0\x23ggml_set_1d_inplace',0,b'\x00\x01\xD6\x23ggml_set_2d',0,b'\x00\x01\xD6\x23ggml_set_2d_inplace',0,b'\x00\x02\x1A\x23ggml_set_f32',0,b'\x00\x03\x6E\x23ggml_set_f32_1d',0,b'\x00\x02\x1E\x23ggml_set_i32',0,b'\x00\x03\x73\x23ggml_set_i32_1d',0,b'\x00\x01\xDD\x23ggml_set_inplace',0,b'\x00\x02\x12\x23ggml_set_name',0,b'\x00\x03\x06\x23ggml_set_no_alloc',0,b'\x00\x03\x15\x23ggml_set_param',0,b'\x00\x02\x46\x23ggml_set_scratch',0,b'\x00\x02\x0F\x23ggml_set_zero',0,b'\x00\x00\xFA\x23ggml_sgn',0,b'\x00\x00\xFA\x23ggml_sgn_inplace',0,b'\x00\x00\xFA\x23ggml_silu',0,b'\x00\x01\x84\x23ggml_silu_back',0,b'\x00\x00\xFA\x23ggml_silu_inplace',0,b'\x00\x00\xFA\x23ggml_soft_max',0,b'\x00\x01\x84\x23ggml_soft_max_back',0,b'\x00\x01\x84\x23ggml_soft_max_back_inplace',0,b'\x00\x00\xFA\x23ggml_soft_max_inplace',0,b'\x00\x00\xFA\x23ggml_sqr',0,b'\x00\x00\xFA\x23ggml_sqr_inplace',0,b'\x00\x00\xFA\x23ggml_sqrt',0,b'\x00\x00\xFA\x23ggml_sqrt_inplace',0,b'\x00\x00\xFA\x23ggml_step',0,b'\x00\x00\xFA\x23ggml_step_inplace',0,b'\x00\x01\x84\x23ggml_sub',0,b'\x00\x01\x84\x23ggml_sub_inplace',0,b'\x00\x00\xFA\x23ggml_sum',0,b'\x00\x00\xFA\x23ggml_sum_rows',0,b'\x00\x00\xFA\x23ggml_tanh',0,b'\x00\x00\xFA\x23ggml_tanh_inplace',0,b'\x00\x02\x60\x23ggml_tensor_overhead',0,b'\x00\x03\xEC\x23ggml_time_init',0,b'\x00\x00\x95\x23ggml_time_ms',0,b'\x00\x00\x95\x23ggml_time_us',0,b'\x00\x00\xFA\x23ggml_transpose',0,b'\x00\x00\x30\x23ggml_type_name',0,b'\x00\x02\x30\x23ggml_type_size',0,b'\x00\x00\x60\x23ggml_type_sizef',0,b'\x00\x01\x11\x23ggml_unary',0,b'\x00\x01\x11\x23ggml_unary_inplace',0,b'\x00\x02\x4A\x23ggml_used_mem',0,b'\x00\x02\xDE\x23ggml_vec_dot_q2_K_q8_K',0,b'\x00\x02\xDE\x23ggml_vec_dot_q3_K_q8_K',0,b'\x00\x02\xDE\x23ggml_vec_dot_q4_K_q8_K',0,b'\x00\x02\xDE\x23ggml_vec_dot_q5_K_q8_K',0,b'\x00\x02\xDE\x23ggml_vec_dot_q6_K_q8_K',0,b'\x00\x01\x7E\x23ggml_view_1d',0,b'\x00\x01\x76\x23ggml_view_2d',0,b'\x00\x01\x6C\x23ggml_view_3d',0,b'\x00\x01\x60\x23ggml
_view_4d',0,b'\x00\x02\x0B\x23ggml_view_tensor',0,b'\x00\x01\x21\x23ggml_win_part',0,b'\x00\x01\x2D\x23ggml_win_unpart',0,b'\x00\x03\xCC\x23gguf_add_tensor',0,b'\x00\x00\x88\x23gguf_find_key',0,b'\x00\x00\x88\x23gguf_find_tensor',0,b'\x00\x03\x84\x23gguf_free',0,b'\x00\x02\x59\x23gguf_get_alignment',0,b'\x00\x02\x75\x23gguf_get_arr_data',0,b'\x00\x00\x8C\x23gguf_get_arr_n',0,b'\x00\x00\x3D\x23gguf_get_arr_str',0,b'\x00\x00\x59\x23gguf_get_arr_type',0,b'\x00\x02\x6F\x23gguf_get_data',0,b'\x00\x02\x59\x23gguf_get_data_offset',0,b'\x00\x00\x39\x23gguf_get_key',0,b'\x00\x00\x59\x23gguf_get_kv_type',0,b'\x00\x03\xD4\x23gguf_get_meta_data',0,b'\x00\x02\x59\x23gguf_get_meta_size',0,b'\x00\x00\x85\x23gguf_get_n_kv',0,b'\x00\x00\x85\x23gguf_get_n_tensors',0,b'\x00\x00\x29\x23gguf_get_tensor_name',0,b'\x00\x02\x5C\x23gguf_get_tensor_offset',0,b'\x00\x00\x20\x23gguf_get_val_bool',0,b'\x00\x00\x67\x23gguf_get_val_f32',0,b'\x00\x00\x97\x23gguf_get_val_i16',0,b'\x00\x00\x8C\x23gguf_get_val_i32',0,b'\x00\x00\x9B\x23gguf_get_val_i8',0,b'\x00\x00\x39\x23gguf_get_val_str',0,b'\x00\x02\x65\x23gguf_get_val_u16',0,b'\x00\x02\x2C\x23gguf_get_val_u32',0,b'\x00\x02\x28\x23gguf_get_val_u8',0,b'\x00\x00\x85\x23gguf_get_version',0,b'\x00\x02\x26\x23gguf_init_empty',0,b'\x00\x02\x22\x23gguf_init_from_file',0,b'\x00\x03\x9C\x23gguf_set_arr_data',0,b'\x00\x03\x8C\x23gguf_set_arr_str',0,b'\x00\x03\xD0\x23gguf_set_kv',0,b'\x00\x03\xC6\x23gguf_set_tensor_data',0,b'\x00\x03\x97\x23gguf_set_tensor_type',0,b'\x00\x03\x87\x23gguf_set_val_bool',0,b'\x00\x03\xA3\x23gguf_set_val_f32',0,b'\x00\x03\xAD\x23gguf_set_val_i16',0,b'\x00\x03\xA8\x23gguf_set_val_i32',0,b'\x00\x03\xB2\x23gguf_set_val_i8',0,b'\x00\x03\x92\x23gguf_set_val_str',0,b'\x00\x03\xC1\x23gguf_set_val_u16',0,b'\x00\x03\xBC\x23gguf_set_val_u32',0,b'\x00\x03\xB7\x23gguf_set_val_u8',0,b'\x00\x00\x33\x23gguf_type_name',0,b'\x00\x03\x87\x23gguf_write_to_file',0,b'\x00\x02\xC6\x23quantize_row_q2_K',0,b'\x00\x02\xA3\x23quantize_row_q2_K_reference',0,b'\x00\x02\xC6\x23quantize_row_q3_K',0,b'\x00\x02\xA8\x23quantize_row_q3_K_reference',0,b'\x00\x02\xC6\x23quantize_row_q4_K',0,b'\x00\x02\xAD\x23quantize_row_q4_K_reference',0,b'\x00\x02\xC6\x23quantize_row_q5_K',0,b'\x00\x02\xB2\x23quantize_row_q5_K_reference',0,b'\x00\x02\xC6\x23quantize_row_q6_K',0,b'\x00\x02\xB7\x23quantize_row_q6_K_reference',0,b'\x00\x02\xC6\x23quantize_row_q8_K',0,b'\x00\x02\xBC\x23quantize_row_q8_K_reference',0), _struct_unions = 
((b'\x00\x00\x04\x27\x00\x00\x00\x02$1',b'\x00\x00\x22\x11n_iter',b'\x00\x00\xF4\x11sched',b'\x00\x00\xF4\x11decay',b'\x00\x00\xF4\x11alpha',b'\x00\x00\xF4\x11beta1',b'\x00\x00\xF4\x11beta2',b'\x00\x00\xF4\x11eps',b'\x00\x00\xF4\x11eps_f',b'\x00\x00\xF4\x11eps_g'),(b'\x00\x00\x04\x28\x00\x00\x00\x02$2',b'\x00\x00\x22\x11m',b'\x00\x00\x22\x11n_iter',b'\x00\x00\x22\x11max_linesearch',b'\x00\x00\xF4\x11eps',b'\x00\x00\xF4\x11ftol',b'\x00\x00\xF4\x11wolfe',b'\x00\x00\xF4\x11min_step',b'\x00\x00\xF4\x11max_step',b'\x00\x04\x14\x11linesearch'),(b'\x00\x00\x04\x29\x00\x00\x00\x02$3',b'\x00\x00\x08\x11x',b'\x00\x00\x08\x11g1',b'\x00\x00\x08\x11g2',b'\x00\x00\x08\x11m',b'\x00\x00\x08\x11v',b'\x00\x00\x08\x11mh',b'\x00\x00\x08\x11vh',b'\x00\x00\x08\x11pf',b'\x00\x00\xF4\x11fx_best',b'\x00\x00\xF4\x11fx_prev',b'\x00\x00\x22\x11n_no_improvement'),(b'\x00\x00\x04\x2A\x00\x00\x00\x02$4',b'\x00\x00\x08\x11x',b'\x00\x00\x08\x11xp',b'\x00\x00\x08\x11g',b'\x00\x00\x08\x11gp',b'\x00\x00\x08\x11d',b'\x00\x00\x08\x11pf',b'\x00\x00\x08\x11lmal',b'\x00\x00\x08\x11lmys',b'\x00\x00\x08\x11lms',b'\x00\x00\x08\x11lmy',b'\x00\x00\xF4\x11fx_best',b'\x00\x00\xF4\x11step',b'\x00\x00\x22\x11j',b'\x00\x00\x22\x11k',b'\x00\x00\x22\x11end',b'\x00\x00\x22\x11n_no_improvement'),(b'\x00\x00\x03\xF7\x00\x00\x00\x03$__mbstate_t',b'\x00\x03\xFF\x11__mbstate8',b'\x00\x00\xDB\x11_mbstateL'),(b'\x00\x00\x03\xF8\x00\x00\x00\x02$block_q2_K',b'\x00\x04\x44\x11scales',b'\x00\x04\x48\x11qs',b'\x00\x00\x6C\x11d',b'\x00\x00\x6C\x11dmin'),(b'\x00\x00\x03\xF9\x00\x00\x00\x02$block_q3_K',b'\x00\x04\x46\x11hmask',b'\x00\x04\x48\x11qs',b'\x00\x04\x42\x11scales',b'\x00\x00\x6C\x11d'),(b'\x00\x00\x03\xFA\x00\x00\x00\x02$block_q4_K',b'\x00\x00\x6C\x11d',b'\x00\x00\x6C\x11dmin',b'\x00\x04\x42\x11scales',b'\x00\x04\x40\x11qs'),(b'\x00\x00\x03\xFB\x00\x00\x00\x02$block_q5_K',b'\x00\x00\x6C\x11d',b'\x00\x00\x6C\x11dmin',b'\x00\x04\x42\x11scales',b'\x00\x04\x46\x11qh',b'\x00\x04\x40\x11qs'),(b'\x00\x00\x03\xFC\x00\x00\x00\x02$block_q6_K',b'\x00\x04\x40\x11ql',b'\x00\x04\x48\x11qh',b'\x00\x04\x23\x11scales',b'\x00\x00\x6C\x11d'),(b'\x00\x00\x03\xFD\x00\x00\x00\x02$block_q8_K',b'\x00\x00\xF4\x11d',b'\x00\x04\x25\x11qs',b'\x00\x04\x21\x11bsums'),(b'\x00\x00\x04\x18\x00\x00\x00\x02$ggml_type_traits_t',b'\x00\x00\x0F\x11type_name',b'\x00\x00\x22\x11blck_size',b'\x00\x00\x11\x11type_size',b'\x00\x00\xB6\x11is_quantized',b'\x00\x04\x52\x11to_float',b'\x00\x04\x4F\x11from_float',b'\x00\x04\x4F\x11from_float_reference',b'\x00\x04\x50\x11vec_dot',b'\x00\x00\x01\x11vec_dot_type'),(b'\x00\x00\x04\x2C\x00\x00\x00\x02__darwin_pthread_handler_rec',b'\x00\x04\x51\x11__routine',b'\x00\x00\x10\x11__arg',b'\x00\x04\x2B\x11__next'),(b'\x00\x00\x03\xEF\x00\x00\x00\x02_opaque_pthread_attr_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x0B\x11__opaque'),(b'\x00\x00\x03\xF0\x00\x00\x00\x02_opaque_pthread_cond_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x07\x11__opaque'),(b'\x00\x00\x03\xF1\x00\x00\x00\x02_opaque_pthread_condattr_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x11\x11__opaque'),(b'\x00\x00\x03\xF2\x00\x00\x00\x02_opaque_pthread_mutex_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x0B\x11__opaque'),(b'\x00\x00\x03\xF3\x00\x00\x00\x02_opaque_pthread_mutexattr_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x11\x11__opaque'),(b'\x00\x00\x03\xF4\x00\x00\x00\x02_opaque_pthread_once_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x11\x11__opaque'),(b'\x00\x00\x03\xF5\x00\x00\x00\x02_opaque_pthread_rwlock_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x03\x11__opaque'),(b'\x00\x00\x03\xF6\x00\x00\x00\x02_opaque_pthread_rwl
ockattr_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x01\x11__opaque'),(b'\x00\x00\x04\x2E\x00\x00\x00\x02_opaque_pthread_t',b'\x00\x04\x20\x11__sig',b'\x00\x04\x2B\x11__cleanup_stack',b'\x00\x04\x0F\x11__opaque'),(b'\x00\x00\x04\x2F\x00\x00\x00\x10ggml_allocr',),(b'\x00\x00\x04\x30\x00\x00\x00\x02ggml_cgraph',b'\x00\x00\x22\x11n_nodes',b'\x00\x00\x22\x11n_leafs',b'\x00\x04\x39\x11nodes',b'\x00\x04\x39\x11grads',b'\x00\x04\x39\x11leafs',b'\x00\x04\x4D\x11visited_hash_table',b'\x00\x00\x22\x11perf_runs',b'\x00\x00\xDB\x11perf_cycles',b'\x00\x00\xDB\x11perf_time_us'),(b'\x00\x00\x04\x31\x00\x00\x00\x02ggml_compute_params',b'\x00\x04\x17\x11type',b'\x00\x00\x22\x11ith',b'\x00\x00\x22\x11nth',b'\x00\x00\x11\x11wsize',b'\x00\x00\x10\x11wdata'),(b'\x00\x00\x04\x32\x00\x00\x00\x10ggml_context',),(b'\x00\x00\x04\x33\x00\x00\x00\x02ggml_cplan',b'\x00\x00\x11\x11work_size',b'\x00\x04\x3F\x11work_data',b'\x00\x00\x22\x11n_threads',b'\x00\x04\x19\x11n_tasks',b'\x00\x03\xEE\x11abort_callback',b'\x00\x00\x10\x11abort_callback_data'),(b'\x00\x00\x00\xBC\x00\x00\x00\x02ggml_init_params',b'\x00\x00\x11\x11mem_size',b'\x00\x00\x10\x11mem_buffer',b'\x00\x00\xB6\x11no_alloc'),(b'\x00\x00\x04\x34\x00\x00\x00\x10ggml_metal_context',),(b'\x00\x00\x04\x35\x00\x00\x00\x10ggml_mpi_context',),(b'\x00\x00\x04\x37\x00\x00\x00\x02ggml_object',b'\x00\x00\x11\x11offs',b'\x00\x00\x11\x11size',b'\x00\x04\x36\x11next',b'\x00\x04\x15\x11type',b'\x00\x04\x09\x11padding'),(b'\x00\x00\x04\x38\x00\x00\x00\x02ggml_opt_context',b'\x00\x00\x0B\x11ctx',b'\x00\x00\x50\x11params',b'\x00\x00\x22\x11iter',b'\x00\x00\xDB\x11nx',b'\x00\x00\xB6\x11just_initialized',b'\x00\x04\x29\x11adam',b'\x00\x04\x2A\x11lbfgs'),(b'\x00\x00\x00\x50\x00\x00\x00\x02ggml_opt_params',b'\x00\x00\xC8\x11type',b'\x00\x00\x22\x11n_threads',b'\x00\x00\x22\x11past',b'\x00\x00\xF4\x11delta',b'\x00\x00\x22\x11max_no_improvement',b'\x00\x00\xB6\x11print_forward_graph',b'\x00\x00\xB6\x11print_backward_graph',b'\x00\x04\x27\x11adam',b'\x00\x04\x28\x11lbfgs'),(b'\x00\x00\x02\x48\x00\x00\x00\x02ggml_scratch',b'\x00\x00\x11\x11offs',b'\x00\x00\x11\x11size',b'\x00\x00\x10\x11data'),(b'\x00\x00\x04\x3D\x00\x00\x00\x02ggml_tensor',b'\x00\x00\x01\x11type',b'\x00\x04\x13\x11backend',b'\x00\x00\x22\x11n_dims',b'\x00\x04\x1E\x11ne',b'\x00\x04\x4B\x11nb',b'\x00\x00\x2E\x11op',b'\x00\x04\x1B\x11op_params',b'\x00\x00\xB6\x11is_param',b'\x00\x00\x08\x11grad',b'\x00\x04\x3B\x11src',b'\x00\x00\x22\x11perf_runs',b'\x00\x00\xDB\x11perf_cycles',b'\x00\x00\xDB\x11perf_time_us',b'\x00\x00\x10\x11data',b'\x00\x04\x0D\x11name',b'\x00\x00\x10\x11extra',b'\x00\x04\x09\x11padding'),(b'\x00\x00\x04\x3E\x00\x00\x00\x10gguf_context',),(b'\x00\x00\x02\x24\x00\x00\x00\x02gguf_init_params',b'\x00\x00\xB6\x11no_alloc',b'\x00\x00\xB0\x11ctx')), _enums = 
(b'\x00\x00\x04\x13\x00\x00\x00\x16ggml_backend\x00GGML_BACKEND_CPU,GGML_BACKEND_GPU,GGML_BACKEND_GPU_SPLIT',b'\x00\x00\x00\x54\x00\x00\x00\x15ggml_ftype\x00GGML_FTYPE_UNKNOWN,GGML_FTYPE_ALL_F32,GGML_FTYPE_MOSTLY_F16,GGML_FTYPE_MOSTLY_Q4_0,GGML_FTYPE_MOSTLY_Q4_1,GGML_FTYPE_MOSTLY_Q4_1_SOME_F16,GGML_FTYPE_MOSTLY_Q8_0,GGML_FTYPE_MOSTLY_Q5_0,GGML_FTYPE_MOSTLY_Q5_1,GGML_FTYPE_MOSTLY_Q2_K,GGML_FTYPE_MOSTLY_Q3_K,GGML_FTYPE_MOSTLY_Q4_K,GGML_FTYPE_MOSTLY_Q5_K,GGML_FTYPE_MOSTLY_Q6_K',b'\x00\x00\x04\x14\x00\x00\x00\x16ggml_linesearch\x00GGML_LINESEARCH_DEFAULT,GGML_LINESEARCH_BACKTRACKING_ARMIJO,GGML_LINESEARCH_BACKTRACKING_WOLFE,GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE',b'\x00\x00\x04\x15\x00\x00\x00\x16ggml_object_type\x00GGML_OBJECT_TENSOR,GGML_OBJECT_GRAPH,GGML_OBJECT_WORK_BUFFER',b'\x00\x00\x00\x2E\x00\x00\x00\x16ggml_op\x00GGML_OP_NONE,GGML_OP_DUP,GGML_OP_ADD,GGML_OP_ADD1,GGML_OP_ACC,GGML_OP_SUB,GGML_OP_MUL,GGML_OP_DIV,GGML_OP_SQR,GGML_OP_SQRT,GGML_OP_LOG,GGML_OP_SUM,GGML_OP_SUM_ROWS,GGML_OP_MEAN,GGML_OP_ARGMAX,GGML_OP_REPEAT,GGML_OP_REPEAT_BACK,GGML_OP_SILU_BACK,GGML_OP_NORM,GGML_OP_RMS_NORM,GGML_OP_RMS_NORM_BACK,GGML_OP_MUL_MAT,GGML_OP_OUT_PROD,GGML_OP_SCALE,GGML_OP_SET,GGML_OP_CPY,GGML_OP_CONT,GGML_OP_RESHAPE,GGML_OP_VIEW,GGML_OP_PERMUTE,GGML_OP_TRANSPOSE,GGML_OP_GET_ROWS,GGML_OP_GET_ROWS_BACK,GGML_OP_DIAG,GGML_OP_DIAG_MASK_INF,GGML_OP_DIAG_MASK_ZERO,GGML_OP_SOFT_MAX,GGML_OP_SOFT_MAX_BACK,GGML_OP_ROPE,GGML_OP_ROPE_BACK,GGML_OP_ALIBI,GGML_OP_CLAMP,GGML_OP_CONV_1D,GGML_OP_CONV_2D,GGML_OP_POOL_1D,GGML_OP_POOL_2D,GGML_OP_FLASH_ATTN,GGML_OP_FLASH_FF,GGML_OP_FLASH_ATTN_BACK,GGML_OP_WIN_PART,GGML_OP_WIN_UNPART,GGML_OP_UNARY,GGML_OP_MAP_UNARY,GGML_OP_MAP_BINARY,GGML_OP_MAP_CUSTOM1_F32,GGML_OP_MAP_CUSTOM2_F32,GGML_OP_MAP_CUSTOM3_F32,GGML_OP_MAP_CUSTOM1,GGML_OP_MAP_CUSTOM2,GGML_OP_MAP_CUSTOM3,GGML_OP_CROSS_ENTROPY_LOSS,GGML_OP_CROSS_ENTROPY_LOSS_BACK,GGML_OP_COUNT',b'\x00\x00\x01\x01\x00\x00\x00\x16ggml_op_pool\x00GGML_OP_POOL_MAX,GGML_OP_POOL_AVG,GGML_OP_POOL_COUNT',b'\x00\x00\x04\x16\x00\x00\x00\x15ggml_opt_result\x00GGML_OPT_OK,GGML_OPT_DID_NOT_CONVERGE,GGML_OPT_NO_CONTEXT,GGML_OPT_INVALID_WOLFE,GGML_OPT_FAIL,GGML_LINESEARCH_FAIL,GGML_LINESEARCH_MINIMUM_STEP,GGML_LINESEARCH_MAXIMUM_STEP,GGML_LINESEARCH_MAXIMUM_ITERATIONS,GGML_LINESEARCH_INVALID_PARAMETERS',b'\x00\x00\x00\xC8\x00\x00\x00\x16ggml_opt_type\x00GGML_OPT_ADAM,GGML_OPT_LBFGS',b'\x00\x00\x04\x17\x00\x00\x00\x16ggml_task_type\x00GGML_TASK_INIT,GGML_TASK_COMPUTE,GGML_TASK_FINALIZE',b'\x00\x00\x00\x01\x00\x00\x00\x16ggml_type\x00GGML_TYPE_F32,GGML_TYPE_F16,GGML_TYPE_Q4_0,GGML_TYPE_Q4_1,GGML_TYPE_Q5_0,GGML_TYPE_Q5_1,GGML_TYPE_Q8_0,GGML_TYPE_Q8_1,GGML_TYPE_Q2_K,GGML_TYPE_Q3_K,GGML_TYPE_Q4_K,GGML_TYPE_Q5_K,GGML_TYPE_Q6_K,GGML_TYPE_Q8_K,GGML_TYPE_I8,GGML_TYPE_I16,GGML_TYPE_I32,GGML_TYPE_COUNT',b'\x00\x00\x01\x14\x00\x00\x00\x16ggml_unary_op\x00GGML_UNARY_OP_ABS,GGML_UNARY_OP_SGN,GGML_UNARY_OP_NEG,GGML_UNARY_OP_STEP,GGML_UNARY_OP_TANH,GGML_UNARY_OP_ELU,GGML_UNARY_OP_RELU,GGML_UNARY_OP_GELU,GGML_UNARY_OP_GELU_QUICK,GGML_UNARY_OP_SILU',b'\x00\x00\x00\x34\x00\x00\x00\x16gguf_type\x00GGUF_TYPE_UINT8,GGUF_TYPE_INT8,GGUF_TYPE_UINT16,GGUF_TYPE_INT16,GGUF_TYPE_UINT32,GGUF_TYPE_INT32,GGUF_TYPE_FLOAT32,GGUF_TYPE_BOOL,GGUF_TYPE_STRING,GGUF_TYPE_ARRAY,GGUF_TYPE_COUNT'), _typenames = 
(b'\x00\x00\x00\xDB__darwin_blkcnt_t',b'\x00\x00\x00\x22__darwin_blksize_t',b'\x00\x00\x00\x11__darwin_clock_t',b'\x00\x00\x00\x22__darwin_ct_rune_t',b'\x00\x00\x00\x22__darwin_dev_t',b'\x00\x00\x03\xBF__darwin_fsblkcnt_t',b'\x00\x00\x03\xBF__darwin_fsfilcnt_t',b'\x00\x00\x03\xBF__darwin_gid_t',b'\x00\x00\x03\xBF__darwin_id_t',b'\x00\x00\x04\x4A__darwin_ino64_t',b'\x00\x00\x04\x4A__darwin_ino_t',b'\x00\x00\x04\x20__darwin_intptr_t',b'\x00\x00\x03\xBF__darwin_mach_port_name_t',b'\x00\x00\x03\xBF__darwin_mach_port_t',b'\x00\x00\x03\xF7__darwin_mbstate_t',b'\x00\x00\x00\x6C__darwin_mode_t',b'\x00\x00\x03\xBF__darwin_natural_t',b'\x00\x00\x00\xDB__darwin_off_t',b'\x00\x00\x00\x22__darwin_pid_t',b'\x00\x00\x03\xEF__darwin_pthread_attr_t',b'\x00\x00\x03\xF0__darwin_pthread_cond_t',b'\x00\x00\x03\xF1__darwin_pthread_condattr_t',b'\x00\x00\x00\x11__darwin_pthread_key_t',b'\x00\x00\x03\xF2__darwin_pthread_mutex_t',b'\x00\x00\x03\xF3__darwin_pthread_mutexattr_t',b'\x00\x00\x03\xF4__darwin_pthread_once_t',b'\x00\x00\x03\xF5__darwin_pthread_rwlock_t',b'\x00\x00\x03\xF6__darwin_pthread_rwlockattr_t',b'\x00\x00\x04\x2D__darwin_pthread_t',b'\x00\x00\x04\x20__darwin_ptrdiff_t',b'\x00\x00\x00\x22__darwin_rune_t',b'\x00\x00\x03\xBF__darwin_sigset_t',b'\x00\x00\x00\x11__darwin_size_t',b'\x00\x00\x03\xBF__darwin_socklen_t',b'\x00\x00\x04\x20__darwin_ssize_t',b'\x00\x00\x00\x22__darwin_suseconds_t',b'\x00\x00\x04\x20__darwin_time_t',b'\x00\x00\x03\xBF__darwin_uid_t',b'\x00\x00\x03\xBF__darwin_useconds_t',b'\x00\x00\x04\x05__darwin_uuid_string_t',b'\x00\x00\x04\x44__darwin_uuid_t',b'\x00\x00\x00\x22__darwin_wchar_t',b'\x00\x00\x00\x22__darwin_wint_t',b'\x00\x00\x03\xB0__int16_t',b'\x00\x00\x00\x22__int32_t',b'\x00\x00\x00\xDB__int64_t',b'\x00\x00\x03\xB5__int8_t',b'\x00\x00\x03\xF7__mbstate_t',b'\x00\x00\x00\x6C__uint16_t',b'\x00\x00\x03\xBF__uint32_t',b'\x00\x00\x04\x4A__uint64_t',b'\x00\x00\x03\xBA__uint8_t',b'\x00\x00\x03\xF8block_q2_K',b'\x00\x00\x03\xF9block_q3_K',b'\x00\x00\x03\xFAblock_q4_K',b'\x00\x00\x03\xFBblock_q5_K',b'\x00\x00\x03\xFCblock_q6_K',b'\x00\x00\x03\xFDblock_q8_K',b'\x00\x00\x01\xEAggml_binary_op_f32_t',b'\x00\x00\x02\x02ggml_custom1_op_f32_t',b'\x00\x00\x02\x07ggml_custom1_op_t',b'\x00\x00\x01\xF0ggml_custom2_op_f32_t',b'\x00\x00\x01\xF6ggml_custom2_op_t',b'\x00\x00\x01\xC5ggml_custom3_op_f32_t',b'\x00\x00\x01\xCCggml_custom3_op_t',b'\x00\x00\x00\x6Cggml_fp16_t',b'\x00\x00\x04\x4Fggml_from_float_t',b'\x00\x00\x04\x52ggml_to_float_t',b'\x00\x00\x04\x18ggml_type_traits_t',b'\x00\x00\x01\xFDggml_unary_op_f32_t',b'\x00\x00\x04\x50ggml_vec_dot_t',b'\x00\x00\x03\xB0int16_t',b'\x00\x00\x00\x22int32_t',b'\x00\x00\x00\xDBint64_t',b'\x00\x00\x03\xB5int8_t',b'\x00\x00\x03\xB0int_fast16_t',b'\x00\x00\x00\x22int_fast32_t',b'\x00\x00\x00\xDBint_fast64_t',b'\x00\x00\x03\xB5int_fast8_t',b'\x00\x00\x03\xB0int_least16_t',b'\x00\x00\x00\x22int_least32_t',b'\x00\x00\x00\xDBint_least64_t',b'\x00\x00\x03\xB5int_least8_t',b'\x00\x00\x04\x20intmax_t',b'\x00\x00\x04\x20intptr_t',b'\x00\x00\x04\x1Dmax_align_t',b'\x00\x00\x04\x20ptrdiff_t',b'\x00\x00\x00\xDBregister_t',b'\x00\x00\x00\x11rsize_t',b'\x00\x00\x00\x11size_t',b'\x00\x00\x04\x4Asyscall_arg_t',b'\x00\x00\x00\x6Cu_int16_t',b'\x00\x00\x03\xBFu_int32_t',b'\x00\x00\x04\x4Au_int64_t',b'\x00\x00\x03\xBAu_int8_t',b'\x00\x00\x00\x6Cuint16_t',b'\x00\x00\x03\xBFuint32_t',b'\x00\x00\x04\x4Auint64_t',b'\x00\x00\x03\xBAuint8_t',b'\x00\x00\x00\x6Cuint_fast16_t',b'\x00\x00\x03\xBFuint_fast32_t',b'\x00\x00\x04\x4Auint_fast64_t',b'\x00\x00\x03\xBAuint_fast8_t',b'\x00\x00\
x00\x6Cuint_least16_t',b'\x00\x00\x03\xBFuint_least32_t',b'\x00\x00\x04\x4Auint_least64_t',b'\x00\x00\x03\xBAuint_least8_t',b'\x00\x00\x00\x11uintmax_t',b'\x00\x00\x00\x11uintptr_t',b'\x00\x00\x04\x4Auser_addr_t',b'\x00\x00\x00\xDBuser_long_t',b'\x00\x00\x00\xDBuser_off_t',b'\x00\x00\x04\x4Auser_size_t',b'\x00\x00\x00\xDBuser_ssize_t',b'\x00\x00\x00\xDBuser_time_t',b'\x00\x00\x04\x4Auser_ulong_t',b'\x00\x00\x00\x22wchar_t'), ) ggml-org-ggml-3678254/examples/python/ggml/ffi/000077500000000000000000000000001512524704700212155ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/python/ggml/ffi/__init__.pyi000066400000000000000000000000741512524704700235000ustar00rootroot00000000000000# Phony stubs. class CData: pass class CType: passggml-org-ggml-3678254/examples/python/ggml/utils.py000066400000000000000000000214051512524704700221650ustar00rootroot00000000000000""" Common helpers for working with ggml + numpy """ from ggml import ffi, lib from typing import Union, Optional import numpy as np def init(mem_size: int, mem_buffer: ffi.CData = ffi.NULL, no_alloc: bool = False) -> ffi.CData: """ Initialize a ggml context, which will be freed automatically when the pointer is garbage collected. """ params = ffi.new('struct ggml_init_params*') params.mem_size = mem_size params.mem_buffer = mem_buffer params.no_alloc = no_alloc return ffi.gc(lib.ggml_init(params[0]), lib.ggml_free) TensorLike = Union[ffi.CData, np.ndarray] def copy(from_tensor: TensorLike, to_tensor: TensorLike, allow_requantize: bool = True): """ Copy the contents of one tensor to another, doing any necessary (de/re)quantization transparently. Works across numpy & ggml tensors, but they must have the same shape (and be contiguous). Parameters ---------- from_tensor : TensorLike The tensor to copy from (a numpy array or possibly-quantized ggml tensor) to_tensor : TensorLike The tensor to copy to (a numpy array or possibly-quantized ggml tensor) allow_requantize : bool If False, will throw an error if requantization is required (i.e. both from_tensor and to_tensor are quantized with different quantization types) """ if id(from_tensor) == id(to_tensor): return __expect_same_layout("source", from_tensor, "destination", to_tensor) __check_shape_consistent_with_type(from_tensor) __check_shape_consistent_with_type(to_tensor) from_type = __get_type(from_tensor) to_type = __get_type(to_tensor) if from_type == to_type: ffi.memmove(__get_data(to_tensor), __get_data(from_tensor), __get_nbytes(from_tensor)) else: assert allow_requantize or not lib.ggml_is_quantized(from_type) or not lib.ggml_is_quantized(to_type), \ f"Requantizing from {__type_name(from_type)} to {__type_name(to_type)} is disabled. Force with allow_requantize=True" __set_floats(to_tensor, __get_floats(from_tensor)) def numpy(tensor: ffi.CData, allow_copy: Union[bool, np.ndarray] = False, allow_requantize=False) -> np.ndarray: """ Convert a ggml tensor to a numpy array. If the tensor isn't quantized, the returned numpy array will be a view over its data. If it is quantized (and allow_copy is True), the copy will involve dequantization and the returned array will be a copy of the original tensor (any changes to the numpy array won't then be reflected back to the tensor). Parameters ---------- tensor : ffi.CData The tensor to convert to a numpy array allow_copy : bool or np.ndarray If False, will throw an error if the tensor is quantized (since dequantization requires extra memory). 
If True, will dequantize the tensor and return a copy of the data in a new float32 numpy array. If an np.ndarray, will copy the data into the given array (which must be the same shape as the tensor) when dequantization is needed allow_requantize : bool If allow_copy is a tensor with a different quantization type than the source tensor, will throw an error unless allow_requantize is True. """ shape = __get_shape(tensor) if lib.ggml_is_quantized(tensor.type): if allow_copy == False: raise ValueError(f"{__describe(tensor)} is quantized, conversion to numpy requires a copy (pass allow_copy=True; changes to the numpy array won't affect the original).") elif isinstance(allow_copy, np.ndarray): __expect_same_layout("source tensor", tensor, "dequantization output tensor", allow_copy) destination = allow_copy else: destination = np.empty(shape, dtype=np.float32) copy(tensor, destination, allow_requantize=allow_requantize) return destination else: dtype = __type_to_dtype(tensor.type) if not dtype: raise NotImplementedError(f'Cannot convert {__describe(tensor)} to numpy') assert __is_contiguous(tensor), f"Cannot convert {__describe(tensor)} to numpy (support contiguous tensors only)" nbytes = lib.ggml_nelements(tensor) * lib.ggml_type_size(tensor.type) array = np.frombuffer(ffi.buffer(lib.ggml_get_data(tensor), nbytes), dtype=dtype) array.shape = shape return array def __type_name(type: int) -> str: name = lib.ggml_type_name(type) return ffi.string(name).decode('utf-8') if name else None __k_quant_types = set([ lib.GGML_TYPE_Q2_K, lib.GGML_TYPE_Q3_K, lib.GGML_TYPE_Q4_K, lib.GGML_TYPE_Q5_K, lib.GGML_TYPE_Q6_K, lib.GGML_TYPE_Q8_K, ]) __type_to_dtype_dict = { lib.GGML_TYPE_I8: np.int8, lib.GGML_TYPE_I16: np.int16, lib.GGML_TYPE_I32: np.int32, lib.GGML_TYPE_F16: np.float16, lib.GGML_TYPE_F32: np.float32, } def __type_to_dtype(type: int) -> Optional[np.dtype]: return __type_to_dtype_dict.get(type) def __dtype_to_type(dtype: np.dtype): if dtype == np.float32: return lib.GGML_TYPE_F32 elif dtype == np.float16: return lib.GGML_TYPE_F16 elif dtype == np.int32: return lib.GGML_TYPE_I32 elif dtype == np.int16: return lib.GGML_TYPE_I16 elif dtype == np.int8: return lib.GGML_TYPE_I8 else: raise ValueError(f"Unsupported dtype: {dtype}") def __describe(tensor: ffi.CType): return f'Tensor[{__type_name(__get_type(tensor))}, {__get_shape(tensor)}]' def __get_type(tensor: TensorLike): return __dtype_to_type(tensor.dtype) if isinstance(tensor, np.ndarray) else tensor.type def __get_shape(x: TensorLike): return x.shape if isinstance(x, np.ndarray) else tuple([x.ne[i] for i in range(x.n_dims)]) def __get_strides(x: TensorLike): return x.strides if isinstance(x, np.ndarray) else tuple([x.nb[i] for i in range(x.n_dims)]) def __get_data(x: TensorLike) -> ffi.CData: return ffi.from_buffer(x) if isinstance(x, np.ndarray) else lib.ggml_get_data(x) def __get_nbytes(tensor: TensorLike): return tensor.nbytes if isinstance(tensor, np.ndarray) else lib.ggml_nbytes(tensor) def __get_nelements(tensor: TensorLike): return tensor.size if isinstance(tensor, np.ndarray) else lib.ggml_nelements(tensor) def __is_contiguous(tensor: TensorLike): return tensor.flags['C_CONTIGUOUS'] if isinstance(tensor, np.ndarray) else lib.ggml_is_contiguous(tensor) def __get_floats(tensor: TensorLike) -> ffi.CData: data, type = __get_data(tensor), __get_type(tensor) if type == lib.GGML_TYPE_F32: return ffi.cast('float*', data) else: nelements = __get_nelements(tensor) floats = ffi.new('float[]', nelements) if type == lib.GGML_TYPE_F16: 
lib.ggml_fp16_to_fp32_row(ffi.cast('uint16_t*', data), floats, nelements) elif lib.ggml_is_quantized(type): qtype = lib.ggml_internal_get_type_traits(type) assert qtype.to_float, f"Type {__type_name(type)} is not supported by ggml" qtype.to_float(data, floats, nelements) else: raise NotImplementedError(f'Cannot read floats from {__describe(tensor)}') return floats def __set_floats(tensor: TensorLike, f32_data: ffi.CData) -> None: data, type, nbytes = __get_data(tensor), __get_type(tensor), __get_nbytes(tensor) if type == lib.GGML_TYPE_F32: ffi.memmove(data, f32_data, nbytes) else: nelements = __get_nelements(tensor) if type == lib.GGML_TYPE_F16: lib.ggml_fp32_to_fp16_row(f32_data, ffi.cast('uint16_t*', data), nelements) elif lib.ggml_is_quantized(type): qtype = lib.ggml_internal_get_type_traits(type) assert qtype.from_float, f"Type {__type_name(type)} is not supported by ggml" qtype.from_float(f32_data, data, nelements) else: raise NotImplementedError(f'Cannot write floats to {__describe(tensor)}') def __expect_same_layout(name1: str, tensor1: TensorLike, name2: str, tensor2: TensorLike): shape1, shape2 = __get_shape(tensor1), __get_shape(tensor2) assert shape1 == shape2, f"Shape mismatch: {name1} has {shape1} but {name2} has {shape2}" assert __is_contiguous(tensor1) and __is_contiguous(tensor2), f"Only contiguous tensors are supported (got {name1} with strides {__get_strides(tensor1)} and {name2} with strides {__get_strides(tensor2)})" def __check_shape_consistent_with_type(tensor: TensorLike): type = __get_type(tensor) if not lib.ggml_is_quantized(type): return shape = __get_shape(tensor) block_size = lib.ggml_blck_size(type) assert not (block_size == 0 and type in __k_quant_types), f"Can't quantize, native library was not compiled with USE_K_QUANTS!" assert block_size > 0, f"Invalid block size {block_size} for type {__type_name(type)}" for i, d in enumerate(shape): assert d % block_size == 0, f"Dimension {i} of {__describe(tensor)} is not divisible by {block_size}, required for quantization." ggml-org-ggml-3678254/examples/python/regenerate.py000066400000000000000000000041401512524704700222150ustar00rootroot00000000000000# Generates bindings for the ggml library. # # cffi requires prior C preprocessing of the headers, and it uses pycparser which chokes on a couple of things # so we help it a bit (e.g. replace sizeof expressions with their value, remove exotic syntax found in Darwin headers). import os, sys, re, subprocess import cffi from stubs import generate_stubs API = os.environ.get('API', 'api.h') CC = os.environ.get('CC') or 'gcc' C_INCLUDE_DIR = os.environ.get('C_INCLUDE_DIR', '../../../llama.cpp') CPPFLAGS = [ "-I", C_INCLUDE_DIR, '-D__fp16=uint16_t', # pycparser doesn't support __fp16 '-D__attribute__(x)=', '-D_Static_assert(x, m)=', ] + [x for x in os.environ.get('CPPFLAGS', '').split(' ') if x != ''] try: header = subprocess.run([CC, "-E", *CPPFLAGS, API], capture_output=True, text=True, check=True).stdout except subprocess.CalledProcessError as e: print(f'{e.stderr}\n{e}', file=sys.stderr); raise header = '\n'.join([l for l in header.split('\n') if '__darwin_va_list' not in l]) # pycparser hates this # Replace constant size expressions w/ their value (compile & run a mini exe for each, because why not). # First, extract anyting *inside* square brackets and anything that looks like a sizeof call. 
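# Illustrative sketch of the substitution performed by the loop below (the sample
# declarations are assumptions made up for this comment, not read from api.h):
#
#     int64_t ne[GGML_MAX_DIMS];            ->  the regex captures "GGML_MAX_DIMS"
#     ... sizeof(struct ggml_tensor) ...    ->  the regex captures "sizeof(struct ggml_tensor)"
#
# For each captured expression, a one-line C program printing (size_t)(expr) is compiled
# with the same CPPFLAGS, executed, and its stdout (e.g. "4" or "288") is substituted back
# into `header`, so pycparser/cffi only ever see plain integer constants.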
for expr in set(re.findall(f'(?<=\\[)[^\\]]+(?=])|sizeof\\s*\\([^()]+\\)', header)): if re.match(r'^(\d+|\s*)$', expr): continue # skip constants and empty bracket contents subprocess.run([CC, "-o", "eval_size_expr", *CPPFLAGS, "-x", "c", "-"], text=True, check=True, input=f'''#include #include "{API}" int main() {{ printf("%lu", (size_t)({expr})); }}''') size = subprocess.run(["./eval_size_expr"], capture_output=True, text=True, check=True).stdout print(f'Computed constexpr {expr} = {size}') header = header.replace(expr, size) ffibuilder = cffi.FFI() ffibuilder.cdef(header) ffibuilder.set_source(f'ggml.cffi', None) # we're not compiling a native extension, as this quickly gets hairy ffibuilder.compile(verbose=True) with open("ggml/__init__.pyi", "wt") as f: f.write(generate_stubs(header))ggml-org-ggml-3678254/examples/python/stubs.py000066400000000000000000000106441512524704700212420ustar00rootroot00000000000000""" This generates .pyi stubs for the cffi Python bindings generated by regenerate.py """ import sys, re, itertools sys.path.extend(['.', '..']) # for pycparser from pycparser import c_ast, parse_file, CParser import pycparser.plyparser from pycparser.c_ast import PtrDecl, TypeDecl, FuncDecl, EllipsisParam, IdentifierType, Struct, Enum, Typedef from typing import Tuple __c_type_to_python_type = { 'void': 'None', '_Bool': 'bool', 'char': 'int', 'short': 'int', 'int': 'int', 'long': 'int', 'ptrdiff_t': 'int', 'size_t': 'int', 'int8_t': 'int', 'uint8_t': 'int', 'int16_t': 'int', 'uint16_t': 'int', 'int32_t': 'int', 'uint32_t': 'int', 'int64_t': 'int', 'uint64_t': 'int', 'float': 'float', 'double': 'float', 'ggml_fp16_t': 'np.float16', } def format_type(t: TypeDecl): if isinstance(t, PtrDecl) or isinstance(t, Struct): return 'ffi.CData' if isinstance(t, Enum): return 'int' if isinstance(t, TypeDecl): return format_type(t.type) if isinstance(t, IdentifierType): assert len(t.names) == 1, f'Expected a single name, got {t.names}' return __c_type_to_python_type.get(t.names[0]) or 'ffi.CData' return t.name class PythonStubFuncDeclVisitor(c_ast.NodeVisitor): def __init__(self): self.sigs = {} self.sources = {} def get_source_snippet_lines(self, coord: pycparser.plyparser.Coord) -> Tuple[list[str], list[str]]: if coord.file not in self.sources: with open(coord.file, 'rt') as f: self.sources[coord.file] = f.readlines() source_lines = self.sources[coord.file] ncomment_lines = len(list(itertools.takewhile(lambda i: re.search(r'^\s*(//|/\*)', source_lines[i]), range(coord.line - 2, -1, -1)))) comment_lines = [l.strip() for l in source_lines[coord.line - 1 - ncomment_lines:coord.line - 1]] decl_lines = [] for line in source_lines[coord.line - 1:]: decl_lines.append(line.rstrip()) if (';' in line) or ('{' in line): break return (comment_lines, decl_lines) def visit_Enum(self, node: Enum): if node.values is not None: for e in node.values.enumerators: self.sigs[e.name] = f' @property\n def {e.name}(self) -> int: ...' 
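    # Illustration of what visit_Enum above contributes to the generated stubs (the enum
    # shown is one example from the ggml headers; its full member list is not spelled out
    # here): for a declaration such as
    #     enum ggml_unary_op { GGML_UNARY_OP_ABS, GGML_UNARY_OP_SGN, ... };
    # every enumerator is recorded as an int-typed property of the generated `class lib`, e.g.
    #     @property
    #     def GGML_UNARY_OP_ABS(self) -> int: ...
    # which is what lets callers write `lib.GGML_UNARY_OP_ABS` with static type support.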
def visit_Typedef(self, node: Typedef): pass def visit_FuncDecl(self, node: FuncDecl): ret_type = node.type is_ptr = False while isinstance(ret_type, PtrDecl): ret_type = ret_type.type is_ptr = True fun_name = ret_type.declname if fun_name.startswith('__'): return args = [] argnames = [] def gen_name(stem): i = 1 while True: new_name = stem if i == 1 else f'{stem}{i}' if new_name not in argnames: return new_name i += 1 for a in node.args.params: if isinstance(a, EllipsisParam): arg_name = gen_name('args') argnames.append(arg_name) args.append('*' + gen_name('args')) elif format_type(a.type) == 'None': continue else: arg_name = a.name or gen_name('arg') argnames.append(arg_name) args.append(f'{arg_name}: {format_type(a.type)}') ret = format_type(ret_type if not is_ptr else node.type) comment_lines, decl_lines = self.get_source_snippet_lines(node.coord) lines = [f' def {fun_name}({", ".join(args)}) -> {ret}:'] if len(comment_lines) == 0 and len(decl_lines) == 1: lines += [f' """{decl_lines[0]}"""'] else: lines += [' """'] lines += [f' {c.lstrip("/* ")}' for c in comment_lines] if len(comment_lines) > 0: lines += [''] lines += [f' {d}' for d in decl_lines] lines += [' """'] lines += [' ...'] self.sigs[fun_name] = '\n'.join(lines) def generate_stubs(header: str): """ Generates a .pyi Python stub file for the GGML API using C header files. """ v = PythonStubFuncDeclVisitor() v.visit(CParser().parse(header, "")) keys = list(v.sigs.keys()) keys.sort() return '\n'.join([ '# auto-generated file', 'import ggml.ffi as ffi', 'import numpy as np', 'class lib:', *[v.sigs[k] for k in keys] ]) ggml-org-ggml-3678254/examples/python/test_tensor.py000066400000000000000000000224401512524704700224500ustar00rootroot00000000000000import pytest from pytest import raises from ggml import lib, ffi from ggml.utils import init, copy, numpy import numpy as np import numpy.testing as npt @pytest.fixture() def ctx(): print("setup") yield init(mem_size=10*1024*1024) print("teardown") class TestNumPy: # Single element def test_set_get_single_i32(self, ctx): i = lib.ggml_new_i32(ctx, 42) assert lib.ggml_get_i32_1d(i, 0) == 42 assert numpy(i) == np.array([42], dtype=np.int32) def test_set_get_single_f32(self, ctx): i = lib.ggml_new_f32(ctx, 4.2) epsilon = 0.000001 # Not sure why so large a difference?? 
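        # pytest.approx() only performs a check when the returned approx object is compared
        # with `==`; a sketch of an asserting form of the two tolerance checks below
        # (using pytest's `abs=` keyword and numpy.testing's `atol`, with `npt` already
        # imported at the top of this file) would be:
        #     assert lib.ggml_get_f32_1d(i, 0) == pytest.approx(4.2, abs=epsilon)
        #     npt.assert_allclose(numpy(i), np.array([4.2], dtype=np.float32), atol=epsilon)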
pytest.approx(lib.ggml_get_f32_1d(i, 0), 4.2, epsilon) pytest.approx(numpy(i), np.array([4.2], dtype=np.float32), epsilon) def _test_copy_np_to_ggml(self, a: np.ndarray, t: ffi.CData): a2 = a.copy() # Clone original copy(a, t) npt.assert_array_equal(numpy(t), a2) # I32 def test_copy_np_to_ggml_1d_i32(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_I32, 10) a = np.arange(10, dtype=np.int32) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_2d_i32(self, ctx): t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_I32, 2, 3) a = np.arange(2 * 3, dtype=np.int32).reshape((2, 3)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_3d_i32(self, ctx): t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_I32, 2, 3, 4) a = np.arange(2 * 3 * 4, dtype=np.int32).reshape((2, 3, 4)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_4d_i32(self, ctx): t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_I32, 2, 3, 4, 5) a = np.arange(2 * 3 * 4 * 5, dtype=np.int32).reshape((2, 3, 4, 5)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_4d_n_i32(self, ctx): dims = [2, 3, 4, 5] # GGML_MAX_DIMS is 4, going beyond would crash pdims = ffi.new('int64_t[]', len(dims)) for i, d in enumerate(dims): pdims[i] = d t = lib.ggml_new_tensor(ctx, lib.GGML_TYPE_I32, len(dims), pdims) a = np.arange(np.prod(dims), dtype=np.int32).reshape(tuple(pdims)) self._test_copy_np_to_ggml(a, t) # F32 def test_copy_np_to_ggml_1d_f32(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10) a = np.arange(10, dtype=np.float32) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_2d_f32(self, ctx): t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, 2, 3) a = np.arange(2 * 3, dtype=np.float32).reshape((2, 3)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_3d_f32(self, ctx): t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F32, 2, 3, 4) a = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_4d_f32(self, ctx): t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_F32, 2, 3, 4, 5) a = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape((2, 3, 4, 5)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_4d_n_f32(self, ctx): dims = [2, 3, 4, 5] # GGML_MAX_DIMS is 4, going beyond would crash pdims = ffi.new('int64_t[]', len(dims)) for i, d in enumerate(dims): pdims[i] = d t = lib.ggml_new_tensor(ctx, lib.GGML_TYPE_F32, len(dims), pdims) a = np.arange(np.prod(dims), dtype=np.float32).reshape(tuple(pdims)) self._test_copy_np_to_ggml(a, t) # F16 def test_copy_np_to_ggml_1d_f16(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F16, 10) a = np.arange(10, dtype=np.float16) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_2d_f16(self, ctx): t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F16, 2, 3) a = np.arange(2 * 3, dtype=np.float16).reshape((2, 3)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_3d_f16(self, ctx): t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F16, 2, 3, 4) a = np.arange(2 * 3 * 4, dtype=np.float16).reshape((2, 3, 4)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_4d_f16(self, ctx): t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_F16, 2, 3, 4, 5) a = np.arange(2 * 3 * 4 * 5, dtype=np.float16).reshape((2, 3, 4, 5)) self._test_copy_np_to_ggml(a, t) def test_copy_np_to_ggml_4d_n_f16(self, ctx): dims = [2, 3, 4, 5] # GGML_MAX_DIMS is 4, going beyond would crash pdims = ffi.new('int64_t[]', len(dims)) for i, d in enumerate(dims): pdims[i] = d t = lib.ggml_new_tensor(ctx, lib.GGML_TYPE_F16, len(dims), pdims) 
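            # Descriptive note: lib.ggml_new_tensor is the generic n-dimensional constructor;
            # it takes the element type, the number of dimensions and a C pointer to an
            # int64_t array of sizes, which is why `dims` is marshalled into an
            # ffi-allocated int64_t[] above instead of being passed as a Python list.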
a = np.arange(np.prod(dims), dtype=np.float16).reshape(tuple(pdims)) self._test_copy_np_to_ggml(a, t) # Mismatching shapes def test_copy_mismatching_shapes_1d(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10) a = np.arange(10, dtype=np.float32) copy(a, t) # OK a = a.reshape((5, 2)) with raises(AssertionError): copy(a, t) with raises(AssertionError): copy(t, a) def test_copy_mismatching_shapes_2d(self, ctx): t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, 2, 3) a = np.arange(6, dtype=np.float32) copy(a.reshape((2, 3)), t) # OK a = a.reshape((3, 2)) with raises(AssertionError): copy(a, t) with raises(AssertionError): copy(t, a) def test_copy_mismatching_shapes_3d(self, ctx): t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F32, 2, 3, 4) a = np.arange(24, dtype=np.float32) copy(a.reshape((2, 3, 4)), t) # OK a = a.reshape((2, 4, 3)) with raises(AssertionError): copy(a, t) with raises(AssertionError): copy(t, a) def test_copy_mismatching_shapes_4d(self, ctx): t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_F32, 2, 3, 4, 5) a = np.arange(24*5, dtype=np.float32) copy(a.reshape((2, 3, 4, 5)), t) # OK a = a.reshape((2, 3, 5, 4)) with raises(AssertionError): copy(a, t) with raises(AssertionError): copy(t, a) def test_copy_f16_to_f32(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 1) a = np.array([123.45], dtype=np.float16) copy(a, t) np.testing.assert_allclose(lib.ggml_get_f32_1d(t, 0), 123.45, rtol=1e-3) def test_copy_f32_to_f16(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F16, 1) a = np.array([123.45], dtype=np.float32) copy(a, t) np.testing.assert_allclose(lib.ggml_get_f32_1d(t, 0), 123.45, rtol=1e-3) def test_copy_f16_to_Q5_K(self, ctx): n = 256 t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n) a = np.arange(n, dtype=np.float16) copy(a, t) np.testing.assert_allclose(a, numpy(t, allow_copy=True), rtol=0.05) def test_copy_Q5_K_to_f16(self, ctx): n = 256 t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n) copy(np.arange(n, dtype=np.float32), t) a = np.arange(n, dtype=np.float16) copy(t, a) np.testing.assert_allclose(a, numpy(t, allow_copy=True), rtol=0.05) def test_copy_i16_f32_mismatching_types(self, ctx): t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 1) a = np.arange(1, dtype=np.int16) with raises(NotImplementedError): copy(a, t) with raises(NotImplementedError): copy(t, a) class TestTensorCopy: def test_copy_self(self, ctx): t = lib.ggml_new_i32(ctx, 42) copy(t, t) assert lib.ggml_get_i32_1d(t, 0) == 42 def test_copy_1d(self, ctx): t1 = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10) t2 = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10) a = np.arange(10, dtype=np.float32) copy(a, t1) copy(t1, t2) assert np.allclose(a, numpy(t2)) assert np.allclose(numpy(t1), numpy(t2)) class TestGraph: def test_add(self, ctx): n = 256 ta = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n) tb = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n) tsum = lib.ggml_add(ctx, ta, tb) assert tsum.type == lib.GGML_TYPE_F32 gf = ffi.new('struct ggml_cgraph*') lib.ggml_build_forward_expand(gf, tsum) a = np.arange(0, n, dtype=np.float32) b = np.arange(n, 0, -1, dtype=np.float32) copy(a, ta) copy(b, tb) lib.ggml_graph_compute_with_ctx(ctx, gf, 1) assert np.allclose(numpy(tsum, allow_copy=True), a + b) class TestQuantization: def test_quantized_add(self, ctx): n = 256 ta = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n) tb = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n) tsum = lib.ggml_add(ctx, ta, tb) assert tsum.type == lib.GGML_TYPE_Q5_K gf = ffi.new('struct 
ggml_cgraph*') lib.ggml_build_forward_expand(gf, tsum) a = np.arange(0, n, dtype=np.float32) b = np.arange(n, 0, -1, dtype=np.float32) copy(a, ta) copy(b, tb) lib.ggml_graph_compute_with_ctx(ctx, gf, 1) unquantized_sum = a + b sum = numpy(tsum, allow_copy=True) diff = np.linalg.norm(unquantized_sum - sum, np.inf) assert diff > 4 assert diff < 5 ggml-org-ggml-3678254/examples/sam/000077500000000000000000000000001512524704700167625ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/sam/CMakeLists.txt000066400000000000000000000004311512524704700215200ustar00rootroot00000000000000# # sam set(TEST_TARGET sam) add_executable(${TEST_TARGET} sam.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common) # # sam-quantize #set(TEST_TARGET sam-quantize) #add_executable(${TEST_TARGET} quantize.cpp) #target_link_libraries(${TEST_TARGET} PRIVATE ggml common) ggml-org-ggml-3678254/examples/sam/README.md000066400000000000000000000062311512524704700202430ustar00rootroot00000000000000# SAM.cpp Inference of Meta's [Segment Anything Model](https://github.com/facebookresearch/segment-anything/) in pure C/C++ ## Description The example currently supports only the [ViT-B SAM model checkpoint](https://huggingface.co/facebook/sam-vit-base). ## Next steps - [X] Reduce memory usage by utilizing the new ggml-alloc - [X] Remove redundant graph nodes - [ ] Make inference faster - [X] Fix the difference in output masks compared to the PyTorch implementation - [X] Filter masks based on stability score - [ ] Add support for user input - [ ] Support F16 for heavy F32 ops - [ ] Test quantization - [X] Support bigger model checkpoints - [ ] GPU support ## Quick start Setup Python and build examples according to main README. ```bash # Download PTH model wget -P examples/sam/ https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth # Convert PTH model to ggml python examples/sam/convert-pth-to-ggml.py examples/sam/sam_vit_b_01ec64.pth examples/sam/ 1 # run inference ./bin/sam -t 16 -i ../examples/sam/example.jpg -m ../examples/sam/ggml-model-f16.bin ``` ## Downloading and converting the model checkpoints You can download a [model checkpoint](https://github.com/facebookresearch/segment-anything/tree/main#model-checkpoints) and convert it to `ggml` format using the script `convert-pth-to-ggml.py`: ## Example output on M2 Ultra ``` $ ▶ make -j sam && time ./bin/sam -t 8 -i img.jpg [ 28%] Built target common [ 71%] Built target ggml [100%] Built target sam main: seed = 1693224265 main: loaded image 'img.jpg' (680 x 453) sam_image_preprocess: scale = 0.664062 main: preprocessed image (1024 x 1024) sam_model_load: loading model from 'models/sam-vit-b/ggml-model-f16.bin' - please wait ... sam_model_load: n_enc_state = 768 sam_model_load: n_enc_layer = 12 sam_model_load: n_enc_head = 12 sam_model_load: n_enc_out_chans = 256 sam_model_load: n_pt_embd = 4 sam_model_load: ftype = 1 sam_model_load: qntvr = 0 operator(): ggml ctx size = 202.32 MB sam_model_load: ...................................... 
done sam_model_load: model size = 185.05 MB / num tensors = 304 embd_img dims: 64 64 256 1 f32 First & Last 10 elements: -0.05117 -0.06408 -0.07154 -0.06991 -0.07212 -0.07690 -0.07508 -0.07281 -0.07383 -0.06779 0.01589 0.01775 0.02250 0.01675 0.01766 0.01661 0.01811 0.02051 0.02103 0.03382 sum: 12736.272313 Skipping mask 0 with iou 0.705935 below threshold 0.880000 Skipping mask 1 with iou 0.762136 below threshold 0.880000 Mask 2: iou = 0.947081, stability_score = 0.955437, bbox (371, 436), (144, 168) main: load time = 51.28 ms main: total time = 2047.49 ms real 0m2.068s user 0m16.343s sys 0m0.214s ``` Input point is (414.375, 162.796875) (currently hardcoded) Input image: ![llamas](https://user-images.githubusercontent.com/8558655/261301565-37b7bf4b-bf91-40cf-8ec1-1532316e1612.jpg) Output mask (mask_out_2.png in build folder): ![mask_glasses](https://user-images.githubusercontent.com/8558655/263706800-47eeea30-1457-4c87-938b-8f11536c5aa7.png) ## References - [ggml](https://github.com/ggerganov/ggml) - [SAM](https://segment-anything.com/) - [SAM demo](https://segment-anything.com/demo) ggml-org-ggml-3678254/examples/sam/convert-pth-to-ggml.py000066400000000000000000000075061512524704700231610ustar00rootroot00000000000000# Convert a SAM model checkpoint to a ggml compatible file # import sys import torch import struct import numpy as np if len(sys.argv) < 3: print("Usage: convert-pth-to-ggml.py file-model dir-output [ftype]\n") print(" ftype == 0 -> float32") print(" ftype == 1 -> float16") sys.exit(1) # output in the same directory as the model fname_model = sys.argv[1] dir_out = sys.argv[2] fname_out = dir_out + "/ggml-model.bin" # possible data types # ftype == 0 -> float32 # ftype == 1 -> float16 # # map from ftype to string ftype_str = ["f32", "f16"] ftype = 1 if len(sys.argv) > 3: ftype = int(sys.argv[3]) if ftype < 0 or ftype > 1: print("Invalid ftype: " + str(ftype)) sys.exit(1) fname_out = fname_out.replace(".bin", "-" + ftype_str[ftype] + ".bin") # Default params are set to sam_vit_b checkpoint n_enc_state = 768 n_enc_layers = 12 n_enc_heads = 12 n_enc_out_chans = 256 n_pt_embd = 4 model = torch.load(fname_model, map_location="cpu") for k, v in model.items(): print(k, v.shape) if k == "image_encoder.blocks.0.norm1.weight": n_enc_state = v.shape[0] if n_enc_state == 1024: # sam_vit_l n_enc_layers = 24 n_enc_heads = 16 elif n_enc_state == 1280: # sam_vit_h n_enc_layers = 32 n_enc_heads = 16 hparams = { "n_enc_state": n_enc_state, "n_enc_layers": n_enc_layers, "n_enc_heads": n_enc_heads, "n_enc_out_chans": n_enc_out_chans, "n_pt_embd": n_pt_embd, } print(hparams) for k, v in model.items(): print(k, v.shape) #exit() #code.interact(local=locals()) fout = open(fname_out, "wb") fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex fout.write(struct.pack("i", hparams["n_enc_state"])) fout.write(struct.pack("i", hparams["n_enc_layers"])) fout.write(struct.pack("i", hparams["n_enc_heads"])) fout.write(struct.pack("i", hparams["n_enc_out_chans"])) fout.write(struct.pack("i", hparams["n_pt_embd"])) fout.write(struct.pack("i", ftype)) for k, v in model.items(): name = k shape = v.shape if name[:19] == "prompt_encoder.mask": continue print("Processing variable: " + name + " with shape: ", shape, " and type: ", v.dtype) #data = tf.train.load_variable(dir_model, name).squeeze() #data = v.numpy().squeeze() data = v.numpy() n_dims = len(data.shape) # for efficiency - transpose some matrices # "model/h.*/attn/c_attn/w" # "model/h.*/attn/c_proj/w" # "model/h.*/mlp/c_fc/w" # 
"model/h.*/mlp/c_proj/w" #if name[-14:] == "/attn/c_attn/w" or \ # name[-14:] == "/attn/c_proj/w" or \ # name[-11:] == "/mlp/c_fc/w" or \ # name[-13:] == "/mlp/c_proj/w": # print(" Transposing") # data = data.transpose() dshape = data.shape # default type is fp16 ftype_cur = 1 if ftype == 0 or n_dims == 1 or \ name == "image_encoder.pos_embed" or \ name.startswith("prompt_encoder") or \ name.startswith("mask_decoder.iou_token") or \ name.startswith("mask_decoder.mask_tokens"): print(" Converting to float32") data = data.astype(np.float32) ftype_cur = 0 else: print(" Converting to float16") data = data.astype(np.float16) # reshape the 1D bias into a 4D tensor so we can use ggml_repeat # keep it in F32 since the data is small if name == "image_encoder.patch_embed.proj.bias": data = data.reshape(1, data.shape[0], 1, 1) n_dims = len(data.shape) dshape = data.shape print(" New shape: ", dshape) # header str = name.encode('utf-8') fout.write(struct.pack("iii", n_dims, len(str), ftype_cur)) for i in range(n_dims): fout.write(struct.pack("i", dshape[n_dims - 1 - i])) fout.write(str) # data data.tofile(fout) fout.close() print("Done. Output file: " + fname_out) print("") ggml-org-ggml-3678254/examples/sam/example.jpg000066400000000000000000002326041512524704700211260ustar00rootroot00000000000000JFIFC  !"$"$C" J !1AQ"aq2B#R3b$r񂒢%CSc4sU5!1AQ"aq2#34B$ ?ժբu+j@@ [ClV[6 Eek& `5ڷ@vchQ ;d bD5 kҲi@+`+t4;Vo[ lv P4[ƫ1Df(&Y f(Qb@Qb)m')]$PdPiBhHh~*0(R(TR(PⴢA@ *51G (PFmQ( FDb4`V4(h/4{k`SAh};@CԦZe]##8$R.P!(3g$z0?JisOq e2;{Ed:A<?Zu?VFۅvטa6Ͻ(Xc|drʨrI=E)gr%`VGwL:1[fT`2khC E) Lf(H(fS*RpHҀj|Bmu.@ېۉS8IUop .P4Ҙ.p u@MjɈϊFmR;;O,S%R|!ؑdTsQk y'E 1< vid4p/OSUo qYhxXq8rBZ*_fQVI/fM%m$rqȢy~]K]FNg{b3C0=^AwȒZ )D"T9([ҤRRW8ϯ4n$ Kcq[܏oBtS:52>Ӑ.W-oMvڑ< ZP8J yR;08"a@˷uI,g}'={)<#xYSr=fEei6G  $Lc9OioI=W <'f#8ۃ*j9l$R$T|lP@!_&E.“a@,(a@ )R(=(/J)B((Hm+ڈvDlQQ@(VR(jҊQEm(*6QX FFb QG4bQb-L݁ 0iEZ(h 0@V0)@(D`@V`Vh  ڠ> x fnm;ݛhџ@Hϸm!czn+q+o=Zzk1gyhΧʕXO`y^x!SȏPE YA8 +pG$_w O4M1y#+uAY1da v5-yKB) N6oQFd+XG=~][I'xhbL㍅ۊ+c( DuE ncI p&lb=q1`>e BuI%c##{>M$߬.A%_ cթ=A ޔI)_' X$ 9y{|O4|I} rkZݴ{4*0N:>k FhEއ9KHf\ok% 1!y$cE$yuӮϮKS:G!CF}N9>)޳]#M,`?#dpc8}*5}'GvG瑞}lzѳzyU8m&]KV>Di5Xq~;Ug9t 1:a7?g U$U"$PɥH"4H,(Ȃj3ך1) M#< 1RJڹq:?(H>,~d*/zw[|3PoOqLfY.osjZ1$[Tr=2kǞL3F̡'Gk}jGR~.&syMYXol육ATM8edeQbDD?Һ#tە ګ?^S@/gO;|g 2ĂNw'{/{umö {gpIіgwл9rJ0}w%OB0 )\+QBE4LE~zp,(hXRl)r(PPhNh#e8ڶ(QB E,E*J)EJ``V'ڀ9,`gHc.vڻc*X0 A io&fȸN}=LkfŦ\ݛbJH2$pqq]<1 ڍ +@v#mæ6>ƜVa#$:W`|k8mEIWdP" 7`2>$ʳ7gopQ;C+1!AڣWI>GQ$`>U4NG ]u`#v?/%uU;sͺ v?ޒ m]QQ [J;f2(`=?LԜ TkX4@R0KDܙ-32' vK(#^S-OB ?[ iow[]ƅcQ:\t兝g6@g>(/ Hu|*zEjTEڗ?aTdܳwʷ<O55EY]#݃9u}3wbd/H I_%ȹ1q})).]S9ǡZF亞+`:Hd-蓪TKڟ.01HXGuvuON¾R !2o>Z|`Ss/p@4X{ Ѽ{sdKRc̲+0s>b |` {TwXbGq-K ;g4kWk+q/o_%iDw,AkGsX_uoFY3<2ݽ@;H s<ަ=޺ʂYX =5.qu}:n~ǡtH4 D +\~GږpҦhdPJz4Iȝ0ᜲ$d*9U;Ni t 4&9bx4 %{HGB]Kmh7Vcnp;\siڲFRZ1ѻ,=>dWeq&傕`>cƱmunRI"o1vwlYdo^Iו.21|`g=V݄qnY <}#8MOسt%_6qd"&Nc>AKL{%^"Kl{ToLBiwVY3)Y'mcʥ-\E=ڦ L}5 J,ApǷ>^\<=((#f@2(=H4a@•".@E&—aIҀ 2;Q J D UE QE(P V *E* EVsJc4 te l A#=V>Ll!ks#(uqϡgj%q}>ᧆXsP^6ܸFeYsk'1ʹSr))c6B<@9ޟTe4s.S>8Q~0;NR2bI#w:lR{暵I4^xQܑ$P]dQ#ΚI UNwkK}Cv;▹6,!Ӥy%٘i98 xsmuvvp4"uHفQOZۧnd[Gf,ۮSyG%Kq;p{_x 3ԥG@@HUfb ()t*]SNk%@82ߘZnD;pNg?KZ8K8'apOſA-Q.&2y/>?JjS5v4>3_6{K5Q< ql2;Q). 
[... binary JPEG data (examples/sam/example.jpg) omitted ...]
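For reference, here is a minimal sketch of how the header and the first tensor record written by `convert-pth-to-ggml.py` above can be read back. This is an illustrative reader, not part of the repository; it assumes the converted file sits at `models/sam-vit-b/ggml-model-f16.bin`, which is the default model path used by `sam.cpp`.

```python
# Illustrative reader for the layout written by convert-pth-to-ggml.py above (not part of the repo).
import struct

def read_i32(f):
    return struct.unpack("i", f.read(4))[0]

# default path expected by sam.cpp; adjust to wherever the converted model was written
with open("models/sam-vit-b/ggml-model-f16.bin", "rb") as f:
    assert read_i32(f) == 0x67676d6c  # magic: "ggml" in hex, as written by the converter

    # hparams are six consecutive int32 values, in the same order the converter wrote them
    vals = [read_i32(f) for _ in range(6)]
    n_enc_state, n_enc_layers, n_enc_heads, n_enc_out_chans, n_pt_embd, ftype = vals
    print("hparams:", n_enc_state, n_enc_layers, n_enc_heads, n_enc_out_chans, n_pt_embd, "ftype:", ftype)

    # each tensor record: (n_dims, name length, per-tensor ftype), then the dims
    # (stored last-to-first by the converter), the utf-8 name, and finally the raw data
    n_dims, name_len, ftype_cur = struct.unpack("iii", f.read(12))
    dims = [read_i32(f) for _ in range(n_dims)]
    name = f.read(name_len).decode("utf-8")
    print("first tensor:", name, dims, "f32" if ftype_cur == 0 else "f16")
```

A full reader would loop over tensor records until end of file and skip the data payload using the byte size implied by the per-tensor ftype, which is what `sam_model_load()` in `sam.cpp` below does on the C++ side.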
:_IKֺxuE `;Ix m>Ӽ:C^ƽ?bpҘ C*ǐ_;VVS霻$ȠuBMP ?:ຕԌoYYZzč@.AVYP7k++Tד8Rj;-I;beeEJDxA@Wn8YYV%Gc!e۝l\rFqYYRāWj m9YYMJ-jR*p('ЙBG5YYY҇~>PjڄVP( eeggml-org-ggml-3678254/examples/sam/sam.cpp000066400000000000000000003106461512524704700202600ustar00rootroot00000000000000#define _USE_MATH_DEFINES // for M_PI #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows #include "ggml.h" #include "ggml-cpu.h" #include "ggml-alloc.h" #include "ggml-backend.h" #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif // default hparams (ViT-B SAM) struct sam_hparams { int32_t n_enc_state = 768; int32_t n_enc_layer = 12; int32_t n_enc_head = 12; int32_t n_enc_out_chans = 256; int32_t n_pt_embd = 4; int32_t n_dec_heads = 8; int32_t ftype = 1; float mask_threshold = 0.f; float iou_threshold = 0.88f; float stability_score_threshold = 0.95f; float stability_score_offset = 1.0f; float eps = 1e-6f; float eps_decoder_transformer = 1e-5f; int32_t n_enc_head_dim() const { return n_enc_state / n_enc_head; } int32_t n_img_size() const { return 1024; } int32_t n_window_size() const { return 14; } int32_t n_patch_size() const { return 16; } int32_t n_img_embd() const { return n_img_size() / n_patch_size(); } std::vector global_attn_indices() const { switch (n_enc_state) { case 768: return { 2, 5, 8, 11 }; case 1024: return { 5, 11, 17, 23 }; case 1280: return { 7, 15, 23, 31 }; default: { fprintf(stderr, "%s: unsupported n_enc_state = %d\n", __func__, n_enc_state); } break; }; return {}; } bool is_global_attn(int32_t layer) const { const auto indices = global_attn_indices(); for (const auto & idx : indices) { if (layer == idx) { return true; } } return false; } }; struct sam_layer_enc { struct ggml_tensor * norm1_w; struct ggml_tensor * norm1_b; struct ggml_tensor * rel_pos_w; struct ggml_tensor * rel_pos_h; struct ggml_tensor * qkv_w; struct ggml_tensor * qkv_b; struct ggml_tensor * proj_w; struct ggml_tensor * proj_b; struct ggml_tensor * norm2_w; struct ggml_tensor * norm2_b; struct ggml_tensor * mlp_lin1_w; struct ggml_tensor * mlp_lin1_b; struct ggml_tensor * mlp_lin2_w; struct ggml_tensor * mlp_lin2_b; }; struct sam_encoder_image { struct ggml_tensor * pe; struct ggml_tensor * proj_w; struct ggml_tensor * proj_b; struct ggml_tensor * neck_conv_0; struct ggml_tensor * neck_norm_0_w; struct ggml_tensor * neck_norm_0_b; struct ggml_tensor * neck_conv_1; struct ggml_tensor * neck_norm_1_w; struct ggml_tensor * neck_norm_1_b; std::vector layers; }; struct sam_encoder_prompt { struct ggml_tensor * pe; struct ggml_tensor * not_a_pt_embd_w; std::vector pt_embd; struct ggml_tensor * no_mask_embd_w; //std::vector mask_down_w; //std::vector mask_down_b; }; struct sam_layer_dec_transformer_attn { // q_proj struct ggml_tensor * q_w; struct ggml_tensor * q_b; // k_proj struct ggml_tensor * k_w; struct ggml_tensor * k_b; // v_proj struct ggml_tensor * v_w; struct ggml_tensor * v_b; // out_proj struct ggml_tensor * out_w; struct ggml_tensor * out_b; }; struct sam_layer_dec_transformer { sam_layer_dec_transformer_attn self_attn; // norm1 struct ggml_tensor * norm1_w; struct ggml_tensor * norm1_b; sam_layer_dec_transformer_attn cross_attn_token_to_img; // norm2 struct ggml_tensor * norm2_w; struct ggml_tensor * norm2_b; // mlp.lin1 struct ggml_tensor * 
mlp_lin1_w; struct ggml_tensor * mlp_lin1_b; // mlp.lin2 struct ggml_tensor * mlp_lin2_w; struct ggml_tensor * mlp_lin2_b; // norm3 struct ggml_tensor * norm3_w; struct ggml_tensor * norm3_b; // norm4 struct ggml_tensor * norm4_w; struct ggml_tensor * norm4_b; sam_layer_dec_transformer_attn cross_attn_img_to_token; }; struct sam_layer_dec_output_hypernet_mlps { // mlps_*.layers.0 struct ggml_tensor * w_0; struct ggml_tensor * b_0; // mlps_*.layers.1 struct ggml_tensor * w_1; struct ggml_tensor * b_1; // mlps_*.layers.2 struct ggml_tensor * w_2; struct ggml_tensor * b_2; }; struct sam_decoder_mask { std::vector transformer_layers; // trasnformer.final_attn_token_to_image sam_layer_dec_transformer_attn transformer_final_attn_token_to_img; // transformer.norm_final struct ggml_tensor * transformer_norm_final_w; struct ggml_tensor * transformer_norm_final_b; // output_upscaling.0 struct ggml_tensor * output_upscaling_0_w; struct ggml_tensor * output_upscaling_0_b; // output_upscaling.1 struct ggml_tensor * output_upscaling_1_w; struct ggml_tensor * output_upscaling_1_b; // output_upscaling.3 struct ggml_tensor * output_upscaling_3_w; struct ggml_tensor * output_upscaling_3_b; // output_hypernetworks_mlps std::vector output_hypernet_mlps; // iou_prediction_head.0 struct ggml_tensor * iou_prediction_head_0_w; struct ggml_tensor * iou_prediction_head_0_b; // iou_prediction_head.1 struct ggml_tensor * iou_prediction_head_1_w; struct ggml_tensor * iou_prediction_head_1_b; // iou_prediction_head.2 struct ggml_tensor * iou_prediction_head_2_w; struct ggml_tensor * iou_prediction_head_2_b; // iou_token.weight struct ggml_tensor * iou_token_w; // mask_tokens.weight struct ggml_tensor * mask_tokens_w; }; struct sam_state { struct ggml_tensor * embd_img; struct ggml_tensor * low_res_masks; struct ggml_tensor * iou_predictions; //struct ggml_tensor * tmp_save = {}; struct ggml_context * ctx; // buffer for `ggml_graph_plan.work_data` std::vector work_buffer; // buffers to evaluate the model std::vector buf_compute_img_enc; std::vector buf_compute_fast; ggml_gallocr_t allocr = {}; }; // void save_tensor(sam_state& state, struct ggml_tensor * t, struct ggml_cgraph * gf) { // if (!state.tmp_save) { // state.tmp_save = ggml_new_tensor(state.ctx, t->type, t->n_dims, t->ne); // } // struct ggml_tensor * tmp0 = ggml_cpy(state.ctx, t, state.tmp_save); // ggml_build_forward_expand(gf, tmp0); // } struct sam_model { sam_hparams hparams; sam_encoder_image enc_img; sam_encoder_prompt enc_prompt; sam_decoder_mask dec; // struct ggml_context * ctx; std::map tensors; }; struct sam_point { float x; float y; }; struct sam_box { float x1; float y1; float x2; float y2; }; // RGB uint8 image struct sam_image_u8 { int nx; int ny; std::vector data; }; // RGB float32 image // Memory layout: RGBRGBRGB... 
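// For illustration: with this interleaved layout, channel c of pixel (x, y) lives at
// data[3*(y*nx + x) + c]; sam_image_preprocess() below indexes both sam_image_u8 and
// sam_image_f32 this way.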
struct sam_image_f32 { int nx; int ny; std::vector data; }; enum sam_prompt_type { SAM_PROMPT_TYPE_POINT = 0, SAM_PROMPT_TYPE_BOX = 1, }; struct sam_prompt { sam_prompt_type prompt_type = SAM_PROMPT_TYPE_POINT; sam_point pt = { 414.375f, 162.796875f, }; sam_box box = { 368.0f, 144.0f, 441.0f, 173.0f }; }; struct sam_params { int32_t seed = -1; // RNG seed int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); std::string model = "models/sam-vit-b/ggml-model-f16.bin"; // model path std::string fname_inp = "img.jpg"; std::string fname_out = "img.out"; float mask_threshold = 0.f; float iou_threshold = 0.88f; float stability_score_threshold = 0.95f; float stability_score_offset = 1.0f; float eps = 1e-6f; float eps_decoder_transformer = 1e-5f; sam_prompt prompt; bool multimask_output = true; }; void print_t_f32(const char* title, struct ggml_tensor * t, int n = 10) { printf("%s\n", title); float * data = (float *)t->data; printf("dims: % " PRId64 " % " PRId64 " % " PRId64 " % " PRId64 " f32\n", t->ne[0], t->ne[1], t->ne[2], t->ne[3]); printf("First & Last %d elements:\n", n); for (int i = 0; i < std::min((int) (t->ne[0]*t->ne[1]), n); i++) { printf("%.5f ", data[i]); if (i != 0 && i % t->ne[0] == 0) { printf("\n"); } } printf("\n"); for (int i = 0; i < std::min((int) (t->ne[0]*t->ne[1]), n); i++) { printf("%.5f ", data[ggml_nelements(t) - n + i]); if ((ggml_nelements(t) - n + i) % t->ne[0] == 0) { printf("\n"); } } printf("\n"); double sum = 0.0; for (int i = 0; i < ggml_nelements(t); i++) { sum += data[i]; } printf("sum: %f\n\n", sum); } static void ggml_disconnect_node_from_graph(ggml_tensor * t) { t->op = GGML_OP_NONE; for (int i = 0; i < GGML_MAX_SRC; i++) { t->src[i] = NULL; } } static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { struct ggml_cplan plan = ggml_graph_plan(graph, n_threads, nullptr); if (plan.work_size > 0) { buf.resize(plan.work_size); plan.work_data = buf.data(); } ggml_graph_compute(graph, &plan); } static void ggml_sam_sin(struct ggml_tensor * dst , const struct ggml_tensor * src, int ith, int nth, void * userdata) { GGML_ASSERT(userdata == NULL); GGML_ASSERT(ggml_are_same_shape(dst, src)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_is_contiguous(src)); const float * src_data = ggml_get_data_f32(src); float * dst_data = ggml_get_data_f32(dst); const int ne = (int)ggml_nelements(dst); const int dr = (ne + nth - 1) / nth; const int ie0 = dr * ith; const int ie1 = std::min(ie0 + dr, ne); for (int i = ie0; i < ie1; ++i) { dst_data[i] = sinf(src_data[i]); } } static void ggml_sam_cos(struct ggml_tensor * dst , const struct ggml_tensor * src, int ith, int nth, void * userdata) { GGML_ASSERT(userdata == NULL); GGML_ASSERT(ggml_are_same_shape(dst, src)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_is_contiguous(src)); const float * src_data = ggml_get_data_f32(src); float * dst_data = ggml_get_data_f32(dst); const int ne = (int)ggml_nelements(dst); const int dr = (ne + nth - 1) / nth; const int ie0 = dr * ith; const int ie1 = std::min(ie0 + dr, ne); for (int i = ie0; i < ie1; ++i) { dst_data[i] = cosf(src_data[i]); } } bool sam_image_load_from_file(const std::string & fname, sam_image_u8 & img) { int nx, ny, nc; auto data = stbi_load(fname.c_str(), &nx, &ny, &nc, 3); if (!data) { fprintf(stderr, "%s: failed to load '%s'\n", __func__, fname.c_str()); return false; } img.nx = nx; img.ny = ny; img.data.resize(nx * ny * 3); memcpy(img.data.data(), data, nx * ny * 3); stbi_image_free(data); 
return true; } // ref: https://github.com/facebookresearch/segment-anything/blob/efeab7296ab579d4a261e554eca80faf6b33924a/segment_anything/modeling/sam.py#L164 // resize largest dimension to 1024 // normalize: x = (x - mean) / std // mean = [123.675, 116.28, 103.53] // std = [58.395, 57.12, 57.375] // TODO: why are these hardcoded !? // pad to 1024x1024 // TODO: for some reason, this is not numerically identical to pytorch's interpolation bool sam_image_preprocess(const sam_image_u8 & img, sam_image_f32 & res) { const int nx = img.nx; const int ny = img.ny; const int nx2 = 1024; const int ny2 = 1024; res.nx = nx2; res.ny = ny2; res.data.resize(3*nx2*ny2); const float scale = std::max(nx, ny) / 1024.0f; fprintf(stderr, "%s: scale = %f\n", __func__, scale); const int nx3 = int(nx/scale + 0.5f); const int ny3 = int(ny/scale + 0.5f); const float m3[3] = { 123.675f, 116.280f, 103.530f }; const float s3[3] = { 58.395f, 57.120f, 57.375f }; for (int y = 0; y < ny3; y++) { for (int x = 0; x < nx3; x++) { for (int c = 0; c < 3; c++) { // linear interpolation const float sx = (x + 0.5f)*scale - 0.5f; const float sy = (y + 0.5f)*scale - 0.5f; const int x0 = std::max(0, (int) std::floor(sx)); const int y0 = std::max(0, (int) std::floor(sy)); const int x1 = std::min(x0 + 1, nx - 1); const int y1 = std::min(y0 + 1, ny - 1); const float dx = sx - x0; const float dy = sy - y0; const int j00 = 3*(y0*nx + x0) + c; const int j01 = 3*(y0*nx + x1) + c; const int j10 = 3*(y1*nx + x0) + c; const int j11 = 3*(y1*nx + x1) + c; const float v00 = img.data[j00]; const float v01 = img.data[j01]; const float v10 = img.data[j10]; const float v11 = img.data[j11]; const float v0 = v00*(1.0f - dx) + v01*dx; const float v1 = v10*(1.0f - dx) + v11*dx; const float v = v0*(1.0f - dy) + v1*dy; const uint8_t v2 = std::min(std::max(std::round(v), 0.0f), 255.0f); const int i = 3*(y*nx3 + x) + c; res.data[i] = (float(v2) - m3[c]) / s3[c]; } } } return true; } // load the model's weights from a file bool sam_model_load(const sam_params & params, sam_model & model) { fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, params.model.c_str()); auto fin = std::ifstream(params.model, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, params.model.c_str()); return false; } // verify magic { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != 0x67676d6c) { fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, params.model.c_str()); return false; } } // load hparams { // Override defaults with user choices model.hparams.mask_threshold = params.mask_threshold; model.hparams.iou_threshold = params.iou_threshold; model.hparams.stability_score_threshold = params.stability_score_threshold; model.hparams.stability_score_offset = params.stability_score_offset; model.hparams.eps = params.eps; model.hparams.eps_decoder_transformer = params.eps_decoder_transformer; auto & hparams = model.hparams; fin.read((char *) &hparams.n_enc_state, sizeof(hparams.n_enc_state)); fin.read((char *) &hparams.n_enc_layer, sizeof(hparams.n_enc_layer)); fin.read((char *) &hparams.n_enc_head, sizeof(hparams.n_enc_head)); fin.read((char *) &hparams.n_enc_out_chans, sizeof(hparams.n_enc_out_chans)); fin.read((char *) &hparams.n_pt_embd, sizeof(hparams.n_pt_embd)); fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; printf("%s: n_enc_state = %d\n", __func__, hparams.n_enc_state); printf("%s: n_enc_layer = %d\n", 
__func__, hparams.n_enc_layer); printf("%s: n_enc_head = %d\n", __func__, hparams.n_enc_head); printf("%s: n_enc_out_chans = %d\n", __func__, hparams.n_enc_out_chans); printf("%s: n_pt_embd = %d\n", __func__, hparams.n_pt_embd); printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: qntvr = %d\n", __func__, qntvr); hparams.ftype %= GGML_QNT_VERSION_FACTOR; } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); if (wtype == GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, params.model.c_str(), model.hparams.ftype); return false; } auto & ctx = model.ctx; const size_t ctx_size = [&]() { size_t ctx_size = 0; const auto & hparams = model.hparams; const int32_t n_enc_state = hparams.n_enc_state; const int32_t n_enc_layer = hparams.n_enc_layer; const int32_t n_enc_head_dim = hparams.n_enc_head_dim(); const int32_t n_enc_out_chans = hparams.n_enc_out_chans; const int32_t n_pt_embd = hparams.n_pt_embd; const int32_t n_enc_layer_local = hparams.global_attn_indices().size(); const int32_t n_enc_layer_global = n_enc_layer - n_enc_layer_local; const int32_t n_img_embd = hparams.n_img_embd(); const int32_t n_window_size = hparams.n_window_size(); const int32_t n_patch_size = hparams.n_patch_size(); // image encoder { ctx_size += n_enc_state*n_img_embd*n_img_embd*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_state*3*n_patch_size*n_patch_size*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_state*n_enc_out_chans*1*1*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_out_chans*n_enc_out_chans*3*3*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); } // image encoder layers { ctx_size += n_enc_layer*n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer*n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer_global*n_enc_head_dim*(2*n_img_embd - 1)*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer_global*n_enc_head_dim*(2*n_img_embd - 1)*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer_local*n_enc_head_dim*(2*n_window_size - 1)*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer_local*n_enc_head_dim*(2*n_window_size - 1)*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer*3*n_enc_state*n_enc_state*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer*3*n_enc_state* ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer*n_enc_state*n_enc_state*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer*n_enc_state* ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer*n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer*n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer*4*n_enc_state*n_enc_state*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer*4*n_enc_state* ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_layer*4*n_enc_state*n_enc_state*ggml_type_size(GGML_TYPE_F16); ctx_size += n_enc_layer*4*n_enc_state* ggml_type_size(GGML_TYPE_F32); } ctx_size += (8 + 14*n_enc_layer)*ggml_tensor_overhead(); // prompt encoder { ctx_size += n_enc_out_chans*ggml_type_size(GGML_TYPE_F16); // 2*(n_enc_out_chans/2) ctx_size += 
n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); ctx_size += n_pt_embd*n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); } ctx_size += (2 + n_pt_embd)*ggml_tensor_overhead(); // mask decoder { //transformer { const int tfm_layers_count = 2; const int qkv_count = 3; const int norm_count = 4; const int n_hypernet_mpls_count = 4; // self_attn ctx_size += tfm_layers_count*qkv_count*n_enc_state*n_enc_state*ggml_type_size(GGML_TYPE_F16); ctx_size += tfm_layers_count*qkv_count*n_enc_state* ggml_type_size(GGML_TYPE_F32); ctx_size += tfm_layers_count*n_enc_state* ggml_type_size(GGML_TYPE_F32); // all norms ctx_size += tfm_layers_count*norm_count*n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += tfm_layers_count*norm_count*n_enc_state*ggml_type_size(GGML_TYPE_F32); // cross_attn_token_to_img ctx_size += tfm_layers_count*qkv_count*n_enc_state*(n_enc_state/2)*ggml_type_size(GGML_TYPE_F16); ctx_size += tfm_layers_count*qkv_count*(n_enc_state/2)* ggml_type_size(GGML_TYPE_F32); ctx_size += tfm_layers_count*n_enc_state* ggml_type_size(GGML_TYPE_F32); // mlp ctx_size += tfm_layers_count*8*n_enc_out_chans*n_enc_out_chans*ggml_type_size(GGML_TYPE_F16); ctx_size += tfm_layers_count*8*n_enc_out_chans* ggml_type_size(GGML_TYPE_F32); ctx_size += tfm_layers_count*n_enc_out_chans*8*n_enc_out_chans*ggml_type_size(GGML_TYPE_F16); ctx_size += tfm_layers_count*n_enc_out_chans* ggml_type_size(GGML_TYPE_F32); // cross_attn_img_to_token ctx_size += tfm_layers_count*qkv_count*n_enc_state*(n_enc_state/2)*ggml_type_size(GGML_TYPE_F16); ctx_size += tfm_layers_count*qkv_count*(n_enc_state/2)* ggml_type_size(GGML_TYPE_F32); ctx_size += tfm_layers_count*n_enc_state* ggml_type_size(GGML_TYPE_F32); // transformer_final_attn_token_to_img ctx_size += qkv_count*n_enc_state*(n_enc_state/2)*ggml_type_size(GGML_TYPE_F16); ctx_size += qkv_count*(n_enc_state/2)* ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_state* ggml_type_size(GGML_TYPE_F32); // transformer_norm_final ctx_size += norm_count*n_enc_state*ggml_type_size(GGML_TYPE_F32); ctx_size += norm_count*n_enc_state*ggml_type_size(GGML_TYPE_F32); // output_upscaling ctx_size += n_enc_out_chans*n_img_embd*2*2*ggml_type_size(GGML_TYPE_F16); ctx_size += 3*n_img_embd* ggml_type_size(GGML_TYPE_F32); ctx_size += n_enc_out_chans*n_img_embd*(n_img_embd/2)*2*2*ggml_type_size(GGML_TYPE_F16); ctx_size += (n_img_embd/2)* ggml_type_size(GGML_TYPE_F32); // output_hypernetworks_mlps ctx_size += n_hypernet_mpls_count*2*n_enc_out_chans*n_enc_out_chans*ggml_type_size(GGML_TYPE_F16); ctx_size += n_hypernet_mpls_count*2*n_enc_out_chans* ggml_type_size(GGML_TYPE_F32); ctx_size += n_hypernet_mpls_count*n_enc_out_chans*(n_img_embd/2)*ggml_type_size(GGML_TYPE_F16); ctx_size += n_hypernet_mpls_count*(n_img_embd/2)* ggml_type_size(GGML_TYPE_F32); // iou_prediction_head ctx_size += 2*n_enc_out_chans*n_enc_out_chans*ggml_type_size(GGML_TYPE_F16); ctx_size += 2*n_enc_out_chans* ggml_type_size(GGML_TYPE_F32); ctx_size += n_pt_embd*n_enc_out_chans*ggml_type_size(GGML_TYPE_F16); ctx_size += n_pt_embd* ggml_type_size(GGML_TYPE_F32); // iou_token_w ctx_size += n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); // mask_tokens_w ctx_size += n_pt_embd*n_enc_out_chans*ggml_type_size(GGML_TYPE_F32); } } fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); return ctx_size; }(); // create the ggml context { struct ggml_init_params params = { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, }; ctx = ggml_init(params); if (!ctx) { fprintf(stderr, "%s: ggml_init() 
failed\n", __func__); return false; } } // prepare memory for the weights { const auto & hparams = model.hparams; const int32_t n_enc_state = hparams.n_enc_state; const int32_t n_enc_layer = hparams.n_enc_layer; const int32_t n_enc_head_dim = hparams.n_enc_head_dim(); const int32_t n_enc_out_chans = hparams.n_enc_out_chans; const int32_t n_pt_embd = hparams.n_pt_embd; const int32_t n_img_embd = hparams.n_img_embd(); const int32_t n_window_size = hparams.n_window_size(); const int32_t n_patch_size = hparams.n_patch_size(); model.enc_img.layers.resize(n_enc_layer); // image encoder { auto & enc = model.enc_img; enc.pe = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_enc_state, n_img_embd, n_img_embd, 1); enc.proj_w = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, n_patch_size, n_patch_size, 3, n_enc_state); enc.proj_b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 1, 1, n_enc_state); enc.neck_conv_0 = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 1, 1, n_enc_state, n_enc_out_chans); enc.neck_conv_1 = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 3, 3, n_enc_out_chans, n_enc_out_chans); enc.neck_norm_0_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); enc.neck_norm_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); enc.neck_norm_1_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); enc.neck_norm_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); model.tensors["image_encoder.pos_embed"] = enc.pe; model.tensors["image_encoder.patch_embed.proj.weight"] = enc.proj_w; model.tensors["image_encoder.patch_embed.proj.bias"] = enc.proj_b; model.tensors["image_encoder.neck.0.weight"] = enc.neck_conv_0; model.tensors["image_encoder.neck.2.weight"] = enc.neck_conv_1; model.tensors["image_encoder.neck.1.weight"] = enc.neck_norm_0_w; model.tensors["image_encoder.neck.1.bias"] = enc.neck_norm_0_b; model.tensors["image_encoder.neck.3.weight"] = enc.neck_norm_1_w; model.tensors["image_encoder.neck.3.bias"] = enc.neck_norm_1_b; for (int i = 0; i < n_enc_layer; ++i) { auto & layer = enc.layers[i]; layer.norm1_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_state); layer.norm1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_state); if (hparams.is_global_attn(i)) { layer.rel_pos_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_head_dim, 2*n_img_embd - 1); layer.rel_pos_h = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_head_dim, 2*n_img_embd - 1); } else { layer.rel_pos_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_head_dim, 2*n_window_size - 1); layer.rel_pos_h = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_head_dim, 2*n_window_size - 1); } layer.qkv_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_state, 3*n_enc_state); layer.qkv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_enc_state); layer.proj_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_state, n_enc_state); layer.proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_state); layer.norm2_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_state); layer.norm2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_state); layer.mlp_lin1_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_state, 4*n_enc_state); layer.mlp_lin1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_enc_state); layer.mlp_lin2_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 4*n_enc_state, n_enc_state); layer.mlp_lin2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_state); model.tensors["image_encoder.blocks." + std::to_string(i) + ".norm1.weight"] = layer.norm1_w; model.tensors["image_encoder.blocks." 
+ std::to_string(i) + ".norm1.bias"] = layer.norm1_b; model.tensors["image_encoder.blocks." + std::to_string(i) + ".attn.rel_pos_w"] = layer.rel_pos_w; model.tensors["image_encoder.blocks." + std::to_string(i) + ".attn.rel_pos_h"] = layer.rel_pos_h; model.tensors["image_encoder.blocks." + std::to_string(i) + ".attn.qkv.weight"] = layer.qkv_w; model.tensors["image_encoder.blocks." + std::to_string(i) + ".attn.qkv.bias"] = layer.qkv_b; model.tensors["image_encoder.blocks." + std::to_string(i) + ".attn.proj.weight"] = layer.proj_w; model.tensors["image_encoder.blocks." + std::to_string(i) + ".attn.proj.bias"] = layer.proj_b; model.tensors["image_encoder.blocks." + std::to_string(i) + ".norm2.weight"] = layer.norm2_w; model.tensors["image_encoder.blocks." + std::to_string(i) + ".norm2.bias"] = layer.norm2_b; model.tensors["image_encoder.blocks." + std::to_string(i) + ".mlp.lin1.weight"] = layer.mlp_lin1_w; model.tensors["image_encoder.blocks." + std::to_string(i) + ".mlp.lin1.bias"] = layer.mlp_lin1_b; model.tensors["image_encoder.blocks." + std::to_string(i) + ".mlp.lin2.weight"] = layer.mlp_lin2_w; model.tensors["image_encoder.blocks." + std::to_string(i) + ".mlp.lin2.bias"] = layer.mlp_lin2_b; } } // prompt encoder { auto & enc = model.enc_prompt; enc.pe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_enc_out_chans/2, 2); enc.not_a_pt_embd_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); enc.no_mask_embd_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); model.tensors["prompt_encoder.pe_layer.positional_encoding_gaussian_matrix"] = enc.pe; model.tensors["prompt_encoder.not_a_point_embed.weight"] = enc.not_a_pt_embd_w; model.tensors["prompt_encoder.no_mask_embed.weight"] = enc.no_mask_embd_w; enc.pt_embd.resize(n_pt_embd); for (int i = 0; i < n_pt_embd; i++) { enc.pt_embd[i] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); model.tensors["prompt_encoder.point_embeddings." 
+ std::to_string(i) + ".weight"] = enc.pt_embd[i]; } } // mask decoder { auto & dec = model.dec; auto & tfm_layers = dec.transformer_layers; const int tfm_layers_count = 2; tfm_layers.resize(tfm_layers_count); for (int i = 0; i < tfm_layers_count; ++i) { auto& l = tfm_layers[i]; l.self_attn.q_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); l.self_attn.q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.self_attn.k_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); l.self_attn.k_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.self_attn.v_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); l.self_attn.v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.self_attn.out_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); l.self_attn.out_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm1_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.cross_attn_token_to_img.q_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); l.cross_attn_token_to_img.q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); l.cross_attn_token_to_img.k_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); l.cross_attn_token_to_img.k_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); l.cross_attn_token_to_img.v_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); l.cross_attn_token_to_img.v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); l.cross_attn_token_to_img.out_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans/2, n_enc_out_chans); l.cross_attn_token_to_img.out_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm2_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.mlp_lin1_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, 8*n_enc_out_chans); l.mlp_lin1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8*n_enc_out_chans); l.mlp_lin2_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 8*n_enc_out_chans, n_enc_out_chans); l.mlp_lin2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm3_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm3_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm4_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.norm4_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); l.cross_attn_img_to_token.q_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); l.cross_attn_img_to_token.q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); l.cross_attn_img_to_token.k_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); l.cross_attn_img_to_token.k_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); l.cross_attn_img_to_token.v_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); l.cross_attn_img_to_token.v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); l.cross_attn_img_to_token.out_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans/2, n_enc_out_chans); l.cross_attn_img_to_token.out_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); const auto prefix = "mask_decoder.transformer.layers." 
+ std::to_string(i) + "."; model.tensors[prefix + "self_attn.q_proj.weight"] = l.self_attn.q_w; model.tensors[prefix + "self_attn.q_proj.bias"] = l.self_attn.q_b; model.tensors[prefix + "self_attn.k_proj.weight"] = l.self_attn.k_w; model.tensors[prefix + "self_attn.k_proj.bias"] = l.self_attn.k_b; model.tensors[prefix + "self_attn.v_proj.weight"] = l.self_attn.v_w; model.tensors[prefix + "self_attn.v_proj.bias"] = l.self_attn.v_b; model.tensors[prefix + "self_attn.out_proj.weight"] = l.self_attn.out_w; model.tensors[prefix + "self_attn.out_proj.bias"] = l.self_attn.out_b; model.tensors[prefix + "norm1.weight"] = l.norm1_w; model.tensors[prefix + "norm1.bias"] = l.norm1_b; model.tensors[prefix + "cross_attn_token_to_image.q_proj.weight"] = l.cross_attn_token_to_img.q_w; model.tensors[prefix + "cross_attn_token_to_image.q_proj.bias"] = l.cross_attn_token_to_img.q_b; model.tensors[prefix + "cross_attn_token_to_image.k_proj.weight"] = l.cross_attn_token_to_img.k_w; model.tensors[prefix + "cross_attn_token_to_image.k_proj.bias"] = l.cross_attn_token_to_img.k_b; model.tensors[prefix + "cross_attn_token_to_image.v_proj.weight"] = l.cross_attn_token_to_img.v_w; model.tensors[prefix + "cross_attn_token_to_image.v_proj.bias"] = l.cross_attn_token_to_img.v_b; model.tensors[prefix + "cross_attn_token_to_image.out_proj.weight"] = l.cross_attn_token_to_img.out_w; model.tensors[prefix + "cross_attn_token_to_image.out_proj.bias"] = l.cross_attn_token_to_img.out_b; model.tensors[prefix + "norm2.weight"] = l.norm2_w; model.tensors[prefix + "norm2.bias"] = l.norm2_b; model.tensors[prefix + "mlp.lin1.weight"] = l.mlp_lin1_w; model.tensors[prefix + "mlp.lin1.bias"] = l.mlp_lin1_b; model.tensors[prefix + "mlp.lin2.weight"] = l.mlp_lin2_w; model.tensors[prefix + "mlp.lin2.bias"] = l.mlp_lin2_b; model.tensors[prefix + "norm3.weight"] = l.norm3_w; model.tensors[prefix + "norm3.bias"] = l.norm3_b; model.tensors[prefix + "norm4.weight"] = l.norm4_w; model.tensors[prefix + "norm4.bias"] = l.norm4_b; model.tensors[prefix + "cross_attn_image_to_token.q_proj.weight"] = l.cross_attn_img_to_token.q_w; model.tensors[prefix + "cross_attn_image_to_token.q_proj.bias"] = l.cross_attn_img_to_token.q_b; model.tensors[prefix + "cross_attn_image_to_token.k_proj.weight"] = l.cross_attn_img_to_token.k_w; model.tensors[prefix + "cross_attn_image_to_token.k_proj.bias"] = l.cross_attn_img_to_token.k_b; model.tensors[prefix + "cross_attn_image_to_token.v_proj.weight"] = l.cross_attn_img_to_token.v_w; model.tensors[prefix + "cross_attn_image_to_token.v_proj.bias"] = l.cross_attn_img_to_token.v_b; model.tensors[prefix + "cross_attn_image_to_token.out_proj.weight"] = l.cross_attn_img_to_token.out_w; model.tensors[prefix + "cross_attn_image_to_token.out_proj.bias"] = l.cross_attn_img_to_token.out_b; } dec.transformer_final_attn_token_to_img.q_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); dec.transformer_final_attn_token_to_img.q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); dec.transformer_final_attn_token_to_img.k_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); dec.transformer_final_attn_token_to_img.k_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); dec.transformer_final_attn_token_to_img.v_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans/2); dec.transformer_final_attn_token_to_img.v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans/2); dec.transformer_final_attn_token_to_img.out_w = ggml_new_tensor_2d(ctx, 
GGML_TYPE_F16, n_enc_out_chans/2, n_enc_out_chans); dec.transformer_final_attn_token_to_img.out_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); model.tensors["mask_decoder.transformer.final_attn_token_to_image.q_proj.weight"] = dec.transformer_final_attn_token_to_img.q_w; model.tensors["mask_decoder.transformer.final_attn_token_to_image.q_proj.bias"] = dec.transformer_final_attn_token_to_img.q_b; model.tensors["mask_decoder.transformer.final_attn_token_to_image.k_proj.weight"] = dec.transformer_final_attn_token_to_img.k_w; model.tensors["mask_decoder.transformer.final_attn_token_to_image.k_proj.bias"] = dec.transformer_final_attn_token_to_img.k_b; model.tensors["mask_decoder.transformer.final_attn_token_to_image.v_proj.weight"] = dec.transformer_final_attn_token_to_img.v_w; model.tensors["mask_decoder.transformer.final_attn_token_to_image.v_proj.bias"] = dec.transformer_final_attn_token_to_img.v_b; model.tensors["mask_decoder.transformer.final_attn_token_to_image.out_proj.weight"] = dec.transformer_final_attn_token_to_img.out_w; model.tensors["mask_decoder.transformer.final_attn_token_to_image.out_proj.bias"] = dec.transformer_final_attn_token_to_img.out_b; dec.transformer_norm_final_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); dec.transformer_norm_final_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); model.tensors["mask_decoder.transformer.norm_final_attn.weight"] = dec.transformer_norm_final_w; model.tensors["mask_decoder.transformer.norm_final_attn.bias"] = dec.transformer_norm_final_b; dec.output_upscaling_0_w = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 2, 2, n_img_embd, n_enc_out_chans); dec.output_upscaling_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_img_embd); dec.output_upscaling_1_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_img_embd); dec.output_upscaling_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_img_embd); dec.output_upscaling_3_w = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 2, 2, n_img_embd/2, n_img_embd); dec.output_upscaling_3_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_img_embd/2); model.tensors["mask_decoder.output_upscaling.0.weight"] = dec.output_upscaling_0_w; model.tensors["mask_decoder.output_upscaling.0.bias"] = dec.output_upscaling_0_b; model.tensors["mask_decoder.output_upscaling.1.weight"] = dec.output_upscaling_1_w; model.tensors["mask_decoder.output_upscaling.1.bias"] = dec.output_upscaling_1_b; model.tensors["mask_decoder.output_upscaling.3.weight"] = dec.output_upscaling_3_w; model.tensors["mask_decoder.output_upscaling.3.bias"] = dec.output_upscaling_3_b; const int n_hypernet_mpls_count = 4; dec.output_hypernet_mlps.resize(n_hypernet_mpls_count); for (int i = 0; i < n_hypernet_mpls_count; ++i) { auto& mlp = dec.output_hypernet_mlps[i]; mlp.w_0 = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); mlp.b_0 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); mlp.w_1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); mlp.b_1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); mlp.w_2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_img_embd/2); mlp.b_2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_img_embd/2); const auto prefix = "mask_decoder.output_hypernetworks_mlps." 
+ std::to_string(i) + "."; model.tensors[prefix + "layers.0.weight"] = mlp.w_0; model.tensors[prefix + "layers.0.bias"] = mlp.b_0; model.tensors[prefix + "layers.1.weight"] = mlp.w_1; model.tensors[prefix + "layers.1.bias"] = mlp.b_1; model.tensors[prefix + "layers.2.weight"] = mlp.w_2; model.tensors[prefix + "layers.2.bias"] = mlp.b_2; } dec.iou_prediction_head_0_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); dec.iou_prediction_head_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); dec.iou_prediction_head_1_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_enc_out_chans); dec.iou_prediction_head_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_enc_out_chans); dec.iou_prediction_head_2_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_enc_out_chans, n_pt_embd); dec.iou_prediction_head_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_pt_embd); dec.iou_token_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_enc_out_chans, 1); dec.mask_tokens_w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_enc_out_chans, n_pt_embd); model.tensors["mask_decoder.iou_prediction_head.layers.0.weight"] = dec.iou_prediction_head_0_w; model.tensors["mask_decoder.iou_prediction_head.layers.0.bias"] = dec.iou_prediction_head_0_b; model.tensors["mask_decoder.iou_prediction_head.layers.1.weight"] = dec.iou_prediction_head_1_w; model.tensors["mask_decoder.iou_prediction_head.layers.1.bias"] = dec.iou_prediction_head_1_b; model.tensors["mask_decoder.iou_prediction_head.layers.2.weight"] = dec.iou_prediction_head_2_w; model.tensors["mask_decoder.iou_prediction_head.layers.2.bias"] = dec.iou_prediction_head_2_b; model.tensors["mask_decoder.iou_token.weight"] = dec.iou_token_w; model.tensors["mask_decoder.mask_tokens.weight"] = dec.mask_tokens_w; } } // load weights { int n_tensors = 0; size_t total_size = 0; fprintf(stderr, "%s: ", __func__); while (true) { int32_t n_dims; int32_t length; int32_t ftype; fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast(&length), sizeof(length)); fin.read(reinterpret_cast(&ftype), sizeof(ftype)); if (fin.eof()) { break; } int64_t nelements = 1; int64_t ne[4] = { 1, 1, 1, 1 }; for (int i = 0; i < n_dims; ++i) { int32_t ne_cur; fin.read(reinterpret_cast(&ne_cur), sizeof(ne_cur)); ne[i] = ne_cur; nelements *= ne[i]; } std::string name(length, 0); fin.read(&name[0], length); if (model.tensors.find(name.data()) == model.tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); return false; } auto tensor = model.tensors[name.data()]; //printf("ne0 = %jd, ne1 = %jd, ne2 = %jd, ne3 = %jd\n", ne[0], ne[1], ne[2], ne[3]); if (ggml_nelements(tensor) != nelements) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %d, expected %d\n", __func__, name.data(), (int) nelements, (int) ggml_nelements(tensor)); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1] || tensor->ne[2] != ne[2] || tensor->ne[3] != ne[3]) { fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d, %d, %d], expected [%d, %d, %d, %d]\n", __func__, name.data(), (int) ne[0], (int) ne[1], (int) ne[2], (int) ne[3], (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2], (int) tensor->ne[3]); return false; } size_t bpe = 0; switch (ftype) { case 0: bpe = ggml_type_size(GGML_TYPE_F32); break; case 1: bpe = ggml_type_size(GGML_TYPE_F16); break; case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break; case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); 
assert(ne[0] % 64 == 0); break; default: { fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); return false; } }; if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.data(), ggml_nbytes(tensor), (size_t) nelements*bpe); return false; } fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); total_size += ggml_nbytes(tensor); if (++n_tensors % 8 == 0) { fprintf(stderr, "."); fflush(stdout); } } if (n_tensors != int(model.tensors.size())) { fprintf(stderr, "%s: model file has %d tensors, but %d tensors were expected\n", __func__, n_tensors, (int) model.tensors.size()); return false; } fprintf(stderr, " done\n"); fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors); } fin.close(); return true; } struct ggml_tensor * sam_fill_dense_pe( const sam_model & model, struct ggml_context * ctx0, struct ggml_cgraph * gf, sam_state & state) { const auto & hparams = model.hparams; const auto & enc = model.enc_prompt; const int32_t n_img_embd = hparams.n_img_embd(); struct ggml_tensor * xy_embed_stacked = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 2, n_img_embd, n_img_embd); ggml_set_name(xy_embed_stacked, "xy_embed_stacked"); ggml_set_input(xy_embed_stacked); struct ggml_tensor * cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, enc.pe)), xy_embed_stacked); cur = ggml_scale(ctx0, cur, float(2.0*M_PI)); // concat // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/prompt_encoder.py#L192 { struct ggml_tensor * t_sin = ggml_map_custom1(ctx0, cur, ggml_sam_sin, GGML_N_TASKS_MAX, NULL); struct ggml_tensor * t_cos = ggml_map_custom1(ctx0, cur, ggml_sam_cos, GGML_N_TASKS_MAX, NULL); cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, t_sin->ne[0] + t_cos->ne[0], cur->ne[1], cur->ne[2]); ggml_build_forward_expand(gf, ggml_cpy(ctx0, t_sin, ggml_view_3d(ctx0, cur, t_sin->ne[0], t_sin->ne[1], t_sin->ne[2], cur->nb[1], cur->nb[2], 0))); ggml_build_forward_expand(gf, ggml_cpy(ctx0, t_cos, ggml_view_3d(ctx0, cur, t_sin->ne[0], t_sin->ne[1], t_sin->ne[2], cur->nb[1], cur->nb[2], t_sin->nb[1]))); } struct ggml_tensor * pe_img_dense = ggml_cont(ctx0, ggml_permute(ctx0, cur, 2, 0, 1, 3)); ggml_build_forward_expand(gf, pe_img_dense); return pe_img_dense; } struct ggml_tensor* sam_layer_norm_2d( struct ggml_context * ctx0, struct ggml_tensor * layer, int n_channels, struct ggml_tensor * w, struct ggml_tensor * b, float eps) { // LayerNorm2d // normalize along channel dimmension // TODO: better implementation layer = ggml_permute(ctx0, ggml_norm(ctx0, ggml_cont(ctx0, ggml_permute(ctx0, layer, 1, 2, 0, 3)), eps), 2, 0, 1, 3); layer = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, ggml_reshape_3d(ctx0, w, 1, 1, n_channels), layer), layer), ggml_repeat(ctx0, ggml_reshape_3d(ctx0, b, 1, 1, n_channels), layer)); return layer; } struct ggml_cgraph * sam_encode_image( const sam_model & model, sam_state & state, const sam_image_f32 & img) { const auto & hparams = model.hparams; const auto & enc = model.enc_img; const int32_t n_enc_state = hparams.n_enc_state; const int32_t n_enc_layer = hparams.n_enc_layer; const int32_t n_enc_head = hparams.n_enc_head; const int32_t n_enc_head_dim = hparams.n_enc_head_dim(); const int32_t n_enc_out_chans = hparams.n_enc_out_chans; const int32_t n_img_size = hparams.n_img_size(); const int32_t n_window_size = hparams.n_window_size(); 
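/*
 * The image-encoder graph below is built in two phases: the ggml_context is created with
 * no_alloc = true, so the tensor and graph structures only occupy state.buf_compute_img_enc
 * (metadata, no data buffers). The actual buffers are assigned afterwards by
 * ggml_gallocr_alloc_graph(state.allocr, gf), and only then is the "inp" tensor looked up by
 * name and filled with the preprocessed image at the end of this function.
 */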
struct ggml_init_params ggml_params = { /*.mem_size =*/ state.buf_compute_img_enc.size(), /*.mem_buffer =*/ state.buf_compute_img_enc.data(), /*.no_alloc =*/ true, // skip allocating as we use ggml_alloc to allocate exact memory requirements }; struct ggml_context * ctx0 = ggml_init(ggml_params); struct ggml_cgraph * gf = ggml_new_graph(ctx0); struct ggml_tensor * inp = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_img_size, n_img_size, 3, 1); ggml_set_name(inp, "inp"); ggml_set_input(inp); // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L392 struct ggml_tensor * cur = ggml_conv_2d_sk_p0(ctx0, enc.proj_w, inp); cur = ggml_add_inplace(ctx0, cur, ggml_repeat(ctx0, enc.proj_b, cur)); // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L394 // keep in F32 cur = ggml_cont(ctx0, ggml_permute(ctx0, cur, 1, 2, 0, 3)); // convert to F16 //cur = ggml_cpy(ctx0, // ggml_permute(ctx0, cur, 1, 2, 0, 3), // ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_enc_state, n_img_embd, n_img_embd)); // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L108-L109 cur = ggml_add_inplace(ctx0, cur, enc.pe); struct ggml_tensor * inpL = cur; for (int il = 0; il < n_enc_layer; ++il) { const auto & layer = enc.layers[il]; // norm // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L168 { cur = ggml_norm(ctx0, inpL, hparams.eps); // cur = ln_0_w*cur + ln_0_b cur = ggml_mul(ctx0, cur, layer.norm1_w); cur = ggml_add_inplace(ctx0, cur, layer.norm1_b); } const int64_t w0 = cur->ne[1]; const int64_t h0 = cur->ne[2]; if (hparams.is_global_attn(il) == false) { // local attention layer - apply window partition // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L169-L172 cur = ggml_win_part(ctx0, cur, n_window_size); } const int64_t W = cur->ne[1]; const int64_t H = cur->ne[2]; // self-attention { cur = ggml_mul_mat(ctx0, layer.qkv_w, cur); cur = ggml_add_inplace(ctx0, cur, layer.qkv_b); // split qkv into separate tensors // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L225-L229 const int B = cur->ne[3]; cur = ggml_reshape_4d(ctx0, cur, n_enc_state, 3, W*H, B); cur = ggml_cont(ctx0, ggml_permute(ctx0, cur, 0, 3, 1, 2)); struct ggml_tensor * Q; struct ggml_tensor * K; struct ggml_tensor * V; Q = ggml_view_3d (ctx0, cur, n_enc_state, W*H, B, cur->nb[1], cur->nb[2], 0*cur->nb[3]); Q = ggml_reshape_4d(ctx0, Q, n_enc_head_dim, n_enc_head, W*H, B); Q = ggml_cont (ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); Q = ggml_reshape_3d(ctx0, Q, n_enc_head_dim, W*H, B*n_enc_head); K = ggml_view_3d (ctx0, cur, n_enc_state, W*H, B, cur->nb[1], cur->nb[2], 1*cur->nb[3]); K = ggml_reshape_4d(ctx0, K, n_enc_head_dim, n_enc_head, W*H, B); K = ggml_cont (ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); K = ggml_reshape_3d(ctx0, K, n_enc_head_dim, W*H, B*n_enc_head); V = ggml_view_3d (ctx0, cur, n_enc_state, W*H, B, cur->nb[1], cur->nb[2], 2*cur->nb[3]); V = ggml_reshape_4d(ctx0, V, n_enc_head_dim, n_enc_head, W*H, B); V = ggml_cont (ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3)); // transposed V = ggml_reshape_3d(ctx0, V, W*H, n_enc_head_dim, B*n_enc_head); struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, 1.0f/sqrtf(n_enc_head_dim)); struct 
ggml_tensor * rw = ggml_get_rel_pos(ctx0, layer.rel_pos_w, W, W); struct ggml_tensor * rh = ggml_get_rel_pos(ctx0, layer.rel_pos_h, H, H); struct ggml_tensor * q_r = ggml_reshape_4d(ctx0, Q, n_enc_head_dim, W, H, B*n_enc_head); struct ggml_tensor * rel_w = ggml_cont(ctx0, ggml_permute(ctx0, ggml_mul_mat(ctx0, rw, ggml_cont(ctx0, ggml_permute(ctx0, q_r, 0, 2, 1, 3))), 0, 2, 1, 3)); struct ggml_tensor * rel_h = ggml_mul_mat(ctx0, rh, q_r); struct ggml_tensor * attn = ggml_add_rel_pos_inplace(ctx0, KQ_scaled, rel_w, rel_h); struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, attn); struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, ggml_permute(ctx0, ggml_reshape_4d(ctx0, KQV, n_enc_head_dim, W*H, n_enc_head, B), 0, 2, 1, 3)), n_enc_state, W, H, B); cur = ggml_mul_mat(ctx0, layer.proj_w, cur); cur = ggml_add_inplace(ctx0, cur, layer.proj_b); } if (hparams.is_global_attn(il) == false) { // local attention layer - reverse window partition cur = ggml_win_unpart(ctx0, cur, w0, h0, n_window_size); } cur = ggml_add_inplace(ctx0, cur, inpL); struct ggml_tensor * inpFF = cur; // feed-forward network { // norm { cur = ggml_norm(ctx0, inpFF, hparams.eps); // cur = mlp_ln_w*cur + mlp_ln_b cur = ggml_mul(ctx0, cur, layer.norm2_w); cur = ggml_add_inplace(ctx0, cur, layer.norm2_b); } // fully connected cur = ggml_mul_mat(ctx0, layer.mlp_lin1_w, cur); cur = ggml_add_inplace(ctx0, cur, layer.mlp_lin1_b); // GELU activation cur = ggml_gelu(ctx0, cur); // projection cur = ggml_mul_mat(ctx0, layer.mlp_lin2_w, cur); cur = ggml_add_inplace(ctx0, cur, layer.mlp_lin2_b); } inpL = ggml_add(ctx0, cur, inpFF); } cur = ggml_cont(ctx0, ggml_permute(ctx0, inpL, 2, 0, 1, 3)); cur = ggml_conv_2d_sk_p0(ctx0, enc.neck_conv_0, cur); cur = sam_layer_norm_2d(ctx0, cur, n_enc_out_chans, enc.neck_norm_0_w, enc.neck_norm_0_b, hparams.eps); cur = ggml_conv_2d_s1_ph(ctx0, enc.neck_conv_1, cur); cur = sam_layer_norm_2d(ctx0, cur, n_enc_out_chans, enc.neck_norm_1_w, enc.neck_norm_1_b, hparams.eps); cur = ggml_cpy(ctx0, cur, state.embd_img); ggml_build_forward_expand(gf, cur); ggml_disconnect_node_from_graph(state.embd_img); //ggml_graph_print(&gf); ggml_free(ctx0); ggml_gallocr_alloc_graph(state.allocr, gf); { struct ggml_tensor * inp = ggml_graph_get_tensor(gf, "inp"); float * data = (float *) ggml_get_data(inp); const int nx = img.nx; const int ny = img.ny; const int n = nx*ny; GGML_ASSERT(nx == n_img_size && ny == n_img_size); for (int k = 0; k < 3; k++) { for (int y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { data[k*n + y*nx + x] = img.data[3*(y*nx + x) + k]; } } } } return gf; } struct prompt_encoder_result { struct ggml_tensor * embd_prompt_sparse = {}; struct ggml_tensor * embd_prompt_dense = {}; }; struct ggml_tensor * sam_prompt_encode_pe_encoding( const sam_encoder_prompt & enc, struct ggml_context * ctx0, struct ggml_cgraph * gf, struct ggml_tensor * coords) { auto * cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, enc.pe)), coords); cur = ggml_scale(ctx0, cur, float(2.0*M_PI)); // concat // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/prompt_encoder.py#L192 { struct ggml_tensor * t_sin = ggml_map_custom1(ctx0, cur, ggml_sam_sin, GGML_N_TASKS_MAX, NULL); struct ggml_tensor * t_cos = ggml_map_custom1(ctx0, cur, ggml_sam_cos, GGML_N_TASKS_MAX, NULL); cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, t_sin->ne[0] + t_cos->ne[0], cur->ne[1]); ggml_build_forward_expand(gf, ggml_cpy(ctx0, t_sin, 
ggml_view_2d(ctx0, cur, t_sin->ne[0], t_sin->ne[1], cur->nb[1], 0))); ggml_build_forward_expand(gf, ggml_cpy(ctx0, t_cos, ggml_view_2d(ctx0, cur, t_sin->ne[0], t_sin->ne[1], cur->nb[1], t_sin->nb[1]))); } return cur; } // encode a prompt // // - points // - boxes // - masks // // TODO: currently just encode a single point for simplicity // prompt_encoder_result sam_encode_prompt( const sam_model & model, struct ggml_context * ctx0, struct ggml_cgraph * gf, sam_state & state, const sam_prompt & prompt) { const auto & hparams = model.hparams; const auto & enc = model.enc_prompt; struct ggml_tensor * inp = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 2, 2); ggml_set_name(inp, "prompt_input"); ggml_set_input(inp); auto * embd_prompt_sparse = [&]() -> struct ggml_tensor * { switch (prompt.prompt_type) { case SAM_PROMPT_TYPE_POINT: { // PromptEncoder._embed_points auto * pt_embd = sam_prompt_encode_pe_encoding(enc, ctx0, gf, inp); // overwrite label == -1 with not_a_point_embed.weight // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/prompt_encoder.py#L86 // TODO: extend for multiple points auto * pt_embd_not = ggml_view_2d(ctx0, pt_embd, pt_embd->ne[0], 1, pt_embd->nb[1], pt_embd->nb[1]); ggml_build_forward_expand(gf, ggml_cpy(ctx0, enc.not_a_pt_embd_w, pt_embd_not)); // add point_embeddings[1] to label == 1 // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/prompt_encoder.py#L90 auto * pt_embd1 = ggml_view_2d(ctx0, pt_embd, pt_embd->ne[0], 1, pt_embd->nb[1], 0); ggml_build_forward_expand(gf, ggml_add_inplace(ctx0, pt_embd1, enc.pt_embd[1])); return pt_embd; } break; case SAM_PROMPT_TYPE_BOX: { // PromptEncoder._embed_boxes auto * corner_embd = sam_prompt_encode_pe_encoding(enc, ctx0, gf, inp); // corner_embd[:, 0, :] += self.point_embeddings[2].weight // corner_embd[:, 1, :] += self.point_embeddings[3].weight auto * corner0 = ggml_view_2d( ctx0, corner_embd, corner_embd->ne[0], 1, corner_embd->nb[1], 0); auto * corner1 = ggml_view_2d( ctx0, corner_embd, corner_embd->ne[0], 1, corner_embd->nb[1], corner_embd->nb[1]); ggml_build_forward_expand(gf, ggml_add_inplace(ctx0, corner0, enc.pt_embd[2])); ggml_build_forward_expand(gf, ggml_add_inplace(ctx0, corner1, enc.pt_embd[3])); return corner_embd; } break; default: { fprintf(stderr, "%s: unsupported prompt type %d\n", __func__, prompt.prompt_type); return nullptr; } break; } }(); ggml_build_forward_expand(gf, embd_prompt_sparse); struct ggml_tensor * embd_prompt_dense = ggml_repeat(ctx0, ggml_cont(ctx0, ggml_view_3d(ctx0, enc.no_mask_embd_w, 1, 1, enc.no_mask_embd_w->ne[0], enc.no_mask_embd_w->nb[0], enc.no_mask_embd_w->nb[0], 0)), ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hparams.n_img_embd(), hparams.n_img_embd(), hparams.n_enc_out_chans)); ggml_build_forward_expand(gf, embd_prompt_dense); //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); prompt_encoder_result res; res.embd_prompt_sparse = embd_prompt_sparse; res.embd_prompt_dense = embd_prompt_dense; return res; } struct ggml_tensor* sam_decode_mask_transformer_attn( const sam_layer_dec_transformer_attn & attn, struct ggml_tensor * queries, struct ggml_tensor * keys, struct ggml_tensor * values, struct ggml_context * ctx0, const sam_model & model) { const auto & hparams = model.hparams; const int n_head = hparams.n_dec_heads; struct ggml_tensor * Qcur = {}; struct ggml_tensor * Kcur = {}; struct ggml_tensor * Vcur = {}; Qcur = ggml_mul_mat(ctx0, attn.q_w, queries); Qcur = ggml_add_inplace(ctx0, Qcur, 
attn.q_b); Kcur = ggml_mul_mat(ctx0, attn.k_w, keys); Kcur = ggml_add_inplace(ctx0, Kcur, attn.k_b); Vcur = ggml_mul_mat(ctx0, attn.v_w, values); Vcur = ggml_add_inplace(ctx0, Vcur, attn.v_b); struct ggml_tensor * Q = {}; struct ggml_tensor * K = {}; struct ggml_tensor * V = {}; Q = ggml_reshape_4d(ctx0, Qcur, Qcur->ne[0]/n_head, n_head, Qcur->ne[1], Qcur->ne[2]); Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); K = ggml_reshape_4d(ctx0, Kcur, Kcur->ne[0]/n_head, n_head, Kcur->ne[1], Kcur->ne[2]); K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); V = ggml_reshape_4d(ctx0, Vcur, Vcur->ne[0]/n_head, n_head, Vcur->ne[1], Vcur->ne[2]); V = ggml_cont(ctx0, ggml_permute(ctx0, V, 0, 2, 1, 3)); // Q * K struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, 1.0f/sqrt(float(Q->ne[0]))); struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_scaled); struct ggml_tensor * KQV = ggml_mul_mat(ctx0, KQ_soft_max, ggml_cont(ctx0, ggml_transpose(ctx0, V))); struct ggml_tensor * KQV_merged = ggml_cont(ctx0, ggml_transpose(ctx0, KQV)); KQV_merged = ggml_cont(ctx0, ggml_permute(ctx0, KQV_merged, 0, 2, 1, 3)); KQV_merged = ggml_reshape_3d(ctx0, KQV_merged, KQV_merged->ne[0]*KQV_merged->ne[1], KQV_merged->ne[2], KQV_merged->ne[3]); KQV_merged = ggml_mul_mat(ctx0, attn.out_w, KQV_merged); KQV_merged = ggml_add_inplace(ctx0, KQV_merged, attn.out_b); return KQV_merged; } struct ggml_tensor * sam_decode_mask_mlp_relu_3( struct ggml_tensor * in, struct ggml_tensor * w_0, struct ggml_tensor * b_0, struct ggml_tensor * w_1, struct ggml_tensor * b_1, struct ggml_tensor * w_2, struct ggml_tensor * b_2, struct ggml_context * ctx0) { struct ggml_tensor * cur = {}; cur = ggml_mul_mat(ctx0, w_0, in); cur = ggml_add_inplace(ctx0, cur, b_0); cur = ggml_relu_inplace(ctx0, cur); cur = ggml_mul_mat(ctx0, w_1, cur); cur = ggml_add_inplace(ctx0, cur, b_1); cur = ggml_relu_inplace(ctx0, cur); cur = ggml_mul_mat(ctx0, w_2, cur); cur = ggml_add_inplace(ctx0, cur, b_2); return cur; } bool sam_decode_mask( const sam_model & model, const prompt_encoder_result & prompt, struct ggml_tensor * pe_img, struct ggml_context * ctx0, struct ggml_cgraph * gf, sam_state & state, const bool multimask_output) { const auto & hparams = model.hparams; const auto & dec = model.dec; const int n_img_embd = hparams.n_img_embd(); struct ggml_tensor * tokens = {}; { // Concatenate output tokens // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/mask_decoder.py#L120 const auto& sparse = prompt.embd_prompt_sparse; tokens = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, dec.iou_token_w->ne[0], dec.iou_token_w->ne[1] + dec.mask_tokens_w->ne[1] + sparse->ne[1], sparse->ne[2]); const size_t offsets[3] = { 0, dec.iou_token_w->ne[1]*tokens->nb[1], dec.iou_token_w->ne[1]*tokens->nb[1] + dec.mask_tokens_w->ne[1]*tokens->nb[1] }; ggml_build_forward_expand(gf, ggml_cpy(ctx0, dec.iou_token_w, ggml_view_2d(ctx0, tokens, tokens->ne[0], dec.iou_token_w->ne[1], tokens->nb[1], offsets[0]))); ggml_build_forward_expand(gf, ggml_cpy(ctx0, dec.mask_tokens_w, ggml_view_2d(ctx0, tokens, tokens->ne[0], dec.mask_tokens_w->ne[1], tokens->nb[1], offsets[1]))); ggml_build_forward_expand(gf, ggml_cpy(ctx0, sparse, ggml_view_2d(ctx0, tokens, tokens->ne[0], sparse->ne[1], tokens->nb[1], offsets[2]))); // TODO: Sparse prompt embeddings can have more than one point } struct ggml_tensor * src = {}; struct ggml_tensor * pos_src = {}; int 
srcNE[4] = { 0, 0, 0, 0 }; { // Expand per-image data in the batch direction to be per-mask // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/mask_decoder.py#L125 src = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, state.embd_img->ne[0], state.embd_img->ne[1], state.embd_img->ne[2], tokens->ne[2]); src = ggml_add(ctx0, ggml_repeat(ctx0, state.embd_img, src), prompt.embd_prompt_dense); srcNE[0] = src->ne[0]; srcNE[1] = src->ne[1]; srcNE[2] = src->ne[2]; srcNE[3] = src->ne[3]; // flatten & permute // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L83 src = ggml_cont(ctx0, ggml_permute(ctx0, ggml_view_3d(ctx0, src, src->ne[0]*src->ne[1], src->ne[2], src->ne[3], src->nb[2], src->nb[3], 0), 1, 0, 2, 3)); pos_src = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, pe_img->ne[0], pe_img->ne[1], pe_img->ne[2], tokens->ne[2]); pos_src = ggml_repeat(ctx0, pe_img, pos_src); // flatten & permute // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L83 pos_src = ggml_cont(ctx0, ggml_permute(ctx0, ggml_view_3d(ctx0, pos_src, pos_src->ne[0]*pos_src->ne[1], pos_src->ne[2], pos_src->ne[3], pos_src->nb[2], pos_src->nb[3], 0), 1, 0, 2, 3)); } struct ggml_tensor * queries = tokens; struct ggml_tensor * keys = src; { // Run the transformer // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L62 for (int i = 0; i < int(model.dec.transformer_layers.size()); ++i) { const auto& tfm_layer = model.dec.transformer_layers[i]; // Self attention block // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L154 const bool skip_first_layer_pe = i == 0; if (skip_first_layer_pe) { queries = sam_decode_mask_transformer_attn(tfm_layer.self_attn, queries, queries, queries, ctx0, model); } else { struct ggml_tensor * q_0 = ggml_add(ctx0, queries, tokens); struct ggml_tensor * self_attn = sam_decode_mask_transformer_attn(tfm_layer.self_attn, q_0, q_0, queries, ctx0, model); queries = ggml_add(ctx0, queries, self_attn); } queries = ggml_norm(ctx0, queries, hparams.eps_decoder_transformer); queries = ggml_add_inplace(ctx0, ggml_mul(ctx0, queries, tfm_layer.norm1_w), tfm_layer.norm1_b); // Cross attention block, tokens attending to image embedding // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L163 struct ggml_tensor * q_1 = ggml_add(ctx0, queries, tokens); struct ggml_tensor * k_1 = ggml_add(ctx0, keys, pos_src); struct ggml_tensor * cross_attn_token_to_img = sam_decode_mask_transformer_attn(tfm_layer.cross_attn_token_to_img, q_1, k_1, keys, ctx0, model); queries = ggml_add_inplace(ctx0, queries, cross_attn_token_to_img); queries = ggml_norm_inplace(ctx0, queries, hparams.eps_decoder_transformer); queries = ggml_add_inplace(ctx0, ggml_mul(ctx0, queries, tfm_layer.norm2_w), tfm_layer.norm2_b); // MLP block // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L170 struct ggml_tensor * mlp_out = ggml_mul_mat(ctx0, tfm_layer.mlp_lin1_w, queries); mlp_out = ggml_add_inplace(ctx0, mlp_out, 
tfm_layer.mlp_lin1_b); // RELU activation mlp_out = ggml_relu_inplace(ctx0, mlp_out); mlp_out = ggml_mul_mat(ctx0, tfm_layer.mlp_lin2_w, mlp_out); mlp_out = ggml_add_inplace(ctx0, mlp_out, tfm_layer.mlp_lin2_b); queries = ggml_add_inplace(ctx0, queries, mlp_out); queries = ggml_norm_inplace(ctx0, queries, hparams.eps_decoder_transformer); queries = ggml_add_inplace(ctx0, ggml_mul(ctx0, queries, tfm_layer.norm3_w), tfm_layer.norm3_b); // Cross attention block, image embedding attending to tokens // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L175 struct ggml_tensor * q_2 = ggml_add(ctx0, queries, tokens); struct ggml_tensor * k_2 = ggml_add(ctx0, keys, pos_src); struct ggml_tensor * cross_attn_img_to_token = sam_decode_mask_transformer_attn(tfm_layer.cross_attn_img_to_token, k_2, q_2, queries, ctx0, model); keys = ggml_add_inplace(ctx0, keys, cross_attn_img_to_token); keys = ggml_norm_inplace(ctx0, keys, hparams.eps_decoder_transformer); keys = ggml_add_inplace(ctx0, ggml_mul(ctx0, keys, tfm_layer.norm4_w), tfm_layer.norm4_b); } // Apply the final attention layer from the points to the image // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py#L99 struct ggml_tensor * q = ggml_add(ctx0, queries, tokens); struct ggml_tensor * k = ggml_add(ctx0, keys, pos_src); struct ggml_tensor * final_attn_token_to_img = sam_decode_mask_transformer_attn(dec.transformer_final_attn_token_to_img, q, k, keys, ctx0, model); queries = ggml_add_inplace(ctx0, queries, final_attn_token_to_img); queries = ggml_norm_inplace(ctx0, queries, hparams.eps_decoder_transformer); queries = ggml_add_inplace(ctx0, ggml_mul(ctx0, queries, dec.transformer_norm_final_w), dec.transformer_norm_final_b); } struct ggml_tensor * iou_pred = ggml_view_2d(ctx0, queries, queries->ne[0], queries->ne[2], queries->nb[2], 0); const int num_mask_tokens = 4; // num_multimask_outputs + 1 struct ggml_tensor * mask_tokens_out = ggml_view_3d(ctx0, queries, queries->ne[0], num_mask_tokens, queries->ne[2], queries->nb[1], num_mask_tokens*queries->nb[1], queries->nb[1]); // Upscale mask embeddings and predict masks using the mask tokens // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/mask_decoder.py#L136 keys = ggml_cont(ctx0, ggml_transpose(ctx0, keys)); keys = ggml_view_4d(ctx0, keys, srcNE[0], srcNE[1], srcNE[2], srcNE[3], srcNE[0]*keys->nb[0], keys->nb[1], keys->nb[2], 0); // ggml_build_forward_expand(gf, keys); struct ggml_tensor * upscaled_embedding = {}; { // ConvTranspose2d keys = ggml_conv_transpose_2d_p0(ctx0, dec.output_upscaling_0_w, keys, 2); keys = ggml_add_inplace(ctx0, keys, ggml_repeat(ctx0, ggml_reshape_3d(ctx0, dec.output_upscaling_0_b, 1, 1, dec.output_upscaling_0_b->ne[0]), keys)); keys = sam_layer_norm_2d(ctx0, keys, n_img_embd, dec.output_upscaling_1_w, dec.output_upscaling_1_b, hparams.eps); // GELU activation keys = ggml_gelu_inplace(ctx0, keys); // ConvTranspose2d keys = ggml_conv_transpose_2d_p0(ctx0, dec.output_upscaling_3_w, keys, 2); keys = ggml_add_inplace(ctx0, ggml_repeat(ctx0, ggml_reshape_3d(ctx0, dec.output_upscaling_3_b, 1, 1, dec.output_upscaling_3_b->ne[0]), keys), keys); // GELU activation keys = ggml_gelu_inplace(ctx0, keys); upscaled_embedding = ggml_reshape_3d(ctx0, keys, keys->ne[0]*keys->ne[1], keys->ne[2], keys->ne[3]); 
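/*
 * The reshape above flattens the upscaled image embedding from (W, H, C) to (W*H, C) per
 * batch element; the transpose below then makes the channel dimension the leading one so that,
 * further down, ggml_mul_mat(hyper_in, upscaled_embedding) produces one low-res mask per mask
 * token — each mask is a per-pixel dot product between that token's hypernetwork-MLP output and
 * the upscaled embedding, mirroring the mask_decoder.py reference linked above.
 */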
upscaled_embedding = ggml_cont(ctx0, ggml_transpose(ctx0, upscaled_embedding)); // TODO: Shouldn't be needed } struct ggml_tensor * hyper_in = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_img_embd/2, num_mask_tokens, mask_tokens_out->ne[2]); for (int i = 0; i < num_mask_tokens; ++i) { const auto& mlp = dec.output_hypernet_mlps[i]; struct ggml_tensor * in = ggml_view_2d(ctx0, mask_tokens_out, mask_tokens_out->ne[0], mask_tokens_out->ne[2], mask_tokens_out->nb[1], i*mask_tokens_out->nb[1]); struct ggml_tensor * out = sam_decode_mask_mlp_relu_3(in, mlp.w_0, mlp.b_0, mlp.w_1, mlp.b_1, mlp.w_2, mlp.b_2, ctx0); ggml_build_forward_expand(gf, ggml_cpy(ctx0, out, ggml_view_2d(ctx0, hyper_in, hyper_in->ne[0], hyper_in->ne[2], hyper_in->nb[1], i*hyper_in->nb[1]))); } struct ggml_tensor * masks = ggml_mul_mat(ctx0, hyper_in, upscaled_embedding); masks = ggml_cont(ctx0, ggml_transpose(ctx0, masks)); // TODO: Shouldn't be needed masks = ggml_reshape_4d(ctx0, masks, keys->ne[0], keys->ne[1], masks->ne[1], keys->ne[3]); // Generate mask quality predictions // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/mask_decoder.py#L146 iou_pred = sam_decode_mask_mlp_relu_3(iou_pred, dec.iou_prediction_head_0_w, dec.iou_prediction_head_0_b, dec.iou_prediction_head_1_w, dec.iou_prediction_head_1_b, dec.iou_prediction_head_2_w, dec.iou_prediction_head_2_b, ctx0); // Select the correct mask or masks for output // ref: https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/mask_decoder.py#L101 if (multimask_output) { iou_pred = ggml_cpy(state.ctx, ggml_view_1d(ctx0, iou_pred, iou_pred->ne[0] - 1, iou_pred->nb[0]), state.iou_predictions); masks = ggml_view_4d(ctx0, masks, masks->ne[0], masks->ne[1], masks->ne[2] - 1, masks->ne[3], masks->nb[1], masks->nb[2], masks->nb[3], masks->nb[2] /* offset*/); masks = ggml_cpy(state.ctx, masks, state.low_res_masks); } else { iou_pred = ggml_cpy(state.ctx, ggml_view_1d(ctx0, iou_pred, 1, 0), ggml_view_1d(ctx0, state.iou_predictions, 1, 0)); masks = ggml_view_4d(ctx0, masks, masks->ne[0], masks->ne[1], 1, masks->ne[3], masks->nb[1], masks->nb[2], masks->nb[3], 0); auto * low_res_mask = ggml_view_4d(ctx0, state.low_res_masks, masks->ne[0], masks->ne[1], 1, masks->ne[3], masks->nb[1], masks->nb[2], masks->nb[3], 0); masks = ggml_cpy(state.ctx, masks, low_res_mask); } ggml_build_forward_expand(gf, masks); ggml_build_forward_expand(gf, iou_pred); ggml_disconnect_node_from_graph(state.low_res_masks); ggml_disconnect_node_from_graph(state.iou_predictions); return true; } bool sam_write_masks(const sam_hparams& hparams, int nx, int ny, const sam_state & state, const std::string & fname, const bool multimask_output) { if (state.low_res_masks->ne[2] == 0) return true; if (state.low_res_masks->ne[2] != state.iou_predictions->ne[0]) { printf("Error: number of masks (%d) does not match number of iou predictions (%d)\n", (int)state.low_res_masks->ne[2], (int)state.iou_predictions->ne[0]); return false; } const int n_img_size = hparams.n_img_size(); const float mask_threshold = hparams.mask_threshold; const float iou_threshold = hparams.iou_threshold; const float stability_score_threshold = hparams.stability_score_threshold; const float intersection_threshold = mask_threshold + hparams.stability_score_offset; const float union_threshold = mask_threshold - hparams.stability_score_offset; const int ne0 = state.low_res_masks->ne[0]; const int ne1 = 
state.low_res_masks->ne[1]; const int ne2 = multimask_output ? state.low_res_masks->ne[2] : 1; // Remove padding and upscale masks to the original image size. // ref: https://github.com/facebookresearch/segment-anything/blob/efeab7296ab579d4a261e554eca80faf6b33924a/segment_anything/modeling/sam.py#L140 const float preprocess_scale = std::max(nx, ny) / float(n_img_size); const int cropped_nx = int(nx / preprocess_scale + 0.5f); const int cropped_ny = int(ny / preprocess_scale + 0.5f); const float scale_x_1 = (float)ne0 / (float)n_img_size; const float scale_y_1 = (float)ne1 / (float)n_img_size; const float scale_x_2 = float(cropped_nx) / float(nx); const float scale_y_2 = float(cropped_ny) / float(ny); const auto iou_data = (float*)state.iou_predictions->data; for (int i = 0; i < ne2; ++i) { if (iou_threshold > 0.f && iou_data[i] < iou_threshold) { printf("Skipping mask %d with iou %f below threshold %f\n", i, iou_data[i], iou_threshold); continue; // Filtering masks with iou below the threshold } std::vector mask_data(n_img_size*n_img_size); { const float* data = (float *) state.low_res_masks->data + i*ne0*ne1; for (int iy = 0; iy < n_img_size; ++iy) { for (int ix = 0; ix < n_img_size; ++ix) { const float sx = std::max(scale_x_1*(ix + 0.5f) - 0.5f, 0.0f); const float sy = std::max(scale_y_1*(iy + 0.5f) - 0.5f, 0.0f); const int x0 = std::max(0, (int)sx); const int y0 = std::max(0, (int)sy); const int x1 = std::min(x0 + 1, ne0 - 1); const int y1 = std::min(y0 + 1, ne1 - 1); const float dx = sx - x0; const float dy = sy - y0; const int j00 = y0*ne0 + x0; const int j01 = y0*ne0 + x1; const int j10 = y1*ne0 + x0; const int j11 = y1*ne0 + x1; const float v00 = data[j00]; const float v01 = data[j01]; const float v10 = data[j10]; const float v11 = data[j11]; const float v0 = (1-dx)*v00 + dx*v01; const float v1 = (1-dx)*v10 + dx*v11; const float v = (1-dy)*v0 + dy*v1; mask_data[iy*n_img_size + ix] = v; } } } int intersections = 0; int unions = 0; sam_image_u8 res; int min_iy = ny; int max_iy = 0; int min_ix = nx; int max_ix = 0; { const float* data = mask_data.data(); res.nx = nx; res.ny = ny; res.data.resize(nx*ny); for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) { const float sx = std::max(scale_x_2*(ix + 0.5f) - 0.5f, 0.0f); const float sy = std::max(scale_y_2*(iy + 0.5f) - 0.5f, 0.0f); const int x0 = std::max(0, (int)sx); const int y0 = std::max(0, (int)sy); const int x1 = std::min(x0 + 1, cropped_nx - 1); const int y1 = std::min(y0 + 1, cropped_ny - 1); const float dx = sx - x0; const float dy = sy - y0; const int j00 = y0*n_img_size + x0; const int j01 = y0*n_img_size + x1; const int j10 = y1*n_img_size + x0; const int j11 = y1*n_img_size + x1; const float v00 = data[j00]; const float v01 = data[j01]; const float v10 = data[j10]; const float v11 = data[j11]; const float v0 = (1-dx)*v00 + dx*v01; const float v1 = (1-dx)*v10 + dx*v11; const float v = (1-dy)*v0 + dy*v1; if (v > intersection_threshold) { intersections++; } if (v > union_threshold) { unions++; } if (v > mask_threshold) { min_iy = std::min(min_iy, iy); max_iy = std::max(max_iy, iy); min_ix = std::min(min_ix, ix); max_ix = std::max(max_ix, ix); res.data[iy*nx + ix] = 255; } } } } const float stability_score = float(intersections) / float(unions); if (stability_score_threshold > 0.f && stability_score < stability_score_threshold) { printf("Skipping mask %d with stability score %f below threshold %f\n", i, stability_score, stability_score_threshold); continue; // Filtering masks with stability score below the 
threshold } printf("Mask %d: iou = %f, stability_score = %f, bbox (%d, %d), (%d, %d)\n", i, iou_data[i], stability_score, min_ix, max_ix, min_iy, max_iy); const std::string filename = multimask_output ? fname + std::to_string(i) + ".png" : fname + ".png"; if (!stbi_write_png(filename.c_str(), res.nx, res.ny, 1, res.data.data(), res.nx)) { printf("%s: failed to write mask %s\n", __func__, filename.c_str()); return false; } } return true; } struct ggml_cgraph * sam_build_fast_graph( const sam_model & model, sam_state & state, const int nx, const int ny, const sam_prompt & prompt, const bool multimask_output) { struct ggml_init_params ggml_params = { /*.mem_size =*/ state.buf_compute_fast.size(), /*.mem_buffer =*/ state.buf_compute_fast.data(), /*.no_alloc =*/ true, // skip allocating as we use ggml_alloc to allocate exact memory requirements }; struct ggml_context * ctx0 = ggml_init(ggml_params); struct ggml_cgraph * gf = ggml_new_graph(ctx0); prompt_encoder_result enc_res = sam_encode_prompt(model, ctx0, gf, state, prompt); if (!enc_res.embd_prompt_sparse || !enc_res.embd_prompt_dense) { fprintf(stderr, "%s: failed to encode prompt\n", __func__); return {}; } struct ggml_tensor * pe_img_dense = sam_fill_dense_pe(model, ctx0, gf, state); if (!pe_img_dense) { fprintf(stderr, "%s: failed to get dense positional encoding\n", __func__); return {}; } if (!sam_decode_mask(model, enc_res, pe_img_dense, ctx0, gf, state, multimask_output)) { fprintf(stderr, "%s: failed to decode mask\n", __func__); return {}; } ggml_free(ctx0); ggml_gallocr_alloc_graph(state.allocr, gf); struct ggml_tensor * inp = ggml_graph_get_tensor(gf, "prompt_input"); auto * data = (float *) inp->data; // Transform prompt (point or box) { // https://github.com/facebookresearch/segment-anything/blob/dca509fe793f601edb92606367a655c15ac00fdf/segment_anything/utils/transforms.py#L33 // The point scaling here is greatly simplified but mathematically equivalent. 
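/*
 * Why the single 1/max(nx, ny) factor is equivalent to the reference transform: ResizeLongestSide
 * maps a coordinate v to v * n_img_size / max(nx, ny), and the prompt encoder then normalizes by
 * n_img_size, so the two factors cancel and the net scaling is simply v / max(nx, ny) in [0, 1],
 * which the code below maps to [-1, 1] via 2*v - 1.
 * Worked example (taking n_img_size = 1024 for concreteness; the value cancels either way):
 * a point at x = 320 in a 640x480 image gives 320 * 1024/640 = 512, then 512/1024 = 0.5 on the
 * reference path, and 320/640 = 0.5 here — both end up as 2*0.5 - 1 = 0.
 */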
const auto scale = 1.0F / std::max(nx, ny); switch (prompt.prompt_type) { case SAM_PROMPT_TYPE_POINT: { const auto & pt = prompt.pt; // set the input by converting the [0, 1] coordinates to [-1, 1] data[0] = 2.0f*pt.x*scale - 1.0f; data[1] = 2.0f*pt.y*scale - 1.0f; // padding // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/prompt_encoder.py#L81-L85 data[2] = 2.0f*(0.0f) - 1.0f; data[3] = 2.0f*(0.0f) - 1.0f; } break; case SAM_PROMPT_TYPE_BOX: { const auto & box = prompt.box; data[0] = 2.0f*box.x1*scale - 1.0f; data[1] = 2.0f*box.y1*scale - 1.0f; data[2] = 2.0f*box.x2*scale - 1.0f; data[3] = 2.0f*box.y2*scale - 1.0f; } break; } } // from sam_fill_dense_pe { struct ggml_tensor * xy_embed_stacked = ggml_graph_get_tensor(gf, "xy_embed_stacked"); const int32_t n_img_embd = model.hparams.n_img_embd(); const float n_img_embd_inv = 1.0f / n_img_embd; float * data = (float *) ggml_get_data(xy_embed_stacked); for (int i = 0; i < n_img_embd; ++i) { const int row = 2*i*n_img_embd; const float y_val = 2 * (i + 0.5f) * n_img_embd_inv - 1; for (int j = 0; j < n_img_embd; ++j) { const float x_val = 2 * (j + 0.5f) * n_img_embd_inv - 1; data[row + 2*j + 0] = x_val; data[row + 2*j + 1] = y_val; } } } return gf; } void sam_print_usage(int argc, char ** argv, const sam_params & params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n"); fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); fprintf(stderr, " -m FNAME, --model FNAME\n"); fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); fprintf(stderr, " -i FNAME, --inp FNAME\n"); fprintf(stderr, " input file (default: %s)\n", params.fname_inp.c_str()); fprintf(stderr, " -o FNAME, --out FNAME\n"); fprintf(stderr, " mask file name prefix (default: %s)\n", params.fname_out.c_str()); fprintf(stderr, " -sm, --single-mask\n"); fprintf(stderr, " single mask output (default multi mask output)\n"); fprintf(stderr, "SAM hyperparameters:\n"); fprintf(stderr, " -mt FLOAT, --mask-threshold\n"); fprintf(stderr, " mask threshold (default: %f)\n", params.mask_threshold); fprintf(stderr, " -it FLOAT, --iou-threshold\n"); fprintf(stderr, " iou threshold (default: %f)\n", params.iou_threshold); fprintf(stderr, " -st FLOAT, --score-threshold\n"); fprintf(stderr, " score threshold (default: %f)\n", params.stability_score_threshold); fprintf(stderr, " -so FLOAT, --score-offset\n"); fprintf(stderr, " score offset (default: %f)\n", params.stability_score_offset); fprintf(stderr, " -e FLOAT, --epsilon\n"); fprintf(stderr, " epsilon (default: %f)\n", params.eps); fprintf(stderr, " -ed FLOAT, --epsilon-decoder-transformer\n"); fprintf(stderr, " epsilon decoder transformer (default: %f)\n", params.eps_decoder_transformer); fprintf(stderr, "SAM prompt:\n"); fprintf(stderr, " -p [x,y], --point-prompt\n"); fprintf(stderr, " point to be used as prompt for SAM (default: %f,%f). Must be in a format FLOAT,FLOAT \n", params.prompt.pt.x, params.prompt.pt.y); fprintf(stderr, " -b [x1,y1,x2,y2], --box-prompt\n"); fprintf(stderr, " box to be used as prompt for SAM (default: %f,%f,%f,%f). 
Must be in a format FLOAT,FLOAT,FLOAT,FLOAT \n", params.prompt.box.x1, params.prompt.box.y1, params.prompt.box.x2, params.prompt.box.y2); fprintf(stderr, "\n"); } bool sam_params_parse(int argc, char ** argv, sam_params & params) { bool use_point_prompt = false; bool use_box_prompt = false; for (int i = 1; i < argc; i++) { std::string arg = argv[i]; if (arg == "-s" || arg == "--seed") { params.seed = std::stoi(argv[++i]); } else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); } else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; } else if (arg == "-i" || arg == "--inp") { params.fname_inp = argv[++i]; } else if (arg == "-o" || arg == "--out") { params.fname_out = argv[++i]; } else if (arg == "-sm" || arg == "--single-mask") { params.multimask_output = false; } else if (arg == "-mt" || arg == "--mask-threshold") { params.mask_threshold = std::stof(argv[++i]); } else if (arg == "-it" || arg == "--iou-threshold") { params.iou_threshold = std::stof(argv[++i]); } else if (arg == "-st" || arg == "--score-threshold") { params.stability_score_threshold = std::stof(argv[++i]); } else if (arg == "-so" || arg == "--score-offset") { params.stability_score_offset = std::stof(argv[++i]); } else if (arg == "-e" || arg == "--epsilon") { params.eps = std::stof(argv[++i]); } else if (arg == "-ed" || arg == "--epsilon-decoder-transformer") { params.eps_decoder_transformer = std::stof(argv[++i]); } else if (arg == "-p" || arg == "--point-prompt") { // TODO multiple points per model invocation use_point_prompt = true; char* point = argv[++i]; char* coord = strtok(point, ","); if (!coord){ fprintf(stderr, "Error while parsing prompt!\n"); exit(1); } params.prompt.pt.x = std::stof(coord); coord = strtok(NULL, ","); if (!coord){ fprintf(stderr, "Error while parsing prompt!\n"); exit(1); } params.prompt.pt.y = std::stof(coord); } else if (arg == "-b" || arg == "--box-prompt") { use_box_prompt = true; char * box_prompt = argv[++i]; float box_vals[4]; char * val = strtok(box_prompt, ","); if (!val) { fprintf(stderr, "Error while parsing prompt!\n"); exit(1); } box_vals[0] = std::stof(val); for (int j = 1; j < 4; ++j) { char * val = strtok(NULL, ","); if (!val) { fprintf(stderr, "Error while parsing prompt!\n"); exit(1); } box_vals[j] = std::stof(val); } params.prompt.box.x1 = box_vals[0]; params.prompt.box.y1 = box_vals[1]; params.prompt.box.x2 = box_vals[2]; params.prompt.box.y2 = box_vals[3]; } else if (arg == "-h" || arg == "--help") { sam_print_usage(argc, argv, params); exit(0); } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); sam_print_usage(argc, argv, params); exit(0); } } if (use_box_prompt && use_point_prompt) { fprintf(stderr, "Error: use either point or box prompt, not both.\n"); exit(1); } params.prompt.prompt_type = SAM_PROMPT_TYPE_POINT; if (use_box_prompt) { params.prompt.prompt_type = SAM_PROMPT_TYPE_BOX; } return true; } int main(int argc, char ** argv) { const int64_t t_main_start_us = ggml_time_us(); sam_params params; params.model = "models/sam-vit-b/ggml-model-f16.bin"; sam_model model; sam_state state; int64_t t_load_us = 0; if (sam_params_parse(argc, argv, params) == false) { return 1; } if (params.seed < 0) { params.seed = time(NULL); } fprintf(stderr, "%s: seed = %d\n", __func__, params.seed); // load the image sam_image_u8 img0; if (!sam_image_load_from_file(params.fname_inp, img0)) { fprintf(stderr, "%s: failed to load image from '%s'\n", __func__, params.fname_inp.c_str()); return 1; } fprintf(stderr, "%s: loaded 
image '%s' (%d x %d)\n", __func__, params.fname_inp.c_str(), img0.nx, img0.ny); // preprocess to f32 sam_image_f32 img1; if (!sam_image_preprocess(img0, img1)) { fprintf(stderr, "%s: failed to preprocess image\n", __func__); return 1; } fprintf(stderr, "%s: preprocessed image (%d x %d)\n", __func__, img1.nx, img1.ny); // load the model { const int64_t t_start_us = ggml_time_us(); if (!sam_model_load(params, model)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } t_load_us = ggml_time_us() - t_start_us; } { static size_t buf_size = 256u*1024*1024; struct ggml_init_params ggml_params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, }; state.ctx = ggml_init(ggml_params); state.embd_img = ggml_new_tensor_3d(state.ctx, GGML_TYPE_F32, model.hparams.n_img_embd(), model.hparams.n_img_embd(), model.hparams.n_enc_out_chans); state.low_res_masks = ggml_new_tensor_3d(state.ctx, GGML_TYPE_F32, model.hparams.n_enc_out_chans, model.hparams.n_enc_out_chans, 3); state.iou_predictions = ggml_new_tensor_1d(state.ctx, GGML_TYPE_F32, 3); } // Encode image { state.buf_compute_img_enc.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead()); state.allocr = ggml_gallocr_new(ggml_backend_cpu_buffer_type()); struct ggml_cgraph * gf = sam_encode_image(model, state, img1); if (!gf) { fprintf(stderr, "%s: failed to encode image\n", __func__); return 1; } ggml_graph_compute_helper(state.work_buffer, gf, params.n_threads); // print_t_f32("embd_img", state.embd_img); ggml_gallocr_free(state.allocr); state.allocr = NULL; state.work_buffer.clear(); } // Encode prompt and decode mask { state.buf_compute_fast.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead()); state.allocr = ggml_gallocr_new(ggml_backend_cpu_buffer_type()); switch (params.prompt.prompt_type) { case SAM_PROMPT_TYPE_POINT: fprintf(stderr, "Using point prompt: (%f, %f)\n", params.prompt.pt.x, params.prompt.pt.y); break; case SAM_PROMPT_TYPE_BOX: fprintf(stderr, "Using box prompt: (%f, %f, %f, %f)\n", params.prompt.box.x1, params.prompt.box.y1, params.prompt.box.x2, params.prompt.box.y2); break; } struct ggml_cgraph * gf = sam_build_fast_graph(model, state, img0.nx, img0.ny, params.prompt, params.multimask_output); if (!gf) { fprintf(stderr, "%s: failed to build fast graph\n", __func__); return 1; } ggml_graph_compute_helper(state.work_buffer, gf, params.n_threads); //print_t_f32("iou_predictions", state.iou_predictions); //print_t_f32("low_res_masks", state.low_res_masks); ggml_gallocr_free(state.allocr); state.allocr = NULL; } if (!sam_write_masks(model.hparams, img0.nx, img0.ny, state, params.fname_out, params.multimask_output)) { fprintf(stderr, "%s: failed to write masks\n", __func__); return 1; } // report timing { const int64_t t_main_end_us = ggml_time_us(); fprintf(stderr, "\n\n"); fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); } ggml_free(model.ctx); return 0; } ggml-org-ggml-3678254/examples/simple/000077500000000000000000000000001512524704700174735ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/simple/CMakeLists.txt000066400000000000000000000006571512524704700222430ustar00rootroot00000000000000# # simple-ctx set(TEST_TARGET simple-ctx) add_executable(${TEST_TARGET} simple-ctx.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml) # # simple-backend set(TEST_TARGET simple-backend) 
add_executable(${TEST_TARGET} simple-backend.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml) if (GGML_CUDA) add_compile_definitions(GGML_USE_CUDA) endif() if (GGML_METAL) add_compile_definitions(GGML_USE_METAL) endif() ggml-org-ggml-3678254/examples/simple/README.md000066400000000000000000000020661512524704700207560ustar00rootroot00000000000000## Simple This example simply performs a matrix multiplication, solely for the purpose of demonstrating a basic usage of ggml and backend handling. The code is commented to help understand what each part does. Traditional matrix multiplication goes like this (multiply row-by-column): $$ A \times B = C $$ $$ \begin{bmatrix} 2 & 8 \\ 5 & 1 \\ 4 & 2 \\ 8 & 6 \\ \end{bmatrix} \times \begin{bmatrix} 10 & 9 & 5 \\ 5 & 9 & 4 \\ \end{bmatrix} \= \begin{bmatrix} 60 & 90 & 42 \\ 55 & 54 & 29 \\ 50 & 54 & 28 \\ 110 & 126 & 64 \\ \end{bmatrix} $$ In `ggml`, we pass the matrix $B$ in transposed form and multiply row-by-row. The result $C$ is also transposed: $$ ggml\\_mul\\_mat(A, B^T) = C^T $$ $$ ggml\\_mul\\_mat( \begin{bmatrix} 2 & 8 \\ 5 & 1 \\ 4 & 2 \\ 8 & 6 \\ \end{bmatrix} , \begin{bmatrix} 10 & 5 \\ 9 & 9 \\ 5 & 4 \\ \end{bmatrix} ) \= \begin{bmatrix} 60 & 55 & 50 & 110 \\ 90 & 54 & 54 & 126 \\ 42 & 29 & 28 & 64 \\ \end{bmatrix} $$ The `simple-ctx` doesn't support gpu acceleration. `simple-backend` demonstrates how to use other backends like CUDA and Metal. ggml-org-ggml-3678254/examples/simple/simple-backend.cpp000066400000000000000000000102301512524704700230510ustar00rootroot00000000000000#include "ggml.h" #include "ggml-backend.h" #include #include #include #include #include #include static void ggml_log_callback_default(ggml_log_level level, const char * text, void * user_data) { (void) level; (void) user_data; fputs(text, stderr); fflush(stderr); } // This is a simple model with two tensors a and b struct simple_model { struct ggml_tensor * a {}; struct ggml_tensor * b {}; // the backend to perform the computation (CPU, CUDA, METAL) ggml_backend_t backend {}; ggml_backend_t cpu_backend {}; ggml_backend_sched_t sched {}; // storage for the graph and tensors std::vector buf; }; // initialize data of matrices to perform matrix multiplication const int rows_A = 4, cols_A = 2; float matrix_A[rows_A * cols_A] = { 2, 8, 5, 1, 4, 2, 8, 6 }; const int rows_B = 3, cols_B = 2; /* Transpose([ 10, 9, 5, 5, 9, 4 ]) 2 rows, 3 cols */ float matrix_B[rows_B * cols_B] = { 10, 5, 9, 9, 5, 4 }; // initialize the tensors of the model in this case two matrices 2x2 void init_model(simple_model & model) { ggml_log_set(ggml_log_callback_default, nullptr); ggml_backend_load_all(); model.backend = ggml_backend_init_best(); model.cpu_backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); ggml_backend_t backends[2] = { model.backend, model.cpu_backend }; model.sched = ggml_backend_sched_new(backends, nullptr, 2, GGML_DEFAULT_GRAPH_SIZE, false, true); } // build the compute graph to perform a matrix multiplication struct ggml_cgraph * build_graph(simple_model& model) { size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead(); model.buf.resize(buf_size); struct ggml_init_params params0 = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ model.buf.data(), /*.no_alloc =*/ true, // the tensors will be allocated later }; // create a context to build the graph struct ggml_context * ctx = ggml_init(params0); struct ggml_cgraph * gf = ggml_new_graph(ctx); // create tensors model.a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, cols_A, 
rows_A); model.b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, cols_B, rows_B); // result = a*b^T struct ggml_tensor * result = ggml_mul_mat(ctx, model.a, model.b); // build operations nodes ggml_build_forward_expand(gf, result); ggml_free(ctx); return gf; } // compute with backend struct ggml_tensor * compute(simple_model & model, struct ggml_cgraph * gf) { ggml_backend_sched_reset(model.sched); ggml_backend_sched_alloc_graph(model.sched, gf); // load data from cpu memory to backend buffer ggml_backend_tensor_set(model.a, matrix_A, 0, ggml_nbytes(model.a)); ggml_backend_tensor_set(model.b, matrix_B, 0, ggml_nbytes(model.b)); // compute the graph ggml_backend_sched_graph_compute(model.sched, gf); // in this case, the output tensor is the last one in the graph return ggml_graph_node(gf, -1); } int main(void) { ggml_time_init(); simple_model model; init_model(model); struct ggml_cgraph * gf = build_graph(model); // perform computation struct ggml_tensor * result = compute(model, gf); // create a array to print result std::vector out_data(ggml_nelements(result)); // bring the data from the backend memory ggml_backend_tensor_get(result, out_data.data(), 0, ggml_nbytes(result)); // expected result: // [ 60.00 55.00 50.00 110.00 // 90.00 54.00 54.00 126.00 // 42.00 29.00 28.00 64.00 ] printf("mul mat (%d x %d) (transposed result):\n[", (int) result->ne[0], (int) result->ne[1]); for (int j = 0; j < result->ne[1] /* rows */; j++) { if (j > 0) { printf("\n"); } for (int i = 0; i < result->ne[0] /* cols */; i++) { printf(" %.2f", out_data[j * result->ne[0] + i]); } } printf(" ]\n"); // release backend memory and free backend ggml_backend_sched_free(model.sched); ggml_backend_free(model.backend); ggml_backend_free(model.cpu_backend); return 0; } ggml-org-ggml-3678254/examples/simple/simple-ctx.cpp000066400000000000000000000071061512524704700222700ustar00rootroot00000000000000#include "ggml.h" #include "ggml-cpu.h" #include #include #include #include #include #include #include #include // This is a simple model with two tensors a and b struct simple_model { struct ggml_tensor * a; struct ggml_tensor * b; // the context to define the tensor information (dimensions, size, memory data) struct ggml_context * ctx; }; // initialize the tensors of the model in this case two matrices 2x2 void load_model(simple_model & model, float * a, float * b, int rows_A, int cols_A, int rows_B, int cols_B) { size_t ctx_size = 0; { ctx_size += rows_A * cols_A * ggml_type_size(GGML_TYPE_F32); // tensor a ctx_size += rows_B * cols_B * ggml_type_size(GGML_TYPE_F32); // tensor b ctx_size += 2 * ggml_tensor_overhead(), // tensors ctx_size += ggml_graph_overhead(); // compute graph ctx_size += 1024; // some overhead } struct ggml_init_params params { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false, // NOTE: this should be false when using the legacy API }; // create context model.ctx = ggml_init(params); // create tensors model.a = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, cols_A, rows_A); model.b = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, cols_B, rows_B); memcpy(model.a->data, a, ggml_nbytes(model.a)); memcpy(model.b->data, b, ggml_nbytes(model.b)); } // build the compute graph to perform a matrix multiplication struct ggml_cgraph * build_graph(const simple_model& model) { struct ggml_cgraph * gf = ggml_new_graph(model.ctx); // result = a*b^T struct ggml_tensor * result = ggml_mul_mat(model.ctx, model.a, model.b); ggml_build_forward_expand(gf, result); return gf; } // compute with backend struct 
ggml_tensor * compute(const simple_model & model) { struct ggml_cgraph * gf = build_graph(model); int n_threads = 1; // number of threads to perform some operations with multi-threading ggml_graph_compute_with_ctx(model.ctx, gf, n_threads); // in this case, the output tensor is the last one in the graph return ggml_graph_node(gf, -1); } int main(void) { ggml_time_init(); // initialize data of matrices to perform matrix multiplication const int rows_A = 4, cols_A = 2; float matrix_A[rows_A * cols_A] = { 2, 8, 5, 1, 4, 2, 8, 6 }; const int rows_B = 3, cols_B = 2; /* Transpose([ 10, 9, 5, 5, 9, 4 ]) 2 rows, 3 cols */ float matrix_B[rows_B * cols_B] = { 10, 5, 9, 9, 5, 4 }; simple_model model; load_model(model, matrix_A, matrix_B, rows_A, cols_A, rows_B, cols_B); // perform computation in cpu struct ggml_tensor * result = compute(model); // get the result data pointer as a float array to print std::vector out_data(ggml_nelements(result)); memcpy(out_data.data(), result->data, ggml_nbytes(result)); // expected result: // [ 60.00 55.00 50.00 110.00 // 90.00 54.00 54.00 126.00 // 42.00 29.00 28.00 64.00 ] printf("mul mat (%d x %d) (transposed result):\n[", (int) result->ne[0], (int) result->ne[1]); for (int j = 0; j < result->ne[1] /* rows */; j++) { if (j > 0) { printf("\n"); } for (int i = 0; i < result->ne[0] /* cols */; i++) { printf(" %.2f", out_data[j * result->ne[0] + i]); } } printf(" ]\n"); // free memory ggml_free(model.ctx); return 0; } ggml-org-ggml-3678254/examples/stb_image.h000066400000000000000000010540751512524704700203210ustar00rootroot00000000000000/* stb_image - v2.28 - public domain image loader - http://nothings.org/stb no warranty implied; use at your own risk Do this: #define STB_IMAGE_IMPLEMENTATION before you include this file in *one* C or C++ file to create the implementation. // i.e. it should look like this: #include ... #include ... #include ... #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free QUICK NOTES: Primarily of interest to game developers and other people who can avoid problematic images and only need the trivial interface JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) PNG 1/2/4/8/16-bit-per-channel TGA (not sure what subset, if a subset) BMP non-1bpp, non-RLE PSD (composited view only, no extra channels, 8/16 bit-per-channel) GIF (*comp always reports as 4-channel) HDR (radiance rgbE format) PIC (Softimage PIC) PNM (PPM and PGM binary only) Animated GIF still needs a proper API, but here's one way to do it: http://gist.github.com/urraka/685d9a6340b26b830d49 - decode from memory or through FILE (define STBI_NO_STDIO to remove code) - decode from arbitrary I/O callbacks - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) Full documentation under "DOCUMENTATION" below. LICENSE See end of file for license information. 
RECENT REVISION HISTORY: 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes 2.26 (2020-07-13) many minor fixes 2.25 (2020-02-02) fix warnings 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically 2.23 (2019-08-11) fix clang static analysis warning 2.22 (2019-03-04) gif fixes, fix warnings 2.21 (2019-02-25) fix typo in comment 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs 2.19 (2018-02-11) fix warning 2.18 (2018-01-30) fix warnings 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 RGB-format JPEG; remove white matting in PSD; allocate large structures on the stack; correct channel count for PNG & BMP 2.10 (2016-01-22) avoid warning introduced in 2.09 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED See end of file for full revision history. ============================ Contributors ========================= Image formats Extensions, features Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) github:urraka (animated gif) Junggon Kim (PNM comments) Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) socks-the-fox (16-bit PNG) Jeremy Sawicki (handle all ImageNet JPGs) Optimizations & bugfixes Mikhail Morozov (1-bit BMP) Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) Arseny Kapoulkine Simon Breuss (16-bit PNM) John-Mark Allen Carmelo J Fdez-Aguera Bug & warning fixes Marc LeBlanc David Woo Guillaume George Martins Mozeiko Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski Phil Jordan Dave Moore Roy Eltham Hayaki Saito Nathan Reed Won Chun Luke Graham Johan Duparc Nick Verigakis the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh Janez Zemva John Bartholomew Michal Cichon github:romigrou Jonathan Blow Ken Hamada Tero Hanninen github:svdijk Eugene Golushkov Laurent Gomila Cort Stratton github:snagar Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex Cass Everitt Ryamond Barbiero github:grim210 Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo Julian Raschke Gregory Mullen Christian Floisand github:darealshinji Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 Brad Weinberger Matvey Cherevko github:mosra Luca Sas Alexander Veselov Zack Middleton [reserved] Ryan C. Gordon [reserved] [reserved] DO NOT ADD YOUR NAME HERE Jacko Dirks To add your name to the credits, pick a random blank space in the middle and fill it. 80% of merge conflicts on stb PRs are due to people adding their name at the end of the credits. 
*/ #ifndef STBI_INCLUDE_STB_IMAGE_H #define STBI_INCLUDE_STB_IMAGE_H // DOCUMENTATION // // Limitations: // - no 12-bit-per-channel JPEG // - no JPEGs with arithmetic coding // - GIF always returns *comp=4 // // Basic usage (see HDR discussion below for HDR usage): // int x,y,n; // unsigned char *data = stbi_load(filename, &x, &y, &n, 0); // // ... process data if not NULL ... // // ... x = width, y = height, n = # 8-bit components per pixel ... // // ... replace '0' with '1'..'4' to force that many components per pixel // // ... but 'n' will always be the number that it would have been if you said 0 // stbi_image_free(data); // // Standard parameters: // int *x -- outputs image width in pixels // int *y -- outputs image height in pixels // int *channels_in_file -- outputs # of image components in image file // int desired_channels -- if non-zero, # of image components requested in result // // The return value from an image loader is an 'unsigned char *' which points // to the pixel data, or NULL on an allocation failure or if the image is // corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, // with each pixel consisting of N interleaved 8-bit components; the first // pixel pointed to is top-left-most in the image. There is no padding between // image scanlines or between pixels, regardless of format. The number of // components N is 'desired_channels' if desired_channels is non-zero, or // *channels_in_file otherwise. If desired_channels is non-zero, // *channels_in_file has the number of components that _would_ have been // output otherwise. E.g. if you set desired_channels to 4, you will always // get RGBA output, but you can check *channels_in_file to see if it's trivially // opaque because e.g. there were only 3 channels in the source image. // // An output image with N components has the following components interleaved // in this order in each pixel: // // N=#comp components // 1 grey // 2 grey, alpha // 3 red, green, blue // 4 red, green, blue, alpha // // If image loading fails for any reason, the return value will be NULL, // and *x, *y, *channels_in_file will be unchanged. The function // stbi_failure_reason() can be queried for an extremely brief, end-user // unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS // to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly // more user-friendly ones. // // Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. // // To query the width, height and component count of an image without having to // decode the full file, you can use the stbi_info family of functions: // // int x,y,n,ok; // ok = stbi_info(filename, &x, &y, &n); // // returns ok=1 and sets x, y, n if image is a supported format, // // 0 otherwise. // // Note that stb_image pervasively uses ints in its public API for sizes, // including sizes of memory buffers. This is now part of the API and thus // hard to change without causing breakage. As a result, the various image // loaders all have certain limits on image size; these differ somewhat // by format but generally boil down to either just under 2GB or just under // 1GB. When the decoded image would be larger than this, stb_image decoding // will fail. // // Additionally, stb_image will reject image files that have any of their // dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, // which defaults to 2**24 = 16777216 pixels. 
Due to the above memory limit, // the only way to have an image with such dimensions load correctly // is for it to have a rather extreme aspect ratio. Either way, the // assumption here is that such larger images are likely to be malformed // or malicious. If you do need to load an image with individual dimensions // larger than that, and it still fits in the overall size limit, you can // #define STBI_MAX_DIMENSIONS on your own to be something larger. // // =========================================================================== // // UNICODE: // // If compiling for Windows and you wish to use Unicode filenames, compile // with // #define STBI_WINDOWS_UTF8 // and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert // Windows wchar_t filenames to utf8. // // =========================================================================== // // Philosophy // // stb libraries are designed with the following priorities: // // 1. easy to use // 2. easy to maintain // 3. good performance // // Sometimes I let "good performance" creep up in priority over "easy to maintain", // and for best performance I may provide less-easy-to-use APIs that give higher // performance, in addition to the easy-to-use ones. Nevertheless, it's important // to keep in mind that from the standpoint of you, a client of this library, // all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. // // Some secondary priorities arise directly from the first two, some of which // provide more explicit reasons why performance can't be emphasized. // // - Portable ("ease of use") // - Small source code footprint ("easy to maintain") // - No dependencies ("ease of use") // // =========================================================================== // // I/O callbacks // // I/O callbacks allow you to read from arbitrary sources, like packaged // files or some other source. Data read from callbacks are processed // through a small internal buffer (currently 128 bytes) to try to reduce // overhead. // // The three functions you must define are "read" (reads some bytes of data), // "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). // // =========================================================================== // // SIMD support // // The JPEG decoder will try to automatically use SIMD kernels on x86 when // supported by the compiler. For ARM Neon support, you must explicitly // request it. // // (The old do-it-yourself SIMD API is no longer supported in the current // code.) // // On x86, SSE2 will automatically be used when available based on a run-time // test; if not, the generic C versions are used as a fall-back. On ARM targets, // the typical path is to have separate builds for NEON and non-NEON devices // (at least this is true for iOS and Android). Therefore, the NEON support is // toggled by a build flag: define STBI_NEON to get NEON loops. // // If for some reason you do not want to use any of SIMD code, or if // you have issues compiling it, you can disable it entirely by // defining STBI_NO_SIMD. // // =========================================================================== // // HDR image support (disable by defining STBI_NO_HDR) // // stb_image supports loading HDR images in general, and currently the Radiance // .HDR file format specifically. 
You can still load any file through the existing // interface; if you attempt to load an HDR file, it will be automatically remapped // to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; // both of these constants can be reconfigured through this interface: // // stbi_hdr_to_ldr_gamma(2.2f); // stbi_hdr_to_ldr_scale(1.0f); // // (note, do not use _inverse_ constants; stbi_image will invert them // appropriately). // // Additionally, there is a new, parallel interface for loading files as // (linear) floats to preserve the full dynamic range: // // float *data = stbi_loadf(filename, &x, &y, &n, 0); // // If you load LDR images through this interface, those images will // be promoted to floating point values, run through the inverse of // constants corresponding to the above: // // stbi_ldr_to_hdr_scale(1.0f); // stbi_ldr_to_hdr_gamma(2.2f); // // Finally, given a filename (or an open file or memory block--see header // file for details) containing image data, you can query for the "most // appropriate" interface to use (that is, whether the image is HDR or // not), using: // // stbi_is_hdr(char *filename); // // =========================================================================== // // iPhone PNG support: // // We optionally support converting iPhone-formatted PNGs (which store // premultiplied BGRA) back to RGB, even though they're internally encoded // differently. To enable this conversion, call // stbi_convert_iphone_png_to_rgb(1). // // Call stbi_set_unpremultiply_on_load(1) as well to force a divide per // pixel to remove any premultiplied alpha *only* if the image file explicitly // says there's premultiplied data (currently only happens in iPhone images, // and only if iPhone convert-to-rgb processing is on). // // =========================================================================== // // ADDITIONAL CONFIGURATION // // - You can suppress implementation of any of the decoders to reduce // your code footprint by #defining one or more of the following // symbols before creating the implementation. // // STBI_NO_JPEG // STBI_NO_PNG // STBI_NO_BMP // STBI_NO_PSD // STBI_NO_TGA // STBI_NO_GIF // STBI_NO_HDR // STBI_NO_PIC // STBI_NO_PNM (.ppm and .pgm) // // - You can request *only* certain decoders and suppress all other ones // (this will be more forward-compatible, as addition of new decoders // doesn't require you to disable them explicitly): // // STBI_ONLY_JPEG // STBI_ONLY_PNG // STBI_ONLY_BMP // STBI_ONLY_PSD // STBI_ONLY_TGA // STBI_ONLY_GIF // STBI_ONLY_HDR // STBI_ONLY_PIC // STBI_ONLY_PNM (.ppm and .pgm) // // - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still // want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB // // - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater // than that size (in either width or height) without further processing. // This is to let programs in the wild set an upper bound to prevent // denial-of-service attacks on untrusted data, as one could generate a // valid image of gigantic dimensions and force stb_image to allocate a // huge block of memory and spend disproportionate time decoding it. By // default this is set to (1 << 24), which is 16777216, but that's still // very big. 
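
// As a minimal sketch of the I/O callback interface described above (the
// mem_reader type and the my_* names are illustrative, not part of this
// header), reading from a caller-owned memory block might look like:
//
//    typedef struct { const unsigned char *data; int size, pos; } mem_reader;
//
//    static int my_read(void *user, char *out, int n) {
//       mem_reader *m = (mem_reader *) user;
//       int avail = m->size - m->pos;
//       if (n > avail) n = avail;
//       memcpy(out, m->data + m->pos, n);   // needs <string.h>
//       m->pos += n;
//       return n;                           // number of bytes actually read
//    }
//    static void my_skip(void *user, int n) { ((mem_reader *) user)->pos += n; }
//    static int  my_eof (void *user) { mem_reader *m = (mem_reader *) user; return m->pos >= m->size; }
//
//    stbi_io_callbacks cb = { my_read, my_skip, my_eof };
//    mem_reader m = { bytes, num_bytes, 0 };          // some buffer you already have
//    int x, y, n;
//    unsigned char *pixels = stbi_load_from_callbacks(&cb, &m, &x, &y, &n, 4);
//    if (pixels) {
//       // ... use pixels (RGBA, x*y*4 bytes) ...
//       stbi_image_free(pixels);
//    }
//
// (for data that is already fully in memory, stbi_load_from_memory is simpler;
// the callback form is mainly useful for archives or other streaming sources)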
#ifndef STBI_NO_STDIO #include #endif // STBI_NO_STDIO #define STBI_VERSION 1 enum { STBI_default = 0, // only used for desired_channels STBI_grey = 1, STBI_grey_alpha = 2, STBI_rgb = 3, STBI_rgb_alpha = 4 }; #include typedef unsigned char stbi_uc; typedef unsigned short stbi_us; #ifdef __cplusplus extern "C" { #endif #ifndef STBIDEF #ifdef STB_IMAGE_STATIC #define STBIDEF static #else #define STBIDEF extern #endif #endif ////////////////////////////////////////////////////////////////////////////// // // PRIMARY API - works on images of any type // // // load image by filename, open file, or memory buffer // typedef struct { int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative int (*eof) (void *user); // returns nonzero if we are at end of file/data } stbi_io_callbacks; //////////////////////////////////// // // 8-bits-per-channel interface // STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); // for stbi_load_from_file, file pointer is left pointing immediately after image #endif #ifndef STBI_NO_GIF STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); #endif #ifdef STBI_WINDOWS_UTF8 STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); #endif //////////////////////////////////// // // 16-bits-per-channel interface // STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); #endif //////////////////////////////////// // // float-per-channel interface // #ifndef STBI_NO_LINEAR STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); #endif #endif #ifndef STBI_NO_HDR STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); STBIDEF void stbi_hdr_to_ldr_scale(float scale); #endif // STBI_NO_HDR #ifndef STBI_NO_LINEAR STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); STBIDEF void stbi_ldr_to_hdr_scale(float scale); #endif // STBI_NO_LINEAR // stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR STBIDEF int 
stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); #ifndef STBI_NO_STDIO STBIDEF int stbi_is_hdr (char const *filename); STBIDEF int stbi_is_hdr_from_file(FILE *f); #endif // STBI_NO_STDIO // get a VERY brief reason for failure // on most compilers (and ALL modern mainstream compilers) this is threadsafe STBIDEF const char *stbi_failure_reason (void); // free the loaded image -- this is just free() STBIDEF void stbi_image_free (void *retval_from_stbi_load); // get image dimensions & components without fully decoding STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); #ifndef STBI_NO_STDIO STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); STBIDEF int stbi_is_16_bit (char const *filename); STBIDEF int stbi_is_16_bit_from_file(FILE *f); #endif // for image formats that explicitly notate that they have premultiplied alpha, // we just return the colors as stored in the file. set this flag to force // unpremultiplication. results are undefined if the unpremultiply overflow. STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); // indicate whether we should process iphone images back to canonical format, // or just pass them through "as-is" STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); // flip the image vertically, so the first pixel in the output array is the bottom left STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); // as above, but only applies to images loaded on the thread that calls the function // this function is only available if your compiler supports thread-local variables; // calling it will fail to link if your compiler doesn't STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); // ZLIB client - used by PNG, available for other purposes STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); #ifdef __cplusplus } #endif // // //// end header file ///////////////////////////////////////////////////// #endif // STBI_INCLUDE_STB_IMAGE_H #ifdef STB_IMAGE_IMPLEMENTATION #if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ || defined(STBI_ONLY_ZLIB) #ifndef STBI_ONLY_JPEG #define 
STBI_NO_JPEG #endif #ifndef STBI_ONLY_PNG #define STBI_NO_PNG #endif #ifndef STBI_ONLY_BMP #define STBI_NO_BMP #endif #ifndef STBI_ONLY_PSD #define STBI_NO_PSD #endif #ifndef STBI_ONLY_TGA #define STBI_NO_TGA #endif #ifndef STBI_ONLY_GIF #define STBI_NO_GIF #endif #ifndef STBI_ONLY_HDR #define STBI_NO_HDR #endif #ifndef STBI_ONLY_PIC #define STBI_NO_PIC #endif #ifndef STBI_ONLY_PNM #define STBI_NO_PNM #endif #endif #if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) #define STBI_NO_ZLIB #endif #include #include // ptrdiff_t on osx #include #include #include #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) #include // ldexp, pow #endif #ifndef STBI_NO_STDIO #include #endif #ifndef STBI_ASSERT #include #define STBI_ASSERT(x) assert(x) #endif #ifdef __cplusplus #define STBI_EXTERN extern "C" #else #define STBI_EXTERN extern #endif #ifndef _MSC_VER #ifdef __cplusplus #define stbi_inline inline #else #define stbi_inline #endif #else #define stbi_inline __forceinline #endif #ifndef STBI_NO_THREAD_LOCALS #if defined(__cplusplus) && __cplusplus >= 201103L #define STBI_THREAD_LOCAL thread_local #elif defined(__GNUC__) && __GNUC__ < 5 #define STBI_THREAD_LOCAL __thread #elif defined(_MSC_VER) #define STBI_THREAD_LOCAL __declspec(thread) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) #define STBI_THREAD_LOCAL _Thread_local #endif #ifndef STBI_THREAD_LOCAL #if defined(__GNUC__) #define STBI_THREAD_LOCAL __thread #endif #endif #endif #if defined(_MSC_VER) || defined(__SYMBIAN32__) typedef unsigned short stbi__uint16; typedef signed short stbi__int16; typedef unsigned int stbi__uint32; typedef signed int stbi__int32; #else #include typedef uint16_t stbi__uint16; typedef int16_t stbi__int16; typedef uint32_t stbi__uint32; typedef int32_t stbi__int32; #endif // should produce compiler error if size is wrong typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; #ifdef _MSC_VER #define STBI_NOTUSED(v) (void)(v) #else #define STBI_NOTUSED(v) (void)sizeof(v) #endif #ifdef _MSC_VER #define STBI_HAS_LROTL #endif #ifdef STBI_HAS_LROTL #define stbi_lrot(x,y) _lrotl(x,y) #else #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) #endif #if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) // ok #elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) // ok #else #error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." #endif #ifndef STBI_MALLOC #define STBI_MALLOC(sz) malloc(sz) #define STBI_REALLOC(p,newsz) realloc(p,newsz) #define STBI_FREE(p) free(p) #endif #ifndef STBI_REALLOC_SIZED #define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) #endif // x86/x64 detection #if defined(__x86_64__) || defined(_M_X64) #define STBI__X64_TARGET #elif defined(__i386) || defined(_M_IX86) #define STBI__X86_TARGET #endif #if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) // gcc doesn't support sse2 intrinsics unless you compile with -msse2, // which in turn means it gets to use SSE2 everywhere. This is unfortunate, // but previous attempts to provide the SSE2 functions with runtime // detection caused numerous issues. The way architecture extensions are // exposed in GCC/Clang is, sadly, not really suited for one-file libs. 
// New behavior: if compiled with -msse2, we use SSE2 without any // detection; if not, we don't use it at all. #define STBI_NO_SIMD #endif #if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) // Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET // // 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the // Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. // As a result, enabling SSE2 on 32-bit MinGW is dangerous when not // simultaneously enabling "-mstackrealign". // // See https://github.com/nothings/stb/issues/81 for more information. // // So default to no SSE2 on 32-bit MinGW. If you've read this far and added // -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. #define STBI_NO_SIMD #endif #if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) #define STBI_SSE2 #include #ifdef _MSC_VER #if _MSC_VER >= 1400 // not VC6 #include // __cpuid static int stbi__cpuid3(void) { int info[4]; __cpuid(info,1); return info[3]; } #else static int stbi__cpuid3(void) { int res; __asm { mov eax,1 cpuid mov res,edx } return res; } #endif #define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name #if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) static int stbi__sse2_available(void) { int info3 = stbi__cpuid3(); return ((info3 >> 26) & 1) != 0; } #endif #else // assume GCC-style if not VC++ #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) #if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) static int stbi__sse2_available(void) { // If we're even attempting to compile this on GCC/Clang, that means // -msse2 is on, which means the compiler is allowed to use SSE2 // instructions at will, and so are we. 
return 1; } #endif #endif #endif // ARM NEON #if defined(STBI_NO_SIMD) && defined(STBI_NEON) #undef STBI_NEON #endif #ifdef STBI_NEON #include #ifdef _MSC_VER #define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name #else #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) #endif #endif #ifndef STBI_SIMD_ALIGN #define STBI_SIMD_ALIGN(type, name) type name #endif #ifndef STBI_MAX_DIMENSIONS #define STBI_MAX_DIMENSIONS (1 << 24) #endif /////////////////////////////////////////////// // // stbi__context struct and start_xxx functions // stbi__context structure is our basic context used by all images, so it // contains all the IO context, plus some basic image information typedef struct { stbi__uint32 img_x, img_y; int img_n, img_out_n; stbi_io_callbacks io; void *io_user_data; int read_from_callbacks; int buflen; stbi_uc buffer_start[128]; int callback_already_read; stbi_uc *img_buffer, *img_buffer_end; stbi_uc *img_buffer_original, *img_buffer_original_end; } stbi__context; static void stbi__refill_buffer(stbi__context *s); // initialize a memory-decode context static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) { s->io.read = NULL; s->read_from_callbacks = 0; s->callback_already_read = 0; s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; } // initialize a callback-based context static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) { s->io = *c; s->io_user_data = user; s->buflen = sizeof(s->buffer_start); s->read_from_callbacks = 1; s->callback_already_read = 0; s->img_buffer = s->img_buffer_original = s->buffer_start; stbi__refill_buffer(s); s->img_buffer_original_end = s->img_buffer_end; } #ifndef STBI_NO_STDIO static int stbi__stdio_read(void *user, char *data, int size) { return (int) fread(data,1,size,(FILE*) user); } static void stbi__stdio_skip(void *user, int n) { int ch; fseek((FILE*) user, n, SEEK_CUR); ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ if (ch != EOF) { ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. 
*/ } } static int stbi__stdio_eof(void *user) { return feof((FILE*) user) || ferror((FILE *) user); } static stbi_io_callbacks stbi__stdio_callbacks = { stbi__stdio_read, stbi__stdio_skip, stbi__stdio_eof, }; static void stbi__start_file(stbi__context *s, FILE *f) { stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); } //static void stop_file(stbi__context *s) { } #endif // !STBI_NO_STDIO static void stbi__rewind(stbi__context *s) { // conceptually rewind SHOULD rewind to the beginning of the stream, // but we just rewind to the beginning of the initial buffer, because // we only use it after doing 'test', which only ever looks at at most 92 bytes s->img_buffer = s->img_buffer_original; s->img_buffer_end = s->img_buffer_original_end; } enum { STBI_ORDER_RGB, STBI_ORDER_BGR }; typedef struct { int bits_per_channel; int num_channels; int channel_order; } stbi__result_info; #ifndef STBI_NO_JPEG static int stbi__jpeg_test(stbi__context *s); static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PNG static int stbi__png_test(stbi__context *s); static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); static int stbi__png_is16(stbi__context *s); #endif #ifndef STBI_NO_BMP static int stbi__bmp_test(stbi__context *s); static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_TGA static int stbi__tga_test(stbi__context *s); static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PSD static int stbi__psd_test(stbi__context *s); static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); static int stbi__psd_is16(stbi__context *s); #endif #ifndef STBI_NO_HDR static int stbi__hdr_test(stbi__context *s); static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PIC static int stbi__pic_test(stbi__context *s); static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_GIF static int stbi__gif_test(stbi__context *s); static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PNM static int stbi__pnm_test(stbi__context *s); static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); static int stbi__pnm_is16(stbi__context *s); #endif static #ifdef STBI_THREAD_LOCAL STBI_THREAD_LOCAL #endif const char *stbi__g_failure_reason; STBIDEF 
const char *stbi_failure_reason(void) { return stbi__g_failure_reason; } #ifndef STBI_NO_FAILURE_STRINGS static int stbi__err(const char *str) { stbi__g_failure_reason = str; return 0; } #endif static void *stbi__malloc(size_t size) { return STBI_MALLOC(size); } // stb_image uses ints pervasively, including for offset calculations. // therefore the largest decoded image size we can support with the // current code, even on 64-bit targets, is INT_MAX. this is not a // significant limitation for the intended use case. // // we do, however, need to make sure our size calculations don't // overflow. hence a few helper functions for size calculations that // multiply integers together, making sure that they're non-negative // and no overflow occurs. // return 1 if the sum is valid, 0 on overflow. // negative terms are considered invalid. static int stbi__addsizes_valid(int a, int b) { if (b < 0) return 0; // now 0 <= b <= INT_MAX, hence also // 0 <= INT_MAX - b <= INTMAX. // And "a + b <= INT_MAX" (which might overflow) is the // same as a <= INT_MAX - b (no overflow) return a <= INT_MAX - b; } // returns 1 if the product is valid, 0 on overflow. // negative factors are considered invalid. static int stbi__mul2sizes_valid(int a, int b) { if (a < 0 || b < 0) return 0; if (b == 0) return 1; // mul-by-0 is always safe // portable way to check for no overflows in a*b return a <= INT_MAX/b; } #if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) // returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow static int stbi__mad2sizes_valid(int a, int b, int add) { return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); } #endif // returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow static int stbi__mad3sizes_valid(int a, int b, int c, int add) { return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && stbi__addsizes_valid(a*b*c, add); } // returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) { return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); } #endif #if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) // mallocs with size overflow checking static void *stbi__malloc_mad2(int a, int b, int add) { if (!stbi__mad2sizes_valid(a, b, add)) return NULL; return stbi__malloc(a*b + add); } #endif static void *stbi__malloc_mad3(int a, int b, int c, int add) { if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; return stbi__malloc(a*b*c + add); } #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) { if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; return stbi__malloc(a*b*c*d + add); } #endif // returns 1 if the sum of two signed ints is valid (between -2^31 and 2^31-1 inclusive), 0 on overflow. static int stbi__addints_valid(int a, int b) { if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. return a <= INT_MAX - b; } // returns 1 if the product of two signed shorts is valid, 0 on overflow. 
static int stbi__mul2shorts_valid(short a, short b) { if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN return a >= SHRT_MIN / b; } // stbi__err - error // stbi__errpf - error returning pointer to float // stbi__errpuc - error returning pointer to unsigned char #ifdef STBI_NO_FAILURE_STRINGS #define stbi__err(x,y) 0 #elif defined(STBI_FAILURE_USERMSG) #define stbi__err(x,y) stbi__err(y) #else #define stbi__err(x,y) stbi__err(x) #endif #define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) #define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) STBIDEF void stbi_image_free(void *retval_from_stbi_load) { STBI_FREE(retval_from_stbi_load); } #ifndef STBI_NO_LINEAR static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); #endif #ifndef STBI_NO_HDR static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); #endif static int stbi__vertically_flip_on_load_global = 0; STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) { stbi__vertically_flip_on_load_global = flag_true_if_should_flip; } #ifndef STBI_THREAD_LOCAL #define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global #else static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) { stbi__vertically_flip_on_load_local = flag_true_if_should_flip; stbi__vertically_flip_on_load_set = 1; } #define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ ? stbi__vertically_flip_on_load_local \ : stbi__vertically_flip_on_load_global) #endif // STBI_THREAD_LOCAL static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) { memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order ri->num_channels = 0; // test the formats with a very explicit header first (at least a FOURCC // or distinctive magic number first) #ifndef STBI_NO_PNG if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_BMP if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_GIF if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_PSD if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); #else STBI_NOTUSED(bpc); #endif #ifndef STBI_NO_PIC if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); #endif // then the formats that can end up attempting to load with just 1 or 2 // bytes matching expectations; these are prone to false positives, so // try them later #ifndef STBI_NO_JPEG if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_PNM if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_HDR if (stbi__hdr_test(s)) { float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); } #endif #ifndef STBI_NO_TGA // test tga last because it's a crappy test! 
if (stbi__tga_test(s)) return stbi__tga_load(s,x,y,comp,req_comp, ri); #endif return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); } static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) { int i; int img_len = w * h * channels; stbi_uc *reduced; reduced = (stbi_uc *) stbi__malloc(img_len); if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); for (i = 0; i < img_len; ++i) reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling STBI_FREE(orig); return reduced; } static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) { int i; int img_len = w * h * channels; stbi__uint16 *enlarged; enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); for (i = 0; i < img_len; ++i) enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff STBI_FREE(orig); return enlarged; } static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) { int row; size_t bytes_per_row = (size_t)w * bytes_per_pixel; stbi_uc temp[2048]; stbi_uc *bytes = (stbi_uc *)image; for (row = 0; row < (h>>1); row++) { stbi_uc *row0 = bytes + row*bytes_per_row; stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; // swap row0 with row1 size_t bytes_left = bytes_per_row; while (bytes_left) { size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); memcpy(temp, row0, bytes_copy); memcpy(row0, row1, bytes_copy); memcpy(row1, temp, bytes_copy); row0 += bytes_copy; row1 += bytes_copy; bytes_left -= bytes_copy; } } } #ifndef STBI_NO_GIF static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) { int slice; int slice_size = w * h * bytes_per_pixel; stbi_uc *bytes = (stbi_uc *)image; for (slice = 0; slice < z; ++slice) { stbi__vertical_flip(bytes, w, h, bytes_per_pixel); bytes += slice_size; } } #endif static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) { stbi__result_info ri; void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); if (result == NULL) return NULL; // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); if (ri.bits_per_channel != 8) { result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); ri.bits_per_channel = 8; } // @TODO: move stbi__convert_format to here if (stbi__vertically_flip_on_load) { int channels = req_comp ? req_comp : *comp; stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); } return (unsigned char *) result; } static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) { stbi__result_info ri; void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); if (result == NULL) return NULL; // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); if (ri.bits_per_channel != 16) { result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); ri.bits_per_channel = 16; } // @TODO: move stbi__convert_format16 to here // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision if (stbi__vertically_flip_on_load) { int channels = req_comp ? req_comp : *comp; stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); } return (stbi__uint16 *) result; } #if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) { if (stbi__vertically_flip_on_load && result != NULL) { int channels = req_comp ? req_comp : *comp; stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); } } #endif #ifndef STBI_NO_STDIO #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); #endif #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) { return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); } #endif static FILE *stbi__fopen(char const *filename, char const *mode) { FILE *f; #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) wchar_t wMode[64]; wchar_t wFilename[1024]; if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) return 0; if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) return 0; #if defined(_MSC_VER) && _MSC_VER >= 1400 if (0 != _wfopen_s(&f, wFilename, wMode)) f = 0; #else f = _wfopen(wFilename, wMode); #endif #elif defined(_MSC_VER) && _MSC_VER >= 1400 if (0 != fopen_s(&f, filename, mode)) f=0; #else f = fopen(filename, mode); #endif return f; } STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) { FILE *f = stbi__fopen(filename, "rb"); unsigned char *result; if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); result = stbi_load_from_file(f,x,y,comp,req_comp); fclose(f); return result; } STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) { unsigned char *result; stbi__context s; stbi__start_file(&s,f); result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); if (result) { // need to 'unget' all the characters in the IO buffer fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); } return result; } STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) { stbi__uint16 *result; stbi__context s; stbi__start_file(&s,f); result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); if (result) { // need to 'unget' all the characters in the IO buffer fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); } return result; } STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) { FILE *f = stbi__fopen(filename, "rb"); stbi__uint16 *result; if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); result = stbi_load_from_file_16(f,x,y,comp,req_comp); fclose(f); return result; } #endif //!STBI_NO_STDIO STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int 
*channels_in_file, int desired_channels) { stbi__context s; stbi__start_mem(&s,buffer,len); return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); } STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) { stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); } STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_mem(&s,buffer,len); return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); } STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); } #ifndef STBI_NO_GIF STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) { unsigned char *result; stbi__context s; stbi__start_mem(&s,buffer,len); result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); if (stbi__vertically_flip_on_load) { stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); } return result; } #endif #ifndef STBI_NO_LINEAR static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) { unsigned char *data; #ifndef STBI_NO_HDR if (stbi__hdr_test(s)) { stbi__result_info ri; float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); if (hdr_data) stbi__float_postprocess(hdr_data,x,y,comp,req_comp); return hdr_data; } #endif data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); if (data) return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); } STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_mem(&s,buffer,len); return stbi__loadf_main(&s,x,y,comp,req_comp); } STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); return stbi__loadf_main(&s,x,y,comp,req_comp); } #ifndef STBI_NO_STDIO STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) { float *result; FILE *f = stbi__fopen(filename, "rb"); if (!f) return stbi__errpf("can't fopen", "Unable to open file"); result = stbi_loadf_from_file(f,x,y,comp,req_comp); fclose(f); return result; } STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_file(&s,f); return stbi__loadf_main(&s,x,y,comp,req_comp); } #endif // !STBI_NO_STDIO #endif // !STBI_NO_LINEAR // these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is // defined, for API simplicity; if STBI_NO_LINEAR is defined, it always // reports false! 
STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) { #ifndef STBI_NO_HDR stbi__context s; stbi__start_mem(&s,buffer,len); return stbi__hdr_test(&s); #else STBI_NOTUSED(buffer); STBI_NOTUSED(len); return 0; #endif } #ifndef STBI_NO_STDIO STBIDEF int stbi_is_hdr (char const *filename) { FILE *f = stbi__fopen(filename, "rb"); int result=0; if (f) { result = stbi_is_hdr_from_file(f); fclose(f); } return result; } STBIDEF int stbi_is_hdr_from_file(FILE *f) { #ifndef STBI_NO_HDR long pos = ftell(f); int res; stbi__context s; stbi__start_file(&s,f); res = stbi__hdr_test(&s); fseek(f, pos, SEEK_SET); return res; #else STBI_NOTUSED(f); return 0; #endif } #endif // !STBI_NO_STDIO STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) { #ifndef STBI_NO_HDR stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); return stbi__hdr_test(&s); #else STBI_NOTUSED(clbk); STBI_NOTUSED(user); return 0; #endif } #ifndef STBI_NO_LINEAR static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } #endif static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } ////////////////////////////////////////////////////////////////////////////// // // Common code used by all image loaders // enum { STBI__SCAN_load=0, STBI__SCAN_type, STBI__SCAN_header }; static void stbi__refill_buffer(stbi__context *s) { int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); if (n == 0) { // at end of file, treat same as if from memory, but need to handle case // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file s->read_from_callbacks = 0; s->img_buffer = s->buffer_start; s->img_buffer_end = s->buffer_start+1; *s->img_buffer = 0; } else { s->img_buffer = s->buffer_start; s->img_buffer_end = s->buffer_start + n; } } stbi_inline static stbi_uc stbi__get8(stbi__context *s) { if (s->img_buffer < s->img_buffer_end) return *s->img_buffer++; if (s->read_from_callbacks) { stbi__refill_buffer(s); return *s->img_buffer++; } return 0; } #if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) // nothing #else stbi_inline static int stbi__at_eof(stbi__context *s) { if (s->io.read) { if (!(s->io.eof)(s->io_user_data)) return 0; // if feof() is true, check if buffer = end // special case: we've only got the special 0 character at the end if (s->read_from_callbacks == 0) return 1; } return s->img_buffer >= s->img_buffer_end; } #endif #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) // nothing #else static void stbi__skip(stbi__context *s, int n) { if (n == 0) return; // already there! 
if (n < 0) { s->img_buffer = s->img_buffer_end; return; } if (s->io.read) { int blen = (int) (s->img_buffer_end - s->img_buffer); if (blen < n) { s->img_buffer = s->img_buffer_end; (s->io.skip)(s->io_user_data, n - blen); return; } } s->img_buffer += n; } #endif #if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) // nothing #else static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) { if (s->io.read) { int blen = (int) (s->img_buffer_end - s->img_buffer); if (blen < n) { int res, count; memcpy(buffer, s->img_buffer, blen); count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); res = (count == (n-blen)); s->img_buffer = s->img_buffer_end; return res; } } if (s->img_buffer+n <= s->img_buffer_end) { memcpy(buffer, s->img_buffer, n); s->img_buffer += n; return 1; } else return 0; } #endif #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) // nothing #else static int stbi__get16be(stbi__context *s) { int z = stbi__get8(s); return (z << 8) + stbi__get8(s); } #endif #if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) // nothing #else static stbi__uint32 stbi__get32be(stbi__context *s) { stbi__uint32 z = stbi__get16be(s); return (z << 16) + stbi__get16be(s); } #endif #if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) // nothing #else static int stbi__get16le(stbi__context *s) { int z = stbi__get8(s); return z + (stbi__get8(s) << 8); } #endif #ifndef STBI_NO_BMP static stbi__uint32 stbi__get32le(stbi__context *s) { stbi__uint32 z = stbi__get16le(s); z += (stbi__uint32)stbi__get16le(s) << 16; return z; } #endif #define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) // nothing #else ////////////////////////////////////////////////////////////////////////////// // // generic converter from built-in img_n to req_comp // individual types do this automatically as much as possible (e.g. jpeg // does all cases internally since it needs to colorspace convert anyway, // and it never has alpha, so very few cases ). 
png can automatically // interleave an alpha=255 channel, but falls back to this for other cases // // assume data buffer is malloced, so malloc a new one and free that one // only failure mode is malloc failing static stbi_uc stbi__compute_y(int r, int g, int b) { return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); } #endif #if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) // nothing #else static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) { int i,j; unsigned char *good; if (req_comp == img_n) return data; STBI_ASSERT(req_comp >= 1 && req_comp <= 4); good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); if (good == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } for (j=0; j < (int) y; ++j) { unsigned char *src = data + j * x * img_n ; unsigned char *dest = good + j * x * req_comp; #define STBI__COMBO(a,b) ((a)*8+(b)) #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) // convert source image with img_n components to one with req_comp components; // avoid switch per pixel, so use switch per scanline and massive macros switch (STBI__COMBO(img_n, req_comp)) { STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; STBI__CASE(2,1) { dest[0]=src[0]; } break; STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); } #undef STBI__CASE } STBI_FREE(data); return good; } #endif #if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) // nothing #else static stbi__uint16 stbi__compute_y_16(int r, int g, int b) { return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); } #endif #if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) // nothing #else static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) { int i,j; stbi__uint16 *good; if (req_comp == img_n) return data; STBI_ASSERT(req_comp >= 1 && req_comp <= 4); good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); if (good == NULL) { STBI_FREE(data); return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); } for (j=0; j < (int) y; ++j) { stbi__uint16 *src = data + j * x * img_n ; stbi__uint16 *dest = good + j * x * req_comp; #define STBI__COMBO(a,b) ((a)*8+(b)) #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) // convert source image with img_n components to one with req_comp components; // avoid switch per pixel, so use switch per scanline and massive macros switch (STBI__COMBO(img_n, req_comp)) { STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; 
STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; STBI__CASE(2,1) { dest[0]=src[0]; } break; STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); } #undef STBI__CASE } STBI_FREE(data); return good; } #endif #ifndef STBI_NO_LINEAR static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) { int i,k,n; float *output; if (!data) return NULL; output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } // compute number of non-alpha components if (comp & 1) n = comp; else n = comp-1; for (i=0; i < x*y; ++i) { for (k=0; k < n; ++k) { output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); } } if (n < comp) { for (i=0; i < x*y; ++i) { output[i*comp + n] = data[i*comp + n]/255.0f; } } STBI_FREE(data); return output; } #endif #ifndef STBI_NO_HDR #define stbi__float2int(x) ((int) (x)) static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) { int i,k,n; stbi_uc *output; if (!data) return NULL; output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } // compute number of non-alpha components if (comp & 1) n = comp; else n = comp-1; for (i=0; i < x*y; ++i) { for (k=0; k < n; ++k) { float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; if (z < 0) z = 0; if (z > 255) z = 255; output[i*comp + k] = (stbi_uc) stbi__float2int(z); } if (k < comp) { float z = data[i*comp+k] * 255 + 0.5f; if (z < 0) z = 0; if (z > 255) z = 255; output[i*comp + k] = (stbi_uc) stbi__float2int(z); } } STBI_FREE(data); return output; } #endif ////////////////////////////////////////////////////////////////////////////// // // "baseline" JPEG/JFIF decoder // // simple implementation // - doesn't support delayed output of y-dimension // - simple interface (only one output format: 8-bit interleaved RGB) // - doesn't try to recover corrupt jpegs // - doesn't allow partial loading, loading multiple at once // - still fast on x86 (copying globals into locals doesn't help x86) // - allocates lots of intermediate memory (full size of all components) // - non-interleaved case requires this anyway // - allows good upsampling (see next) // high-quality // - upsampled channels are bilinearly interpolated, even across blocks // - quality integer IDCT derived from IJG's 'slow' // performance // - fast huffman; reasonable integer IDCT // - some SIMD kernels for common paths on targets with SSE2/NEON // - uses a lot of intermediate memory, could cache poorly #ifndef STBI_NO_JPEG // huffman decoding acceleration #define FAST_BITS 9 // larger handles more cases; smaller 
stomps less cache typedef struct { stbi_uc fast[1 << FAST_BITS]; // weirdly, repacking this into AoS is a 10% speed loss, instead of a win stbi__uint16 code[256]; stbi_uc values[256]; stbi_uc size[257]; unsigned int maxcode[18]; int delta[17]; // old 'firstsymbol' - old 'firstcode' } stbi__huffman; typedef struct { stbi__context *s; stbi__huffman huff_dc[4]; stbi__huffman huff_ac[4]; stbi__uint16 dequant[4][64]; stbi__int16 fast_ac[4][1 << FAST_BITS]; // sizes for components, interleaved MCUs int img_h_max, img_v_max; int img_mcu_x, img_mcu_y; int img_mcu_w, img_mcu_h; // definition of jpeg image component struct { int id; int h,v; int tq; int hd,ha; int dc_pred; int x,y,w2,h2; stbi_uc *data; void *raw_data, *raw_coeff; stbi_uc *linebuf; short *coeff; // progressive only int coeff_w, coeff_h; // number of 8x8 coefficient blocks } img_comp[4]; stbi__uint32 code_buffer; // jpeg entropy-coded buffer int code_bits; // number of valid bits unsigned char marker; // marker seen while filling entropy buffer int nomore; // flag if we saw a marker so must stop int progressive; int spec_start; int spec_end; int succ_high; int succ_low; int eob_run; int jfif; int app14_color_transform; // Adobe APP14 tag int rgb; int scan_n, order[4]; int restart_interval, todo; // kernels void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); } stbi__jpeg; static int stbi__build_huffman(stbi__huffman *h, int *count) { int i,j,k=0; unsigned int code; // build size list for each symbol (from JPEG spec) for (i=0; i < 16; ++i) { for (j=0; j < count[i]; ++j) { h->size[k++] = (stbi_uc) (i+1); if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); } } h->size[k] = 0; // compute actual symbols (from jpeg spec) code = 0; k = 0; for(j=1; j <= 16; ++j) { // compute delta to add to code to compute symbol id h->delta[j] = k - code; if (h->size[k] == j) { while (h->size[k] == j) h->code[k++] = (stbi__uint16) (code++); if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); } // compute largest code + 1 for this size, preshifted as needed later h->maxcode[j] = code << (16-j); code <<= 1; } h->maxcode[j] = 0xffffffff; // build non-spec acceleration table; 255 is flag for not-accelerated memset(h->fast, 255, 1 << FAST_BITS); for (i=0; i < k; ++i) { int s = h->size[i]; if (s <= FAST_BITS) { int c = h->code[i] << (FAST_BITS-s); int m = 1 << (FAST_BITS-s); for (j=0; j < m; ++j) { h->fast[c+j] = (stbi_uc) i; } } } return 1; } // build a table that decodes both magnitude and value of small ACs in // one go. static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) { int i; for (i=0; i < (1 << FAST_BITS); ++i) { stbi_uc fast = h->fast[i]; fast_ac[i] = 0; if (fast < 255) { int rs = h->values[fast]; int run = (rs >> 4) & 15; int magbits = rs & 15; int len = h->size[fast]; if (magbits && len + magbits <= FAST_BITS) { // magnitude code followed by receive_extend code int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); int m = 1 << (magbits - 1); if (k < m) k += (~0U << magbits) + 1; // if the result is small enough, we can fit it in fast_ac table if (k >= -128 && k <= 127) fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); } } } } static void stbi__grow_buffer_unsafe(stbi__jpeg *j) { do { unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); if (b == 0xff) { int c = stbi__get8(j->s); while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes if (c != 0) { j->marker = (unsigned char) c; j->nomore = 1; return; } } j->code_buffer |= b << (24 - j->code_bits); j->code_bits += 8; } while (j->code_bits <= 24); } // (1 << n) - 1 static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; // decode a jpeg huffman value from the bitstream stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) { unsigned int temp; int c,k; if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); // look at the top FAST_BITS and determine what symbol ID it is, // if the code is <= FAST_BITS c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); k = h->fast[c]; if (k < 255) { int s = h->size[k]; if (s > j->code_bits) return -1; j->code_buffer <<= s; j->code_bits -= s; return h->values[k]; } // naive test is to shift the code_buffer down so k bits are // valid, then test against maxcode. To speed this up, we've // preshifted maxcode left so that it has (16-k) 0s at the // end; in other words, regardless of the number of bits, it // wants to be compared against something shifted to have 16; // that way we don't need to shift inside the loop. temp = j->code_buffer >> 16; for (k=FAST_BITS+1 ; ; ++k) if (temp < h->maxcode[k]) break; if (k == 17) { // error! code not found j->code_bits -= 16; return -1; } if (k > j->code_bits) return -1; // convert the huffman code to the symbol id c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; if(c < 0 || c >= 256) // symbol id out of bounds! return -1; STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); // convert the id to a symbol j->code_bits -= k; j->code_buffer <<= k; return h->values[c]; } // bias[n] = (-1<<n) + 1 static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; // combined JPEG 'receive' and JPEG 'extend', since baseline // always extends everything it receives. stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) { unsigned int k; int sgn; if (j->code_bits < n) stbi__grow_buffer_unsafe(j); if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s instead of continuing sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) k = stbi_lrot(j->code_buffer, n); j->code_buffer = k & ~stbi__bmask[n]; k &= stbi__bmask[n]; j->code_bits -= n; return k + (stbi__jbias[n] & (sgn - 1)); } // get some unsigned bits stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) { unsigned int k; if (j->code_bits < n) stbi__grow_buffer_unsafe(j); if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s instead of continuing k = stbi_lrot(j->code_buffer, n); j->code_buffer = k & ~stbi__bmask[n]; k &= stbi__bmask[n]; j->code_bits -= n; return k; } stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) { unsigned int k; if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s instead of continuing k = j->code_buffer; j->code_buffer <<= 1; --j->code_bits; return k & 0x80000000; } // given a value that's at position X in the zigzag stream, // where does it appear in the 8x8 matrix coded as row-major?
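// For illustration (not used by the decoder): reading the table below,
// zigzag position 0 maps to row-major index 0 (row 0, col 0), position 1
// to index 1 (row 0, col 1), position 2 to index 8 (row 1, col 0),
// position 3 to index 16 (row 2, col 0), position 4 to index 9 (row 1, col 1),
// and so on down the anti-diagonals. A decoded coefficient k is stored as
//    data[stbi__jpeg_dezigzag[k]] = value;
// The 15 extra entries past index 63 all map to 63, so a corrupt stream that
// overruns k merely rewrites the last coefficient instead of writing out of bounds.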
static const stbi_uc stbi__jpeg_dezigzag[64+15] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63, // let corrupt input sample past end 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }; // decode one 64-entry block-- static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) { int diff,dc,k; int t; if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); t = stbi__jpeg_huff_decode(j, hdc); if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); // 0 all the ac values now so we can do it 32-bits at a time memset(data,0,64*sizeof(data[0])); diff = t ? stbi__extend_receive(j, t) : 0; if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); data[0] = (short) (dc * dequant[0]); // decode AC components, see JPEG spec k = 1; do { unsigned int zig; int c,r,s; if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); r = fac[c]; if (r) { // fast-AC path k += (r >> 4) & 15; // run s = r & 15; // combined length if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); j->code_buffer <<= s; j->code_bits -= s; // decode into unzigzag'd location zig = stbi__jpeg_dezigzag[k++]; data[zig] = (short) ((r >> 8) * dequant[zig]); } else { int rs = stbi__jpeg_huff_decode(j, hac); if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); s = rs & 15; r = rs >> 4; if (s == 0) { if (rs != 0xf0) break; // end block k += 16; } else { k += r; // decode into unzigzag'd location zig = stbi__jpeg_dezigzag[k++]; data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); } } } while (k < 64); return 1; } static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) { int diff,dc; int t; if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); if (j->succ_high == 0) { // first scan for DC coefficient, must be first memset(data,0,64*sizeof(data[0])); // 0 all the ac values now t = stbi__jpeg_huff_decode(j, hdc); if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); diff = t ? 
stbi__extend_receive(j, t) : 0; if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); data[0] = (short) (dc * (1 << j->succ_low)); } else { // refinement scan for DC coefficient if (stbi__jpeg_get_bit(j)) data[0] += (short) (1 << j->succ_low); } return 1; } // @OPTIMIZE: store non-zigzagged during the decode passes, // and only de-zigzag when dequantizing static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) { int k; if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); if (j->succ_high == 0) { int shift = j->succ_low; if (j->eob_run) { --j->eob_run; return 1; } k = j->spec_start; do { unsigned int zig; int c,r,s; if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); r = fac[c]; if (r) { // fast-AC path k += (r >> 4) & 15; // run s = r & 15; // combined length if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); j->code_buffer <<= s; j->code_bits -= s; zig = stbi__jpeg_dezigzag[k++]; data[zig] = (short) ((r >> 8) * (1 << shift)); } else { int rs = stbi__jpeg_huff_decode(j, hac); if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); s = rs & 15; r = rs >> 4; if (s == 0) { if (r < 15) { j->eob_run = (1 << r); if (r) j->eob_run += stbi__jpeg_get_bits(j, r); --j->eob_run; break; } k += 16; } else { k += r; zig = stbi__jpeg_dezigzag[k++]; data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); } } } while (k <= j->spec_end); } else { // refinement scan for these AC coefficients short bit = (short) (1 << j->succ_low); if (j->eob_run) { --j->eob_run; for (k = j->spec_start; k <= j->spec_end; ++k) { short *p = &data[stbi__jpeg_dezigzag[k]]; if (*p != 0) if (stbi__jpeg_get_bit(j)) if ((*p & bit)==0) { if (*p > 0) *p += bit; else *p -= bit; } } } else { k = j->spec_start; do { int r,s; int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); s = rs & 15; r = rs >> 4; if (s == 0) { if (r < 15) { j->eob_run = (1 << r) - 1; if (r) j->eob_run += stbi__jpeg_get_bits(j, r); r = 64; // force end of block } else { // r=15 s=0 should write 16 0s, so we just do // a run of 15 0s and then write s (which is 0), // so we don't have to do anything special here } } else { if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); // sign bit if (stbi__jpeg_get_bit(j)) s = bit; else s = -bit; } // advance by r while (k <= j->spec_end) { short *p = &data[stbi__jpeg_dezigzag[k++]]; if (*p != 0) { if (stbi__jpeg_get_bit(j)) if ((*p & bit)==0) { if (*p > 0) *p += bit; else *p -= bit; } } else { if (r == 0) { *p = (short) s; break; } --r; } } } while (k <= j->spec_end); } } return 1; } // take a -128..127 value and stbi__clamp it and convert to 0..255 stbi_inline static stbi_uc stbi__clamp(int x) { // trick to use a single test to catch both cases if ((unsigned int) x > 255) { if (x < 0) return 0; if (x > 255) return 255; } return (stbi_uc) x; } #define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) #define stbi__fsh(x) ((x) * 4096) // derived from jidctint -- DCT_ISLOW #define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ int 
t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ p2 = s2; \ p3 = s6; \ p1 = (p2+p3) * stbi__f2f(0.5411961f); \ t2 = p1 + p3*stbi__f2f(-1.847759065f); \ t3 = p1 + p2*stbi__f2f( 0.765366865f); \ p2 = s0; \ p3 = s4; \ t0 = stbi__fsh(p2+p3); \ t1 = stbi__fsh(p2-p3); \ x0 = t0+t3; \ x3 = t0-t3; \ x1 = t1+t2; \ x2 = t1-t2; \ t0 = s7; \ t1 = s5; \ t2 = s3; \ t3 = s1; \ p3 = t0+t2; \ p4 = t1+t3; \ p1 = t0+t3; \ p2 = t1+t2; \ p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ t0 = t0*stbi__f2f( 0.298631336f); \ t1 = t1*stbi__f2f( 2.053119869f); \ t2 = t2*stbi__f2f( 3.072711026f); \ t3 = t3*stbi__f2f( 1.501321110f); \ p1 = p5 + p1*stbi__f2f(-0.899976223f); \ p2 = p5 + p2*stbi__f2f(-2.562915447f); \ p3 = p3*stbi__f2f(-1.961570560f); \ p4 = p4*stbi__f2f(-0.390180644f); \ t3 += p1+p4; \ t2 += p2+p3; \ t1 += p2+p4; \ t0 += p1+p3; static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) { int i,val[64],*v=val; stbi_uc *o; short *d = data; // columns for (i=0; i < 8; ++i,++d, ++v) { // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 && d[40]==0 && d[48]==0 && d[56]==0) { // no shortcut 0 seconds // (1|2|3|4|5|6|7)==0 0 seconds // all separate -0.047 seconds // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds int dcterm = d[0]*4; v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; } else { STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) // constants scaled things up by 1<<12; let's bring them back // down, but keep 2 extra bits of precision x0 += 512; x1 += 512; x2 += 512; x3 += 512; v[ 0] = (x0+t3) >> 10; v[56] = (x0-t3) >> 10; v[ 8] = (x1+t2) >> 10; v[48] = (x1-t2) >> 10; v[16] = (x2+t1) >> 10; v[40] = (x2-t1) >> 10; v[24] = (x3+t0) >> 10; v[32] = (x3-t0) >> 10; } } for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { // no fast case since the first 1D IDCT spread components out STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) // constants scaled things up by 1<<12, plus we had 1<<2 from first // loop, plus horizontal and vertical each scale by sqrt(8) so together // we've got an extra 1<<3, so 1<<17 total we need to remove. // so we want to round that, which means adding 0.5 * 1<<17, // aka 65536. Also, we'll end up with -128 to 127 that we want // to encode as 0..255 by adding 128, so we'll add that before the shift x0 += 65536 + (128<<17); x1 += 65536 + (128<<17); x2 += 65536 + (128<<17); x3 += 65536 + (128<<17); // tried computing the shifts into temps, or'ing the temps to see // if any were out of range, but that was slower o[0] = stbi__clamp((x0+t3) >> 17); o[7] = stbi__clamp((x0-t3) >> 17); o[1] = stbi__clamp((x1+t2) >> 17); o[6] = stbi__clamp((x1-t2) >> 17); o[2] = stbi__clamp((x2+t1) >> 17); o[5] = stbi__clamp((x2-t1) >> 17); o[3] = stbi__clamp((x3+t0) >> 17); o[4] = stbi__clamp((x3-t0) >> 17); } } #ifdef STBI_SSE2 // sse2 integer IDCT. not the fastest possible implementation but it // produces bit-identical results to the generic C version so it's // fully "transparent". static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) { // This is constructed to match our regular (generic) integer IDCT exactly. 
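// For reference: stbi__f2f(x) is x in 12-bit fixed point, i.e. (int)(x*4096 + 0.5),
// so e.g. stbi__f2f(0.5411961f) == 2217. bias_0 = 512 and bias_1 = 65536 + (128<<17)
// below are the same constants the scalar stbi__idct_block uses: 512 rounds the
// >>10 of the column pass, 65536 rounds the >>17 of the row pass, and 128<<17
// folds in the +128 level shift back to the 0..255 range.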
__m128i row0, row1, row2, row3, row4, row5, row6, row7; __m128i tmp; // dot product constant: even elems=x, odd elems=y #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) // out(1) = c1[even]*x + c1[odd]*y #define dct_rot(out0,out1, x,y,c0,c1) \ __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) // out = in << 12 (in 16-bit, out 32-bit) #define dct_widen(out, in) \ __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) // wide add #define dct_wadd(out, a, b) \ __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ __m128i out##_h = _mm_add_epi32(a##_h, b##_h) // wide sub #define dct_wsub(out, a, b) \ __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) // butterfly a/b, add bias, then shift by "s" and pack #define dct_bfly32o(out0, out1, a,b,bias,s) \ { \ __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ dct_wadd(sum, abiased, b); \ dct_wsub(dif, abiased, b); \ out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ } // 8-bit interleave step (for transposes) #define dct_interleave8(a, b) \ tmp = a; \ a = _mm_unpacklo_epi8(a, b); \ b = _mm_unpackhi_epi8(tmp, b) // 16-bit interleave step (for transposes) #define dct_interleave16(a, b) \ tmp = a; \ a = _mm_unpacklo_epi16(a, b); \ b = _mm_unpackhi_epi16(tmp, b) #define dct_pass(bias,shift) \ { \ /* even part */ \ dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ __m128i sum04 = _mm_add_epi16(row0, row4); \ __m128i dif04 = _mm_sub_epi16(row0, row4); \ dct_widen(t0e, sum04); \ dct_widen(t1e, dif04); \ dct_wadd(x0, t0e, t3e); \ dct_wsub(x3, t0e, t3e); \ dct_wadd(x1, t1e, t2e); \ dct_wsub(x2, t1e, t2e); \ /* odd part */ \ dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ __m128i sum17 = _mm_add_epi16(row1, row7); \ __m128i sum35 = _mm_add_epi16(row3, row5); \ dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ dct_wadd(x4, y0o, y4o); \ dct_wadd(x5, y1o, y5o); \ dct_wadd(x6, y2o, y5o); \ dct_wadd(x7, y3o, y4o); \ dct_bfly32o(row0,row7, x0,x7,bias,shift); \ dct_bfly32o(row1,row6, x1,x6,bias,shift); \ dct_bfly32o(row2,row5, x2,x5,bias,shift); \ dct_bfly32o(row3,row4, x3,x4,bias,shift); \ } __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 
1.501321110f)); // rounding biases in column/row passes, see stbi__idct_block for explanation. __m128i bias_0 = _mm_set1_epi32(512); __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); // load row0 = _mm_load_si128((const __m128i *) (data + 0*8)); row1 = _mm_load_si128((const __m128i *) (data + 1*8)); row2 = _mm_load_si128((const __m128i *) (data + 2*8)); row3 = _mm_load_si128((const __m128i *) (data + 3*8)); row4 = _mm_load_si128((const __m128i *) (data + 4*8)); row5 = _mm_load_si128((const __m128i *) (data + 5*8)); row6 = _mm_load_si128((const __m128i *) (data + 6*8)); row7 = _mm_load_si128((const __m128i *) (data + 7*8)); // column pass dct_pass(bias_0, 10); { // 16bit 8x8 transpose pass 1 dct_interleave16(row0, row4); dct_interleave16(row1, row5); dct_interleave16(row2, row6); dct_interleave16(row3, row7); // transpose pass 2 dct_interleave16(row0, row2); dct_interleave16(row1, row3); dct_interleave16(row4, row6); dct_interleave16(row5, row7); // transpose pass 3 dct_interleave16(row0, row1); dct_interleave16(row2, row3); dct_interleave16(row4, row5); dct_interleave16(row6, row7); } // row pass dct_pass(bias_1, 17); { // pack __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 __m128i p1 = _mm_packus_epi16(row2, row3); __m128i p2 = _mm_packus_epi16(row4, row5); __m128i p3 = _mm_packus_epi16(row6, row7); // 8bit 8x8 transpose pass 1 dct_interleave8(p0, p2); // a0e0a1e1... dct_interleave8(p1, p3); // c0g0c1g1... // transpose pass 2 dct_interleave8(p0, p1); // a0c0e0g0... dct_interleave8(p2, p3); // b0d0f0h0... // transpose pass 3 dct_interleave8(p0, p2); // a0b0c0d0... dct_interleave8(p1, p3); // a4b4c4d4... // store _mm_storel_epi64((__m128i *) out, p0); out += out_stride; _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; _mm_storel_epi64((__m128i *) out, p2); out += out_stride; _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; _mm_storel_epi64((__m128i *) out, p1); out += out_stride; _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; _mm_storel_epi64((__m128i *) out, p3); out += out_stride; _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); } #undef dct_const #undef dct_rot #undef dct_widen #undef dct_wadd #undef dct_wsub #undef dct_bfly32o #undef dct_interleave8 #undef dct_interleave16 #undef dct_pass } #endif // STBI_SSE2 #ifdef STBI_NEON // NEON integer IDCT. should produce bit-identical // results to the generic C version. 
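// One way to read the NEON kernel below: the rot*_* constants are the same
// stbi__f2f (x*4096) values the scalar kernel uses, the explicit rounding
// biases are replaced by rounding narrowing shifts (vrshrn/vqrshrun), and the
// +128 level shift is folded into the "+1024 DC bias" added to row0: adding
// 1024 to the DC input adds (1024<<12)>>10 == 4096 to every column-pass
// output, and the row pass turns that into (4096<<12)>>17 == 128 per pixel.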
static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) { int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); #define dct_long_mul(out, inq, coeff) \ int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) #define dct_long_mac(out, acc, inq, coeff) \ int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) #define dct_widen(out, inq) \ int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) // wide add #define dct_wadd(out, a, b) \ int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ int32x4_t out##_h = vaddq_s32(a##_h, b##_h) // wide sub #define dct_wsub(out, a, b) \ int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ int32x4_t out##_h = vsubq_s32(a##_h, b##_h) // butterfly a/b, then shift using "shiftop" by "s" and pack #define dct_bfly32o(out0,out1, a,b,shiftop,s) \ { \ dct_wadd(sum, a, b); \ dct_wsub(dif, a, b); \ out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ } #define dct_pass(shiftop, shift) \ { \ /* even part */ \ int16x8_t sum26 = vaddq_s16(row2, row6); \ dct_long_mul(p1e, sum26, rot0_0); \ dct_long_mac(t2e, p1e, row6, rot0_1); \ dct_long_mac(t3e, p1e, row2, rot0_2); \ int16x8_t sum04 = vaddq_s16(row0, row4); \ int16x8_t dif04 = vsubq_s16(row0, row4); \ dct_widen(t0e, sum04); \ dct_widen(t1e, dif04); \ dct_wadd(x0, t0e, t3e); \ dct_wsub(x3, t0e, t3e); \ dct_wadd(x1, t1e, t2e); \ dct_wsub(x2, t1e, t2e); \ /* odd part */ \ int16x8_t sum15 = vaddq_s16(row1, row5); \ int16x8_t sum17 = vaddq_s16(row1, row7); \ int16x8_t sum35 = vaddq_s16(row3, row5); \ int16x8_t sum37 = vaddq_s16(row3, row7); \ int16x8_t sumodd = vaddq_s16(sum17, sum35); \ dct_long_mul(p5o, sumodd, rot1_0); \ dct_long_mac(p1o, p5o, sum17, rot1_1); \ dct_long_mac(p2o, p5o, sum35, rot1_2); \ dct_long_mul(p3o, sum37, rot2_0); \ dct_long_mul(p4o, sum15, rot2_1); \ dct_wadd(sump13o, p1o, p3o); \ dct_wadd(sump24o, p2o, p4o); \ dct_wadd(sump23o, p2o, p3o); \ dct_wadd(sump14o, p1o, p4o); \ dct_long_mac(x4, sump13o, row7, rot3_0); \ dct_long_mac(x5, sump24o, row5, rot3_1); \ dct_long_mac(x6, sump23o, row3, rot3_2); \ dct_long_mac(x7, sump14o, row1, rot3_3); \ dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ } // load row0 = vld1q_s16(data + 0*8); row1 = vld1q_s16(data + 1*8); row2 = vld1q_s16(data + 2*8); row3 = vld1q_s16(data + 3*8); row4 = vld1q_s16(data + 4*8); row5 = vld1q_s16(data + 5*8); row6 = vld1q_s16(data + 6*8); row7 = vld1q_s16(data + 7*8); // add DC bias row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); // column pass 
dct_pass(vrshrn_n_s32, 10); // 16bit 8x8 transpose { // these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. // whether compilers actually get this is another story, sadly. #define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } #define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } #define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } // pass 1 dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 dct_trn16(row2, row3); dct_trn16(row4, row5); dct_trn16(row6, row7); // pass 2 dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 dct_trn32(row1, row3); dct_trn32(row4, row6); dct_trn32(row5, row7); // pass 3 dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 dct_trn64(row1, row5); dct_trn64(row2, row6); dct_trn64(row3, row7); #undef dct_trn16 #undef dct_trn32 #undef dct_trn64 } // row pass // vrshrn_n_s32 only supports shifts up to 16, we need // 17. so do a non-rounding shift of 16 first then follow // up with a rounding shift by 1. dct_pass(vshrn_n_s32, 16); { // pack and round uint8x8_t p0 = vqrshrun_n_s16(row0, 1); uint8x8_t p1 = vqrshrun_n_s16(row1, 1); uint8x8_t p2 = vqrshrun_n_s16(row2, 1); uint8x8_t p3 = vqrshrun_n_s16(row3, 1); uint8x8_t p4 = vqrshrun_n_s16(row4, 1); uint8x8_t p5 = vqrshrun_n_s16(row5, 1); uint8x8_t p6 = vqrshrun_n_s16(row6, 1); uint8x8_t p7 = vqrshrun_n_s16(row7, 1); // again, these can translate into one instruction, but often don't. #define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } #define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } #define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } // sadly can't use interleaved stores here since we only write // 8 bytes to each scan line! // 8x8 8-bit transpose pass 1 dct_trn8_8(p0, p1); dct_trn8_8(p2, p3); dct_trn8_8(p4, p5); dct_trn8_8(p6, p7); // pass 2 dct_trn8_16(p0, p2); dct_trn8_16(p1, p3); dct_trn8_16(p4, p6); dct_trn8_16(p5, p7); // pass 3 dct_trn8_32(p0, p4); dct_trn8_32(p1, p5); dct_trn8_32(p2, p6); dct_trn8_32(p3, p7); // store vst1_u8(out, p0); out += out_stride; vst1_u8(out, p1); out += out_stride; vst1_u8(out, p2); out += out_stride; vst1_u8(out, p3); out += out_stride; vst1_u8(out, p4); out += out_stride; vst1_u8(out, p5); out += out_stride; vst1_u8(out, p6); out += out_stride; vst1_u8(out, p7); #undef dct_trn8_8 #undef dct_trn8_16 #undef dct_trn8_32 } #undef dct_long_mul #undef dct_long_mac #undef dct_widen #undef dct_wadd #undef dct_wsub #undef dct_bfly32o #undef dct_pass } #endif // STBI_NEON #define STBI__MARKER_none 0xff // if there's a pending marker from the entropy stream, return that // otherwise, fetch from the stream and get a marker. 
if there's no // marker, return 0xff, which is never a valid marker value static stbi_uc stbi__get_marker(stbi__jpeg *j) { stbi_uc x; if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } x = stbi__get8(j->s); if (x != 0xff) return STBI__MARKER_none; while (x == 0xff) x = stbi__get8(j->s); // consume repeated 0xff fill bytes return x; } // in each scan, we'll have scan_n components, and the order // of the components is specified by order[] #define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) // after a restart interval, stbi__jpeg_reset the entropy decoder and // the dc prediction static void stbi__jpeg_reset(stbi__jpeg *j) { j->code_bits = 0; j->code_buffer = 0; j->nomore = 0; j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; j->marker = STBI__MARKER_none; j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; j->eob_run = 0; // no more than 1<<31 MCUs if no restart_interval? that's plenty safe, // since we don't even allow 1<<30 pixels } static int stbi__parse_entropy_coded_data(stbi__jpeg *z) { stbi__jpeg_reset(z); if (!z->progressive) { if (z->scan_n == 1) { int i,j; STBI_SIMD_ALIGN(short, data[64]); int n = z->order[0]; // non-interleaved data, we just need to process one block at a time, // in trivial scanline order // number of blocks to do just depends on how many actual "pixels" this // component has, independent of interleaved MCU blocking and such int w = (z->img_comp[n].x+7) >> 3; int h = (z->img_comp[n].y+7) >> 3; for (j=0; j < h; ++j) { for (i=0; i < w; ++i) { int ha = z->img_comp[n].ha; if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); // every data block is an MCU, so count down the restart interval if (--z->todo <= 0) { if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); // if it's NOT a restart, then just bail, so we get corrupt data // rather than no data if (!STBI__RESTART(z->marker)) return 1; stbi__jpeg_reset(z); } } } return 1; } else { // interleaved int i,j,k,x,y; STBI_SIMD_ALIGN(short, data[64]); for (j=0; j < z->img_mcu_y; ++j) { for (i=0; i < z->img_mcu_x; ++i) { // scan an interleaved mcu...
process scan_n components in order for (k=0; k < z->scan_n; ++k) { int n = z->order[k]; // scan out an mcu's worth of this component; that's just determined // by the basic H and V specified for the component for (y=0; y < z->img_comp[n].v; ++y) { for (x=0; x < z->img_comp[n].h; ++x) { int x2 = (i*z->img_comp[n].h + x)*8; int y2 = (j*z->img_comp[n].v + y)*8; int ha = z->img_comp[n].ha; if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); } } } // after all interleaved components, that's an interleaved MCU, // so now count down the restart interval if (--z->todo <= 0) { if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); if (!STBI__RESTART(z->marker)) return 1; stbi__jpeg_reset(z); } } } return 1; } } else { if (z->scan_n == 1) { int i,j; int n = z->order[0]; // non-interleaved data, we just need to process one block at a time, // in trivial scanline order // number of blocks to do just depends on how many actual "pixels" this // component has, independent of interleaved MCU blocking and such int w = (z->img_comp[n].x+7) >> 3; int h = (z->img_comp[n].y+7) >> 3; for (j=0; j < h; ++j) { for (i=0; i < w; ++i) { short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); if (z->spec_start == 0) { if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) return 0; } else { int ha = z->img_comp[n].ha; if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) return 0; } // every data block is an MCU, so countdown the restart interval if (--z->todo <= 0) { if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); if (!STBI__RESTART(z->marker)) return 1; stbi__jpeg_reset(z); } } } return 1; } else { // interleaved int i,j,k,x,y; for (j=0; j < z->img_mcu_y; ++j) { for (i=0; i < z->img_mcu_x; ++i) { // scan an interleaved mcu... 
process scan_n components in order for (k=0; k < z->scan_n; ++k) { int n = z->order[k]; // scan out an mcu's worth of this component; that's just determined // by the basic H and V specified for the component for (y=0; y < z->img_comp[n].v; ++y) { for (x=0; x < z->img_comp[n].h; ++x) { int x2 = (i*z->img_comp[n].h + x); int y2 = (j*z->img_comp[n].v + y); short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) return 0; } } } // after all interleaved components, that's an interleaved MCU, // so now count down the restart interval if (--z->todo <= 0) { if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); if (!STBI__RESTART(z->marker)) return 1; stbi__jpeg_reset(z); } } } return 1; } } } static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) { int i; for (i=0; i < 64; ++i) data[i] *= dequant[i]; } static void stbi__jpeg_finish(stbi__jpeg *z) { if (z->progressive) { // dequantize and idct the data int i,j,n; for (n=0; n < z->s->img_n; ++n) { int w = (z->img_comp[n].x+7) >> 3; int h = (z->img_comp[n].y+7) >> 3; for (j=0; j < h; ++j) { for (i=0; i < w; ++i) { short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); } } } } } static int stbi__process_marker(stbi__jpeg *z, int m) { int L; switch (m) { case STBI__MARKER_none: // no marker found return stbi__err("expected marker","Corrupt JPEG"); case 0xDD: // DRI - specify restart interval if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); z->restart_interval = stbi__get16be(z->s); return 1; case 0xDB: // DQT - define quantization table L = stbi__get16be(z->s)-2; while (L > 0) { int q = stbi__get8(z->s); int p = q >> 4, sixteen = (p != 0); int t = q & 15,i; if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); for (i=0; i < 64; ++i) z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); L -= (sixteen ? 129 : 65); } return L==0; case 0xC4: // DHT - define huffman table L = stbi__get16be(z->s)-2; while (L > 0) { stbi_uc *v; int sizes[16],i,n=0; int q = stbi__get8(z->s); int tc = q >> 4; int th = q & 15; if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); for (i=0; i < 16; ++i) { sizes[i] = stbi__get8(z->s); n += sizes[i]; } if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! 
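// Segment layout recap (JPEG spec): each table in a DHT segment is 1 byte of
// class/id (tc<<4 | th), 16 bytes of code-length counts, then n symbol values,
// so the code subtracts 17 here and n further down; a single DHT segment may
// hold several tables, which is why this loops until L reaches 0. For example,
// a typical baseline DC table with 12 symbols occupies 1 + 16 + 12 = 29 bytes.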
L -= 17; if (tc == 0) { if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; v = z->huff_dc[th].values; } else { if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; v = z->huff_ac[th].values; } for (i=0; i < n; ++i) v[i] = stbi__get8(z->s); if (tc != 0) stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); L -= n; } return L==0; } // check for comment block or APP blocks if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { L = stbi__get16be(z->s); if (L < 2) { if (m == 0xFE) return stbi__err("bad COM len","Corrupt JPEG"); else return stbi__err("bad APP len","Corrupt JPEG"); } L -= 2; if (m == 0xE0 && L >= 5) { // JFIF APP0 segment static const unsigned char tag[5] = {'J','F','I','F','\0'}; int ok = 1; int i; for (i=0; i < 5; ++i) if (stbi__get8(z->s) != tag[i]) ok = 0; L -= 5; if (ok) z->jfif = 1; } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; int ok = 1; int i; for (i=0; i < 6; ++i) if (stbi__get8(z->s) != tag[i]) ok = 0; L -= 6; if (ok) { stbi__get8(z->s); // version stbi__get16be(z->s); // flags0 stbi__get16be(z->s); // flags1 z->app14_color_transform = stbi__get8(z->s); // color transform L -= 6; } } stbi__skip(z->s, L); return 1; } return stbi__err("unknown marker","Corrupt JPEG"); } // after we see SOS static int stbi__process_scan_header(stbi__jpeg *z) { int i; int Ls = stbi__get16be(z->s); z->scan_n = stbi__get8(z->s); if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); for (i=0; i < z->scan_n; ++i) { int id = stbi__get8(z->s), which; int q = stbi__get8(z->s); for (which = 0; which < z->s->img_n; ++which) if (z->img_comp[which].id == id) break; if (which == z->s->img_n) return 0; // no match z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); z->order[i] = which; } { int aa; z->spec_start = stbi__get8(z->s); z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 aa = stbi__get8(z->s); z->succ_high = (aa >> 4); z->succ_low = (aa & 15); if (z->progressive) { if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) return stbi__err("bad SOS", "Corrupt JPEG"); } else { if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); z->spec_end = 63; } } return 1; } static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) { int i; for (i=0; i < ncomp; ++i) { if (z->img_comp[i].raw_data) { STBI_FREE(z->img_comp[i].raw_data); z->img_comp[i].raw_data = NULL; z->img_comp[i].data = NULL; } if (z->img_comp[i].raw_coeff) { STBI_FREE(z->img_comp[i].raw_coeff); z->img_comp[i].raw_coeff = 0; z->img_comp[i].coeff = 0; } if (z->img_comp[i].linebuf) { STBI_FREE(z->img_comp[i].linebuf); z->img_comp[i].linebuf = NULL; } } return why; } static int stbi__process_frame_header(stbi__jpeg *z, int scan) { stbi__context *s = z->s; int Lf,p,i,q, h_max=1,v_max=1,c; Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header 
height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); c = stbi__get8(s); if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); s->img_n = c; for (i=0; i < c; ++i) { z->img_comp[i].data = NULL; z->img_comp[i].linebuf = NULL; } if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); z->rgb = 0; for (i=0; i < s->img_n; ++i) { static const unsigned char rgb[3] = { 'R', 'G', 'B' }; z->img_comp[i].id = stbi__get8(s); if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) ++z->rgb; q = stbi__get8(s); z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); } if (scan != STBI__SCAN_load) return 1; if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); for (i=0; i < s->img_n; ++i) { if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; } // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios // and I've never seen a non-corrupted JPEG file actually use them for (i=0; i < s->img_n; ++i) { if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); } // compute interleaved mcu info z->img_h_max = h_max; z->img_v_max = v_max; z->img_mcu_w = h_max * 8; z->img_mcu_h = v_max * 8; // these sizes can't be more than 17 bits z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; for (i=0; i < s->img_n; ++i) { // number of effective pixels (e.g. for non-interleaved MCU) z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; // to simplify generation, we'll allocate enough memory to decode // the bogus oversized data from using interleaved MCUs and their // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't // discard the extra data until colorspace conversion // // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) // so these muls can't overflow with 32-bit ints (which we require) z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; z->img_comp[i].coeff = 0; z->img_comp[i].raw_coeff = 0; z->img_comp[i].linebuf = NULL; z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); if (z->img_comp[i].raw_data == NULL) return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); // align blocks for idct using mmx/sse z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); if (z->progressive) { // w2, h2 are multiples of 8 (see above) z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); if (z->img_comp[i].raw_coeff == NULL) return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); } } return 1; } // use comparisons since in some cases we handle more than one case (e.g. SOF) #define stbi__DNL(x) ((x) == 0xdc) #define stbi__SOI(x) ((x) == 0xd8) #define stbi__EOI(x) ((x) == 0xd9) #define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) #define stbi__SOS(x) ((x) == 0xda) #define stbi__SOF_progressive(x) ((x) == 0xc2) static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) { int m; z->jfif = 0; z->app14_color_transform = -1; // valid values are 0,1,2 z->marker = STBI__MARKER_none; // initialize cached marker to empty m = stbi__get_marker(z); if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); if (scan == STBI__SCAN_type) return 1; m = stbi__get_marker(z); while (!stbi__SOF(m)) { if (!stbi__process_marker(z,m)) return 0; m = stbi__get_marker(z); while (m == STBI__MARKER_none) { // some files have extra padding after their blocks, so ok, we'll scan if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); m = stbi__get_marker(z); } } z->progressive = stbi__SOF_progressive(m); if (!stbi__process_frame_header(z, scan)) return 0; return 1; } static int stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) { // some JPEGs have junk at end, skip over it but if we find what looks // like a valid marker, resume there while (!stbi__at_eof(j->s)) { int x = stbi__get8(j->s); while (x == 255) { // might be a marker if (stbi__at_eof(j->s)) return STBI__MARKER_none; x = stbi__get8(j->s); if (x != 0x00 && x != 0xff) { // not a stuffed zero or lead-in to another marker, looks // like an actual marker, return it return x; } // stuffed zero has x=0 now which ends the loop, meaning we go // back to regular scan loop. // repeated 0xff keeps trying to read the next byte of the marker. 
} } return STBI__MARKER_none; } // decode image to YCbCr format static int stbi__decode_jpeg_image(stbi__jpeg *j) { int m; for (m = 0; m < 4; m++) { j->img_comp[m].raw_data = NULL; j->img_comp[m].raw_coeff = NULL; } j->restart_interval = 0; if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; m = stbi__get_marker(j); while (!stbi__EOI(m)) { if (stbi__SOS(m)) { if (!stbi__process_scan_header(j)) return 0; if (!stbi__parse_entropy_coded_data(j)) return 0; if (j->marker == STBI__MARKER_none ) { j->marker = stbi__skip_jpeg_junk_at_end(j); // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 } m = stbi__get_marker(j); if (STBI__RESTART(m)) m = stbi__get_marker(j); } else if (stbi__DNL(m)) { int Ld = stbi__get16be(j->s); stbi__uint32 NL = stbi__get16be(j->s); if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); m = stbi__get_marker(j); } else { if (!stbi__process_marker(j, m)) return 1; m = stbi__get_marker(j); } } if (j->progressive) stbi__jpeg_finish(j); return 1; } // static jfif-centered resampling (across block boundaries) typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, int w, int hs); #define stbi__div4(x) ((stbi_uc) ((x) >> 2)) static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { STBI_NOTUSED(out); STBI_NOTUSED(in_far); STBI_NOTUSED(w); STBI_NOTUSED(hs); return in_near; } static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { // need to generate two samples vertically for every one in input int i; STBI_NOTUSED(hs); for (i=0; i < w; ++i) out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); return out; } static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { // need to generate two samples horizontally for every one in input int i; stbi_uc *input = in_near; if (w == 1) { // if only one sample, can't do any interpolation out[0] = out[1] = input[0]; return out; } out[0] = input[0]; out[1] = stbi__div4(input[0]*3 + input[1] + 2); for (i=1; i < w-1; ++i) { int n = 3*input[i]+2; out[i*2+0] = stbi__div4(n+input[i-1]); out[i*2+1] = stbi__div4(n+input[i+1]); } out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); out[i*2+1] = input[w-1]; STBI_NOTUSED(in_far); STBI_NOTUSED(hs); return out; } #define stbi__div16(x) ((stbi_uc) ((x) >> 4)) static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { // need to generate 2x2 samples for every one in input int i,t0,t1; if (w == 1) { out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); return out; } t1 = 3*in_near[0] + in_far[0]; out[0] = stbi__div4(t1+2); for (i=1; i < w; ++i) { t0 = t1; t1 = 3*in_near[i]+in_far[i]; out[i*2-1] = stbi__div16(3*t0 + t1 + 8); out[i*2 ] = stbi__div16(3*t1 + t0 + 8); } out[w*2-1] = stbi__div4(t1+2); STBI_NOTUSED(hs); return out; } #if defined(STBI_SSE2) || defined(STBI_NEON) static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { // need to generate 2x2 samples for every one in input int i=0,t0,t1; if (w == 1) { out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); return out; } t1 = 3*in_near[0] + in_far[0]; // process groups of 8 pixels for as long as we can. // note we can't handle the last pixel in a row in this loop // because we need to handle the filter boundary conditions. 
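// Quick sanity check of the filter this loop vectorizes (same math as the
// scalar stbi__resample_row_hv_2 above): vertically t = 3*near + far
// (weights 3/4 and 1/4), horizontally out = (3*t + t_neighbor + 8) >> 4.
// With near == far == 100 everywhere, t == 400 and out == (1200+400+8)>>4 == 100,
// i.e. flat regions pass through unchanged.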
for (; i < ((w-1) & ~7); i += 8) { #if defined(STBI_SSE2) // load and perform the vertical filtering pass // this uses 3*x + y = 4*x + (y - x) __m128i zero = _mm_setzero_si128(); __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); __m128i farw = _mm_unpacklo_epi8(farb, zero); __m128i nearw = _mm_unpacklo_epi8(nearb, zero); __m128i diff = _mm_sub_epi16(farw, nearw); __m128i nears = _mm_slli_epi16(nearw, 2); __m128i curr = _mm_add_epi16(nears, diff); // current row // horizontal filter works the same based on shifted vers of current // row. "prev" is current row shifted right by 1 pixel; we need to // insert the previous pixel value (from t1). // "next" is current row shifted left by 1 pixel, with first pixel // of next block of 8 pixels added in. __m128i prv0 = _mm_slli_si128(curr, 2); __m128i nxt0 = _mm_srli_si128(curr, 2); __m128i prev = _mm_insert_epi16(prv0, t1, 0); __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); // horizontal filter, polyphase implementation since it's convenient: // even pixels = 3*cur + prev = cur*4 + (prev - cur) // odd pixels = 3*cur + next = cur*4 + (next - cur) // note the shared term. __m128i bias = _mm_set1_epi16(8); __m128i curs = _mm_slli_epi16(curr, 2); __m128i prvd = _mm_sub_epi16(prev, curr); __m128i nxtd = _mm_sub_epi16(next, curr); __m128i curb = _mm_add_epi16(curs, bias); __m128i even = _mm_add_epi16(prvd, curb); __m128i odd = _mm_add_epi16(nxtd, curb); // interleave even and odd pixels, then undo scaling. __m128i int0 = _mm_unpacklo_epi16(even, odd); __m128i int1 = _mm_unpackhi_epi16(even, odd); __m128i de0 = _mm_srli_epi16(int0, 4); __m128i de1 = _mm_srli_epi16(int1, 4); // pack and write output __m128i outv = _mm_packus_epi16(de0, de1); _mm_storeu_si128((__m128i *) (out + i*2), outv); #elif defined(STBI_NEON) // load and perform the vertical filtering pass // this uses 3*x + y = 4*x + (y - x) uint8x8_t farb = vld1_u8(in_far + i); uint8x8_t nearb = vld1_u8(in_near + i); int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); int16x8_t curr = vaddq_s16(nears, diff); // current row // horizontal filter works the same based on shifted vers of current // row. "prev" is current row shifted right by 1 pixel; we need to // insert the previous pixel value (from t1). // "next" is current row shifted left by 1 pixel, with first pixel // of next block of 8 pixels added in. int16x8_t prv0 = vextq_s16(curr, curr, 7); int16x8_t nxt0 = vextq_s16(curr, curr, 1); int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); // horizontal filter, polyphase implementation since it's convenient: // even pixels = 3*cur + prev = cur*4 + (prev - cur) // odd pixels = 3*cur + next = cur*4 + (next - cur) // note the shared term. 
int16x8_t curs = vshlq_n_s16(curr, 2); int16x8_t prvd = vsubq_s16(prev, curr); int16x8_t nxtd = vsubq_s16(next, curr); int16x8_t even = vaddq_s16(curs, prvd); int16x8_t odd = vaddq_s16(curs, nxtd); // undo scaling and round, then store with even/odd phases interleaved uint8x8x2_t o; o.val[0] = vqrshrun_n_s16(even, 4); o.val[1] = vqrshrun_n_s16(odd, 4); vst2_u8(out + i*2, o); #endif // "previous" value for next iter t1 = 3*in_near[i+7] + in_far[i+7]; } t0 = t1; t1 = 3*in_near[i] + in_far[i]; out[i*2] = stbi__div16(3*t1 + t0 + 8); for (++i; i < w; ++i) { t0 = t1; t1 = 3*in_near[i]+in_far[i]; out[i*2-1] = stbi__div16(3*t0 + t1 + 8); out[i*2 ] = stbi__div16(3*t1 + t0 + 8); } out[w*2-1] = stbi__div4(t1+2); STBI_NOTUSED(hs); return out; } #endif static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { // resample with nearest-neighbor int i,j; STBI_NOTUSED(in_far); for (i=0; i < w; ++i) for (j=0; j < hs; ++j) out[i*hs+j] = in_near[i]; return out; } // this is a reduced-precision calculation of YCbCr-to-RGB introduced // to make sure the code produces the same results in both SIMD and scalar #define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) { int i; for (i=0; i < count; ++i) { int y_fixed = (y[i] << 20) + (1<<19); // rounding int r,g,b; int cr = pcr[i] - 128; int cb = pcb[i] - 128; r = y_fixed + cr* stbi__float2fixed(1.40200f); g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); b = y_fixed + cb* stbi__float2fixed(1.77200f); r >>= 20; g >>= 20; b >>= 20; if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } out[0] = (stbi_uc)r; out[1] = (stbi_uc)g; out[2] = (stbi_uc)b; out[3] = 255; out += step; } } #if defined(STBI_SSE2) || defined(STBI_NEON) static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) { int i = 0; #ifdef STBI_SSE2 // step == 3 is pretty ugly on the final interleave, and i'm not convinced // it's useful in practice (you wouldn't use it for textures, for example). // so just accelerate step == 4 case. if (step == 4) { // this is a fairly straightforward implementation and not super-optimized. 
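// Reference math being vectorized (the JFIF full-range transform, with
// cb' = cb-128 and cr' = cr-128):
//    r = y + 1.40200*cr'
//    g = y - 0.34414*cb' - 0.71414*cr'
//    b = y + 1.77200*cb'
// The constants below are those coefficients in round(c*4096) fixed point so
// they fit in int16 for _mm_mulhi_epi16; e.g. y=128, cb=cr=128 comes out as
// mid gray (128,128,128).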
__m128i signflip = _mm_set1_epi8(-0x80); __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); __m128i xw = _mm_set1_epi16(255); // alpha channel for (; i+7 < count; i += 8) { // load __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 // unpack to short (and left-shift cr, cb by 8) __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); // color transform __m128i yws = _mm_srli_epi16(yw, 4); __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); __m128i rws = _mm_add_epi16(cr0, yws); __m128i gwt = _mm_add_epi16(cb0, yws); __m128i bws = _mm_add_epi16(yws, cb1); __m128i gws = _mm_add_epi16(gwt, cr1); // descale __m128i rw = _mm_srai_epi16(rws, 4); __m128i bw = _mm_srai_epi16(bws, 4); __m128i gw = _mm_srai_epi16(gws, 4); // back to byte, set up for transpose __m128i brb = _mm_packus_epi16(rw, bw); __m128i gxb = _mm_packus_epi16(gw, xw); // transpose to interleave channels __m128i t0 = _mm_unpacklo_epi8(brb, gxb); __m128i t1 = _mm_unpackhi_epi8(brb, gxb); __m128i o0 = _mm_unpacklo_epi16(t0, t1); __m128i o1 = _mm_unpackhi_epi16(t0, t1); // store _mm_storeu_si128((__m128i *) (out + 0), o0); _mm_storeu_si128((__m128i *) (out + 16), o1); out += 32; } } #endif #ifdef STBI_NEON // in this version, step=3 support would be easy to add. but is there demand? if (step == 4) { // this is a fairly straightforward implementation and not super-optimized. 
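// Note for the NEON path below: cb/cr are widened with a shift of 7 (not 8)
// because vqdmulhq_s16 is a *doubling* high multiply, (2*a*b) >> 16, so the
// net scaling matches the SSE2 path above (which pre-scales by 256 and uses a
// plain high multiply).  There is also no explicit +8 rounding bias here:
// vqrshrun_n_s16(..., 4) rounds while narrowing at the end.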
uint8x8_t signflip = vdup_n_u8(0x80); int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); for (; i+7 < count; i += 8) { // load uint8x8_t y_bytes = vld1_u8(y + i); uint8x8_t cr_bytes = vld1_u8(pcr + i); uint8x8_t cb_bytes = vld1_u8(pcb + i); int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); // expand to s16 int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); int16x8_t crw = vshll_n_s8(cr_biased, 7); int16x8_t cbw = vshll_n_s8(cb_biased, 7); // color transform int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); int16x8_t rws = vaddq_s16(yws, cr0); int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); int16x8_t bws = vaddq_s16(yws, cb1); // undo scaling, round, convert to byte uint8x8x4_t o; o.val[0] = vqrshrun_n_s16(rws, 4); o.val[1] = vqrshrun_n_s16(gws, 4); o.val[2] = vqrshrun_n_s16(bws, 4); o.val[3] = vdup_n_u8(255); // store, interleaving r/g/b/a vst4_u8(out, o); out += 8*4; } } #endif for (; i < count; ++i) { int y_fixed = (y[i] << 20) + (1<<19); // rounding int r,g,b; int cr = pcr[i] - 128; int cb = pcb[i] - 128; r = y_fixed + cr* stbi__float2fixed(1.40200f); g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); b = y_fixed + cb* stbi__float2fixed(1.77200f); r >>= 20; g >>= 20; b >>= 20; if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } out[0] = (stbi_uc)r; out[1] = (stbi_uc)g; out[2] = (stbi_uc)b; out[3] = 255; out += step; } } #endif // set up the kernels static void stbi__setup_jpeg(stbi__jpeg *j) { j->idct_block_kernel = stbi__idct_block; j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; #ifdef STBI_SSE2 if (stbi__sse2_available()) { j->idct_block_kernel = stbi__idct_simd; j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; } #endif #ifdef STBI_NEON j->idct_block_kernel = stbi__idct_simd; j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; #endif } // clean up the temporary component buffers static void stbi__cleanup_jpeg(stbi__jpeg *j) { stbi__free_jpeg_components(j, j->s->img_n, 0); } typedef struct { resample_row_func resample; stbi_uc *line0,*line1; int hs,vs; // expansion factor in each axis int w_lores; // horizontal pixels pre-expansion int ystep; // how far through vertical expansion we are int ypos; // which pre-expansion row we're on } stbi__resample; // fast 0..255 * 0..255 => 0..255 rounded multiplication static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) { unsigned int t = x*y + 128; return (stbi_uc) ((t + (t >>8)) >> 8); } static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) { int n, decode_n, is_rgb; z->s->img_n = 0; // make stbi__cleanup_jpeg safe // validate req_comp if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); // load a jpeg image from whichever source, but leave in YCbCr format if 
(!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } // determine actual number of components to generate n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); if (z->s->img_n == 3 && n < 3 && !is_rgb) decode_n = 1; else decode_n = z->s->img_n; // nothing to do if no components requested; check this now to avoid // accessing uninitialized coutput[0] later if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } // resample and color-convert { int k; unsigned int i,j; stbi_uc *output; stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; stbi__resample res_comp[4]; for (k=0; k < decode_n; ++k) { stbi__resample *r = &res_comp[k]; // allocate line buffer big enough for upsampling off the edges // with upsample factor of 4 z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } r->hs = z->img_h_max / z->img_comp[k].h; r->vs = z->img_v_max / z->img_comp[k].v; r->ystep = r->vs >> 1; r->w_lores = (z->s->img_x + r->hs-1) / r->hs; r->ypos = 0; r->line0 = r->line1 = z->img_comp[k].data; if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; else r->resample = stbi__resample_row_generic; } // can't error after this so, this is safe output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } // now go ahead and resample for (j=0; j < z->s->img_y; ++j) { stbi_uc *out = output + n * z->s->img_x * j; for (k=0; k < decode_n; ++k) { stbi__resample *r = &res_comp[k]; int y_bot = r->ystep >= (r->vs >> 1); coutput[k] = r->resample(z->img_comp[k].linebuf, y_bot ? r->line1 : r->line0, y_bot ? r->line0 : r->line1, r->w_lores, r->hs); if (++r->ystep >= r->vs) { r->ystep = 0; r->line0 = r->line1; if (++r->ypos < z->img_comp[k].y) r->line1 += z->img_comp[k].w2; } } if (n >= 3) { stbi_uc *y = coutput[0]; if (z->s->img_n == 3) { if (is_rgb) { for (i=0; i < z->s->img_x; ++i) { out[0] = y[i]; out[1] = coutput[1][i]; out[2] = coutput[2][i]; out[3] = 255; out += n; } } else { z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); } } else if (z->s->img_n == 4) { if (z->app14_color_transform == 0) { // CMYK for (i=0; i < z->s->img_x; ++i) { stbi_uc m = coutput[3][i]; out[0] = stbi__blinn_8x8(coutput[0][i], m); out[1] = stbi__blinn_8x8(coutput[1][i], m); out[2] = stbi__blinn_8x8(coutput[2][i], m); out[3] = 255; out += n; } } else if (z->app14_color_transform == 2) { // YCCK z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); for (i=0; i < z->s->img_x; ++i) { stbi_uc m = coutput[3][i]; out[0] = stbi__blinn_8x8(255 - out[0], m); out[1] = stbi__blinn_8x8(255 - out[1], m); out[2] = stbi__blinn_8x8(255 - out[2], m); out += n; } } else { // YCbCr + alpha? 
Ignore the fourth channel for now z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); } } else for (i=0; i < z->s->img_x; ++i) { out[0] = out[1] = out[2] = y[i]; out[3] = 255; // not used if n==3 out += n; } } else { if (is_rgb) { if (n == 1) for (i=0; i < z->s->img_x; ++i) *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); else { for (i=0; i < z->s->img_x; ++i, out += 2) { out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); out[1] = 255; } } } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { for (i=0; i < z->s->img_x; ++i) { stbi_uc m = coutput[3][i]; stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); out[0] = stbi__compute_y(r, g, b); out[1] = 255; out += n; } } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { for (i=0; i < z->s->img_x; ++i) { out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); out[1] = 255; out += n; } } else { stbi_uc *y = coutput[0]; if (n == 1) for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; else for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } } } } stbi__cleanup_jpeg(z); *out_x = z->s->img_x; *out_y = z->s->img_y; if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output return output; } } static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { unsigned char* result; stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); if (!j) return stbi__errpuc("outofmem", "Out of memory"); memset(j, 0, sizeof(stbi__jpeg)); STBI_NOTUSED(ri); j->s = s; stbi__setup_jpeg(j); result = load_jpeg_image(j, x,y,comp,req_comp); STBI_FREE(j); return result; } static int stbi__jpeg_test(stbi__context *s) { int r; stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); if (!j) return stbi__err("outofmem", "Out of memory"); memset(j, 0, sizeof(stbi__jpeg)); j->s = s; stbi__setup_jpeg(j); r = stbi__decode_jpeg_header(j, STBI__SCAN_type); stbi__rewind(s); STBI_FREE(j); return r; } static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) { if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { stbi__rewind( j->s ); return 0; } if (x) *x = j->s->img_x; if (y) *y = j->s->img_y; if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; return 1; } static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) { int result; stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); if (!j) return stbi__err("outofmem", "Out of memory"); memset(j, 0, sizeof(stbi__jpeg)); j->s = s; result = stbi__jpeg_info_raw(j, x, y, comp); STBI_FREE(j); return result; } #endif // public domain zlib decode v0.2 Sean Barrett 2006-11-18 // simple implementation // - all input must be provided in an upfront buffer // - all output is written to a single output buffer (can malloc/realloc) // performance // - fast huffman #ifndef STBI_NO_ZLIB // fast-way is faster to check than jpeg huffman, but slow way is slower #define STBI__ZFAST_BITS 9 // accelerate all cases in default tables #define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) #define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet // zlib-style huffman encoding // (jpegs packs from left, zlib from right, so can't share code) typedef struct { stbi__uint16 fast[1 << STBI__ZFAST_BITS]; stbi__uint16 firstcode[16]; int maxcode[17]; stbi__uint16 firstsymbol[16]; stbi_uc size[STBI__ZNSYMS]; stbi__uint16 value[STBI__ZNSYMS]; } stbi__zhuffman; stbi_inline static int stbi__bitreverse16(int n) { n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); return n; } stbi_inline static int stbi__bit_reverse(int v, int bits) { STBI_ASSERT(bits <= 16); // to bit reverse n bits, reverse 16 and shift // e.g. 11 bits, bit reverse and shift away 5 return stbi__bitreverse16(v) >> (16-bits); } static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) { int i,k=0; int code, next_code[16], sizes[17]; // DEFLATE spec for generating codes memset(sizes, 0, sizeof(sizes)); memset(z->fast, 0, sizeof(z->fast)); for (i=0; i < num; ++i) ++sizes[sizelist[i]]; sizes[0] = 0; for (i=1; i < 16; ++i) if (sizes[i] > (1 << i)) return stbi__err("bad sizes", "Corrupt PNG"); code = 0; for (i=1; i < 16; ++i) { next_code[i] = code; z->firstcode[i] = (stbi__uint16) code; z->firstsymbol[i] = (stbi__uint16) k; code = (code + sizes[i]); if (sizes[i]) if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); z->maxcode[i] = code << (16-i); // preshift for inner loop code <<= 1; k += sizes[i]; } z->maxcode[16] = 0x10000; // sentinel for (i=0; i < num; ++i) { int s = sizelist[i]; if (s) { int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); z->size [c] = (stbi_uc ) s; z->value[c] = (stbi__uint16) i; if (s <= STBI__ZFAST_BITS) { int j = stbi__bit_reverse(next_code[s],s); while (j < (1 << STBI__ZFAST_BITS)) { z->fast[j] = fastv; j += (1 << s); } } ++next_code[s]; } } return 1; } // zlib-from-memory implementation for PNG reading // because PNG allows splitting the zlib stream arbitrarily, // and it's annoying structurally to have PNG call ZLIB call PNG, // we require PNG read all the IDATs and combine them into a single // memory buffer typedef struct { stbi_uc *zbuffer, *zbuffer_end; int num_bits; stbi__uint32 code_buffer; char *zout; char *zout_start; char *zout_end; int z_expandable; stbi__zhuffman z_length, z_distance; } stbi__zbuf; stbi_inline static int stbi__zeof(stbi__zbuf *z) { return (z->zbuffer >= z->zbuffer_end); } stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) { return stbi__zeof(z) ? 
0 : *z->zbuffer++; } static void stbi__fill_bits(stbi__zbuf *z) { do { if (z->code_buffer >= (1U << z->num_bits)) { z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ return; } z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; z->num_bits += 8; } while (z->num_bits <= 24); } stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) { unsigned int k; if (z->num_bits < n) stbi__fill_bits(z); k = z->code_buffer & ((1 << n) - 1); z->code_buffer >>= n; z->num_bits -= n; return k; } static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) { int b,s,k; // not resolved by fast table, so compute it the slow way // use jpeg approach, which requires MSbits at top k = stbi__bit_reverse(a->code_buffer, 16); for (s=STBI__ZFAST_BITS+1; ; ++s) if (k < z->maxcode[s]) break; if (s >= 16) return -1; // invalid code! // code size is s, so: b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. a->code_buffer >>= s; a->num_bits -= s; return z->value[b]; } stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) { int b,s; if (a->num_bits < 16) { if (stbi__zeof(a)) { return -1; /* report error for unexpected end of data. */ } stbi__fill_bits(a); } b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; if (b) { s = b >> 9; a->code_buffer >>= s; a->num_bits -= s; return b & 511; } return stbi__zhuffman_decode_slowpath(a, z); } static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes { char *q; unsigned int cur, limit, old_limit; z->zout = zout; if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); cur = (unsigned int) (z->zout - z->zout_start); limit = old_limit = (unsigned) (z->zout_end - z->zout_start); if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); while (cur + n > limit) { if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); limit *= 2; } q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); STBI_NOTUSED(old_limit); if (q == NULL) return stbi__err("outofmem", "Out of memory"); z->zout_start = q; z->zout = q + cur; z->zout_end = q + limit; return 1; } static const int stbi__zlength_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 }; static const int stbi__zlength_extra[31]= { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; static const int stbi__zdist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; static int stbi__parse_huffman_block(stbi__zbuf *a) { char *zout = a->zout; for(;;) { int z = stbi__zhuffman_decode(a, &a->z_length); if (z < 256) { if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes if (zout >= a->zout_end) { if (!stbi__zexpand(a, zout, 1)) return 0; zout = a->zout; } *zout++ = (char) z; } else { stbi_uc *p; int len,dist; if (z == 256) { a->zout = zout; return 1; } if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data z -= 257; len = stbi__zlength_base[z]; if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); z = 
stbi__zhuffman_decode(a, &a->z_distance); if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data dist = stbi__zdist_base[z]; if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); if (zout + len > a->zout_end) { if (!stbi__zexpand(a, zout, len)) return 0; zout = a->zout; } p = (stbi_uc *) (zout - dist); if (dist == 1) { // run of one byte; common in images. stbi_uc v = *p; if (len) { do *zout++ = v; while (--len); } } else { if (len) { do *zout++ = *p++; while (--len); } } } } } static int stbi__compute_huffman_codes(stbi__zbuf *a) { static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; stbi__zhuffman z_codelength; stbi_uc lencodes[286+32+137];//padding for maximum single op stbi_uc codelength_sizes[19]; int i,n; int hlit = stbi__zreceive(a,5) + 257; int hdist = stbi__zreceive(a,5) + 1; int hclen = stbi__zreceive(a,4) + 4; int ntot = hlit + hdist; memset(codelength_sizes, 0, sizeof(codelength_sizes)); for (i=0; i < hclen; ++i) { int s = stbi__zreceive(a,3); codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; } if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; n = 0; while (n < ntot) { int c = stbi__zhuffman_decode(a, &z_codelength); if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); if (c < 16) lencodes[n++] = (stbi_uc) c; else { stbi_uc fill = 0; if (c == 16) { c = stbi__zreceive(a,2)+3; if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); fill = lencodes[n-1]; } else if (c == 17) { c = stbi__zreceive(a,3)+3; } else if (c == 18) { c = stbi__zreceive(a,7)+11; } else { return stbi__err("bad codelengths", "Corrupt PNG"); } if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); memset(lencodes+n, fill, c); n += c; } } if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; return 1; } static int stbi__parse_uncompressed_block(stbi__zbuf *a) { stbi_uc header[4]; int len,nlen,k; if (a->num_bits & 7) stbi__zreceive(a, a->num_bits & 7); // discard // drain the bit-packed data into header k = 0; while (a->num_bits > 0) { header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check a->code_buffer >>= 8; a->num_bits -= 8; } if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); // now fill header the normal way while (k < 4) header[k++] = stbi__zget8(a); len = header[1] * 256 + header[0]; nlen = header[3] * 256 + header[2]; if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); if (a->zout + len > a->zout_end) if (!stbi__zexpand(a, a->zout, len)) return 0; memcpy(a->zout, a->zbuffer, len); a->zbuffer += len; a->zout += len; return 1; } static int stbi__parse_zlib_header(stbi__zbuf *a) { int cmf = stbi__zget8(a); int cm = cmf & 15; /* int cinfo = cmf >> 4; */ int flg = stbi__zget8(a); if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png if (cm != 8) return stbi__err("bad 
compression","Corrupt PNG"); // DEFLATE required for png // window = 1 << (8 + cinfo)... but who cares, we fully buffer output return 1; } static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = { 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 }; static const stbi_uc stbi__zdefault_distance[32] = { 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 }; /* Init algorithm: { int i; // use <= to match clearly with spec for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; } */ static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) { int final, type; if (parse_header) if (!stbi__parse_zlib_header(a)) return 0; a->num_bits = 0; a->code_buffer = 0; do { final = stbi__zreceive(a,1); type = stbi__zreceive(a,2); if (type == 0) { if (!stbi__parse_uncompressed_block(a)) return 0; } else if (type == 3) { return 0; } else { if (type == 1) { // use fixed code lengths if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; } else { if (!stbi__compute_huffman_codes(a)) return 0; } if (!stbi__parse_huffman_block(a)) return 0; } } while (!final); return 1; } static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) { a->zout_start = obuf; a->zout = obuf; a->zout_end = obuf + olen; a->z_expandable = exp; return stbi__parse_zlib(a, parse_header); } STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) { stbi__zbuf a; char *p = (char *) stbi__malloc(initial_size); if (p == NULL) return NULL; a.zbuffer = (stbi_uc *) buffer; a.zbuffer_end = (stbi_uc *) buffer + len; if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { if (outlen) *outlen = (int) (a.zout - a.zout_start); return a.zout_start; } else { STBI_FREE(a.zout_start); return NULL; } } STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) { return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); } STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) { stbi__zbuf a; char *p = (char *) stbi__malloc(initial_size); if (p == NULL) return NULL; a.zbuffer = (stbi_uc *) buffer; a.zbuffer_end = (stbi_uc *) buffer + len; if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { if (outlen) *outlen = (int) (a.zout - a.zout_start); return a.zout_start; } else { STBI_FREE(a.zout_start); return NULL; } } STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) { stbi__zbuf a; a.zbuffer = (stbi_uc *) ibuffer; a.zbuffer_end = (stbi_uc *) ibuffer + ilen; if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) return (int) (a.zout - a.zout_start); else return -1; } 
STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) { stbi__zbuf a; char *p = (char *) stbi__malloc(16384); if (p == NULL) return NULL; a.zbuffer = (stbi_uc *) buffer; a.zbuffer_end = (stbi_uc *) buffer+len; if (stbi__do_zlib(&a, p, 16384, 1, 0)) { if (outlen) *outlen = (int) (a.zout - a.zout_start); return a.zout_start; } else { STBI_FREE(a.zout_start); return NULL; } } STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) { stbi__zbuf a; a.zbuffer = (stbi_uc *) ibuffer; a.zbuffer_end = (stbi_uc *) ibuffer + ilen; if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) return (int) (a.zout - a.zout_start); else return -1; } #endif // public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 // simple implementation // - only 8-bit samples // - no CRC checking // - allocates lots of intermediate memory // - avoids problem of streaming data between subsystems // - avoids explicit window management // performance // - uses stb_zlib, a PD zlib implementation with fast huffman decoding #ifndef STBI_NO_PNG typedef struct { stbi__uint32 length; stbi__uint32 type; } stbi__pngchunk; static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) { stbi__pngchunk c; c.length = stbi__get32be(s); c.type = stbi__get32be(s); return c; } static int stbi__check_png_header(stbi__context *s) { static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; int i; for (i=0; i < 8; ++i) if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); return 1; } typedef struct { stbi__context *s; stbi_uc *idata, *expanded, *out; int depth; } stbi__png; enum { STBI__F_none=0, STBI__F_sub=1, STBI__F_up=2, STBI__F_avg=3, STBI__F_paeth=4, // synthetic filters used for first scanline to avoid needing a dummy row of 0s STBI__F_avg_first, STBI__F_paeth_first }; static stbi_uc first_row_filter[5] = { STBI__F_none, STBI__F_sub, STBI__F_none, STBI__F_avg_first, STBI__F_paeth_first }; static int stbi__paeth(int a, int b, int c) { int p = a + b - c; int pa = abs(p-a); int pb = abs(p-b); int pc = abs(p-c); if (pa <= pb && pa <= pc) return a; if (pb <= pc) return b; return c; } static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; // create the png data from post-deflated data static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) { int bytes = (depth == 16? 2 : 1); stbi__context *s = a->s; stbi__uint32 i,j,stride = x*out_n*bytes; stbi__uint32 img_len, img_width_bytes; int k; int img_n = s->img_n; // copy it into a local for later int output_bytes = out_n*bytes; int filter_bytes = img_n*bytes; int width = x; STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into if (!a->out) return stbi__err("outofmem", "Out of memory"); if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); img_width_bytes = (((img_n * x * depth) + 7) >> 3); img_len = (img_width_bytes + 1) * y; // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), // so just check for raw_len < img_len always. 
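// For reference, img_len computed above is the amount of data inflate must
// have produced: one filter-type byte per scanline plus the packed scanline
// itself.  e.g. an 8-bit RGB image with x=100, y=50: img_width_bytes =
// ((3*100*8)+7)>>3 = 300, so img_len = (300+1)*50 = 15050 bytes.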
if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); for (j=0; j < y; ++j) { stbi_uc *cur = a->out + stride*j; stbi_uc *prior; int filter = *raw++; if (filter > 4) return stbi__err("invalid filter","Corrupt PNG"); if (depth < 8) { if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place filter_bytes = 1; width = img_width_bytes; } prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above // if first row, use special filter that doesn't sample previous row if (j == 0) filter = first_row_filter[filter]; // handle first byte explicitly for (k=0; k < filter_bytes; ++k) { switch (filter) { case STBI__F_none : cur[k] = raw[k]; break; case STBI__F_sub : cur[k] = raw[k]; break; case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; case STBI__F_avg_first : cur[k] = raw[k]; break; case STBI__F_paeth_first: cur[k] = raw[k]; break; } } if (depth == 8) { if (img_n != out_n) cur[img_n] = 255; // first pixel raw += img_n; cur += out_n; prior += out_n; } else if (depth == 16) { if (img_n != out_n) { cur[filter_bytes] = 255; // first pixel top byte cur[filter_bytes+1] = 255; // first pixel bottom byte } raw += filter_bytes; cur += output_bytes; prior += output_bytes; } else { raw += 1; cur += 1; prior += 1; } // this is a little gross, so that we don't switch per-pixel or per-component if (depth < 8 || img_n == out_n) { int nk = (width - 1)*filter_bytes; #define STBI__CASE(f) \ case f: \ for (k=0; k < nk; ++k) switch (filter) { // "none" filter turns into a memcpy here; make that explicit. 
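// (PNG filter recurrences implemented by the cases below, per the PNG spec,
//  with a = byte to the left, b = byte above, c = byte above-left:
//    Sub:   out = raw + a
//    Up:    out = raw + b
//    Avg:   out = raw + (a + b)/2
//    Paeth: out = raw + paeth(a,b,c), the one of a/b/c closest to a+b-c
//  the *_first variants are the scanline-0 forms with the missing row treated as 0.)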
case STBI__F_none: memcpy(cur, raw, nk); break; STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; } #undef STBI__CASE raw += nk; } else { STBI_ASSERT(img_n+1 == out_n); #define STBI__CASE(f) \ case f: \ for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ for (k=0; k < filter_bytes; ++k) switch (filter) { STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; } #undef STBI__CASE // the loop above sets the high byte of the pixels' alpha, but for // 16 bit png files we also need the low byte set. we'll do that here. if (depth == 16) { cur = a->out + stride*j; // start at the beginning of the row again for (i=0; i < x; ++i,cur+=output_bytes) { cur[filter_bytes+1] = 255; } } } } // we make a separate pass to expand bits to pixels; for performance, // this could run two scanlines behind the above code, so it won't // intefere with filtering but will still be in the cache. if (depth < 8) { for (j=0; j < y; ++j) { stbi_uc *cur = a->out + stride*j; stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range // note that the final byte might overshoot and write more data than desired. // we can allocate enough data that this never writes out of memory, but it // could also overwrite the next scanline. can it overwrite non-empty data // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
// so we need to explicitly clamp the final ones if (depth == 4) { for (k=x*img_n; k >= 2; k-=2, ++in) { *cur++ = scale * ((*in >> 4) ); *cur++ = scale * ((*in ) & 0x0f); } if (k > 0) *cur++ = scale * ((*in >> 4) ); } else if (depth == 2) { for (k=x*img_n; k >= 4; k-=4, ++in) { *cur++ = scale * ((*in >> 6) ); *cur++ = scale * ((*in >> 4) & 0x03); *cur++ = scale * ((*in >> 2) & 0x03); *cur++ = scale * ((*in ) & 0x03); } if (k > 0) *cur++ = scale * ((*in >> 6) ); if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); } else if (depth == 1) { for (k=x*img_n; k >= 8; k-=8, ++in) { *cur++ = scale * ((*in >> 7) ); *cur++ = scale * ((*in >> 6) & 0x01); *cur++ = scale * ((*in >> 5) & 0x01); *cur++ = scale * ((*in >> 4) & 0x01); *cur++ = scale * ((*in >> 3) & 0x01); *cur++ = scale * ((*in >> 2) & 0x01); *cur++ = scale * ((*in >> 1) & 0x01); *cur++ = scale * ((*in ) & 0x01); } if (k > 0) *cur++ = scale * ((*in >> 7) ); if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); } if (img_n != out_n) { int q; // insert alpha = 255 cur = a->out + stride*j; if (img_n == 1) { for (q=x-1; q >= 0; --q) { cur[q*2+1] = 255; cur[q*2+0] = cur[q]; } } else { STBI_ASSERT(img_n == 3); for (q=x-1; q >= 0; --q) { cur[q*4+3] = 255; cur[q*4+2] = cur[q*3+2]; cur[q*4+1] = cur[q*3+1]; cur[q*4+0] = cur[q*3+0]; } } } } } else if (depth == 16) { // force the image data from big-endian to platform-native. // this is done in a separate pass due to the decoding relying // on the data being untouched, but could probably be done // per-line during decode if care is taken. stbi_uc *cur = a->out; stbi__uint16 *cur16 = (stbi__uint16*)cur; for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { *cur16 = (cur[0] << 8) | cur[1]; } } return 1; } static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) { int bytes = (depth == 16 ? 
2 : 1); int out_bytes = out_n * bytes; stbi_uc *final; int p; if (!interlaced) return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); // de-interlacing final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); if (!final) return stbi__err("outofmem", "Out of memory"); for (p=0; p < 7; ++p) { int xorig[] = { 0,4,0,2,0,1,0 }; int yorig[] = { 0,0,4,0,2,0,1 }; int xspc[] = { 8,8,4,4,2,2,1 }; int yspc[] = { 8,8,8,4,4,2,2 }; int i,j,x,y; // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; if (x && y) { stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { STBI_FREE(final); return 0; } for (j=0; j < y; ++j) { for (i=0; i < x; ++i) { int out_y = j*yspc[p]+yorig[p]; int out_x = i*xspc[p]+xorig[p]; memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, a->out + (j*x+i)*out_bytes, out_bytes); } } STBI_FREE(a->out); image_data += img_len; image_data_len -= img_len; } } a->out = final; return 1; } static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) { stbi__context *s = z->s; stbi__uint32 i, pixel_count = s->img_x * s->img_y; stbi_uc *p = z->out; // compute color-based transparency, assuming we've // already got 255 as the alpha value in the output STBI_ASSERT(out_n == 2 || out_n == 4); if (out_n == 2) { for (i=0; i < pixel_count; ++i) { p[1] = (p[0] == tc[0] ? 0 : 255); p += 2; } } else { for (i=0; i < pixel_count; ++i) { if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) p[3] = 0; p += 4; } } return 1; } static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) { stbi__context *s = z->s; stbi__uint32 i, pixel_count = s->img_x * s->img_y; stbi__uint16 *p = (stbi__uint16*) z->out; // compute color-based transparency, assuming we've // already got 65535 as the alpha value in the output STBI_ASSERT(out_n == 2 || out_n == 4); if (out_n == 2) { for (i = 0; i < pixel_count; ++i) { p[1] = (p[0] == tc[0] ? 
0 : 65535); p += 2; } } else { for (i = 0; i < pixel_count; ++i) { if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) p[3] = 0; p += 4; } } return 1; } static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) { stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; stbi_uc *p, *temp_out, *orig = a->out; p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); if (p == NULL) return stbi__err("outofmem", "Out of memory"); // between here and free(out) below, exitting would leak temp_out = p; if (pal_img_n == 3) { for (i=0; i < pixel_count; ++i) { int n = orig[i]*4; p[0] = palette[n ]; p[1] = palette[n+1]; p[2] = palette[n+2]; p += 3; } } else { for (i=0; i < pixel_count; ++i) { int n = orig[i]*4; p[0] = palette[n ]; p[1] = palette[n+1]; p[2] = palette[n+2]; p[3] = palette[n+3]; p += 4; } } STBI_FREE(a->out); a->out = temp_out; STBI_NOTUSED(len); return 1; } static int stbi__unpremultiply_on_load_global = 0; static int stbi__de_iphone_flag_global = 0; STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) { stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; } STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) { stbi__de_iphone_flag_global = flag_true_if_should_convert; } #ifndef STBI_THREAD_LOCAL #define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global #define stbi__de_iphone_flag stbi__de_iphone_flag_global #else static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) { stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; stbi__unpremultiply_on_load_set = 1; } STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) { stbi__de_iphone_flag_local = flag_true_if_should_convert; stbi__de_iphone_flag_set = 1; } #define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ ? stbi__unpremultiply_on_load_local \ : stbi__unpremultiply_on_load_global) #define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ ? 
stbi__de_iphone_flag_local \ : stbi__de_iphone_flag_global) #endif // STBI_THREAD_LOCAL static void stbi__de_iphone(stbi__png *z) { stbi__context *s = z->s; stbi__uint32 i, pixel_count = s->img_x * s->img_y; stbi_uc *p = z->out; if (s->img_out_n == 3) { // convert bgr to rgb for (i=0; i < pixel_count; ++i) { stbi_uc t = p[0]; p[0] = p[2]; p[2] = t; p += 3; } } else { STBI_ASSERT(s->img_out_n == 4); if (stbi__unpremultiply_on_load) { // convert bgr to rgb and unpremultiply for (i=0; i < pixel_count; ++i) { stbi_uc a = p[3]; stbi_uc t = p[0]; if (a) { stbi_uc half = a / 2; p[0] = (p[2] * 255 + half) / a; p[1] = (p[1] * 255 + half) / a; p[2] = ( t * 255 + half) / a; } else { p[0] = p[2]; p[2] = t; } p += 4; } } else { // convert bgr to rgb for (i=0; i < pixel_count; ++i) { stbi_uc t = p[0]; p[0] = p[2]; p[2] = t; p += 4; } } } } #define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) { stbi_uc palette[1024], pal_img_n=0; stbi_uc has_trans=0, tc[3]={0}; stbi__uint16 tc16[3]; stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; int first=1,k,interlace=0, color=0, is_iphone=0; stbi__context *s = z->s; z->expanded = NULL; z->idata = NULL; z->out = NULL; if (!stbi__check_png_header(s)) return 0; if (scan == STBI__SCAN_type) return 1; for (;;) { stbi__pngchunk c = stbi__get_chunk_header(s); switch (c.type) { case STBI__PNG_TYPE('C','g','B','I'): is_iphone = 1; stbi__skip(s, c.length); break; case STBI__PNG_TYPE('I','H','D','R'): { int comp,filter; if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); first = 0; if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); s->img_x = stbi__get32be(s); s->img_y = stbi__get32be(s); if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); if (!pal_img_n) { s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); } else { // if paletted, then pal_n is our final components, and // img_n is # components to decompress/filter. 
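// (For reference, the branch above maps the IHDR color type to a channel
//  count via s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0):
//    0 = grayscale          -> 1
//    2 = truecolor          -> 3
//    4 = grayscale + alpha  -> 2
//    6 = truecolor + alpha  -> 4
//  color type 3 (paletted) takes this branch instead and is filtered as a
//  single channel, then expanded from the palette after IEND.)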
s->img_n = 1; if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); } // even with SCAN_header, have to scan to see if we have a tRNS break; } case STBI__PNG_TYPE('P','L','T','E'): { if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); pal_len = c.length / 3; if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); for (i=0; i < pal_len; ++i) { palette[i*4+0] = stbi__get8(s); palette[i*4+1] = stbi__get8(s); palette[i*4+2] = stbi__get8(s); palette[i*4+3] = 255; } break; } case STBI__PNG_TYPE('t','R','N','S'): { if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); if (pal_img_n) { if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); pal_img_n = 4; for (i=0; i < c.length; ++i) palette[i*4+3] = stbi__get8(s); } else { if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); has_trans = 1; // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. if (scan == STBI__SCAN_header) { ++s->img_n; return 1; } if (z->depth == 16) { for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is } else { for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger } } break; } case STBI__PNG_TYPE('I','D','A','T'): { if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); if (scan == STBI__SCAN_header) { // header scan definitely stops at first IDAT if (pal_img_n) s->img_n = pal_img_n; return 1; } if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); if ((int)(ioff + c.length) < (int)ioff) return 0; if (ioff + c.length > idata_limit) { stbi__uint32 idata_limit_old = idata_limit; stbi_uc *p; if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; while (ioff + c.length > idata_limit) idata_limit *= 2; STBI_NOTUSED(idata_limit_old); p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); z->idata = p; } if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); ioff += c.length; break; } case STBI__PNG_TYPE('I','E','N','D'): { stbi__uint32 raw_len, bpl; if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if (scan != STBI__SCAN_load) return 1; if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); // initial guess for decoded data size to avoid unnecessary reallocs bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); if (z->expanded == NULL) return 0; // zlib should set error STBI_FREE(z->idata); z->idata = NULL; if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) s->img_out_n = s->img_n+1; else s->img_out_n = s->img_n; if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; if (has_trans) { if (z->depth == 16) { if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; } else { if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; } } if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) stbi__de_iphone(z); if (pal_img_n) { // pal_img_n == 3 or 4 s->img_n = pal_img_n; // record the actual colors we had s->img_out_n = pal_img_n; if (req_comp >= 3) s->img_out_n = req_comp; if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) return 0; } else if (has_trans) { // non-paletted image with tRNS -> source image has (constant) alpha ++s->img_n; } STBI_FREE(z->expanded); z->expanded = NULL; // end of PNG chunk, read and skip CRC stbi__get32be(s); return 1; } default: // if critical, fail if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if ((c.type & (1 << 29)) == 0) { #ifndef STBI_NO_FAILURE_STRINGS // not threadsafe static char invalid_chunk[] = "XXXX PNG chunk not known"; invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); #endif return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); } stbi__skip(s, c.length); break; } // end of PNG chunk, read and skip CRC stbi__get32be(s); } } static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) { void *result=NULL; if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { if (p->depth <= 8) ri->bits_per_channel = 8; else if (p->depth == 16) ri->bits_per_channel = 16; else return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); result = p->out; p->out = NULL; if (req_comp && req_comp != p->s->img_out_n) { if (ri->bits_per_channel == 8) result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); else result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); p->s->img_out_n = req_comp; if (result == NULL) return result; } *x = p->s->img_x; *y = p->s->img_y; if (n) *n = 
p->s->img_n; } STBI_FREE(p->out); p->out = NULL; STBI_FREE(p->expanded); p->expanded = NULL; STBI_FREE(p->idata); p->idata = NULL; return result; } static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi__png p; p.s = s; return stbi__do_png(&p, x,y,comp,req_comp, ri); } static int stbi__png_test(stbi__context *s) { int r; r = stbi__check_png_header(s); stbi__rewind(s); return r; } static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) { if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { stbi__rewind( p->s ); return 0; } if (x) *x = p->s->img_x; if (y) *y = p->s->img_y; if (comp) *comp = p->s->img_n; return 1; } static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) { stbi__png p; p.s = s; return stbi__png_info_raw(&p, x, y, comp); } static int stbi__png_is16(stbi__context *s) { stbi__png p; p.s = s; if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) return 0; if (p.depth != 16) { stbi__rewind(p.s); return 0; } return 1; } #endif // Microsoft/Windows BMP image #ifndef STBI_NO_BMP static int stbi__bmp_test_raw(stbi__context *s) { int r; int sz; if (stbi__get8(s) != 'B') return 0; if (stbi__get8(s) != 'M') return 0; stbi__get32le(s); // discard filesize stbi__get16le(s); // discard reserved stbi__get16le(s); // discard reserved stbi__get32le(s); // discard data offset sz = stbi__get32le(s); r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); return r; } static int stbi__bmp_test(stbi__context *s) { int r = stbi__bmp_test_raw(s); stbi__rewind(s); return r; } // returns 0..31 for the highest set bit static int stbi__high_bit(unsigned int z) { int n=0; if (z == 0) return -1; if (z >= 0x10000) { n += 16; z >>= 16; } if (z >= 0x00100) { n += 8; z >>= 8; } if (z >= 0x00010) { n += 4; z >>= 4; } if (z >= 0x00004) { n += 2; z >>= 2; } if (z >= 0x00002) { n += 1;/* >>= 1;*/ } return n; } static int stbi__bitcount(unsigned int a) { a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits a = (a + (a >> 8)); // max 16 per 8 bits a = (a + (a >> 16)); // max 32 per 8 bits return a & 0xff; } // extract an arbitrarily-aligned N-bit value (N=bits) // from v, and then make it 8-bits long and fractionally // extend it to full full range. 
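// Worked example for the helper below: a 5-bit field whose high bit has been
// shifted into bit 7 leaves v in 0..31 after the v >>= (8-bits) step; the
// table multiply then stretches it to the full 0..255 range
// (mul_table[5] = 0x21, shift_table[5] = 2):
//   v = 31 -> (31 * 0x21) >> 2 = 1023 >> 2 = 255
//   v = 16 -> (16 * 0x21) >> 2 =  528 >> 2 = 132   (~ 16/31 * 255 = 131.6)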
static int stbi__shiftsigned(unsigned int v, int shift, int bits) { static unsigned int mul_table[9] = { 0, 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, }; static unsigned int shift_table[9] = { 0, 0,0,1,0,2,4,6,0, }; if (shift < 0) v <<= -shift; else v >>= shift; STBI_ASSERT(v < 256); v >>= (8-bits); STBI_ASSERT(bits >= 0 && bits <= 8); return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; } typedef struct { int bpp, offset, hsz; unsigned int mr,mg,mb,ma, all_a; int extra_read; } stbi__bmp_data; static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) { // BI_BITFIELDS specifies masks explicitly, don't override if (compress == 3) return 1; if (compress == 0) { if (info->bpp == 16) { info->mr = 31u << 10; info->mg = 31u << 5; info->mb = 31u << 0; } else if (info->bpp == 32) { info->mr = 0xffu << 16; info->mg = 0xffu << 8; info->mb = 0xffu << 0; info->ma = 0xffu << 24; info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 } else { // otherwise, use defaults, which is all-0 info->mr = info->mg = info->mb = info->ma = 0; } return 1; } return 0; // error } static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) { int hsz; if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); stbi__get32le(s); // discard filesize stbi__get16le(s); // discard reserved stbi__get16le(s); // discard reserved info->offset = stbi__get32le(s); info->hsz = hsz = stbi__get32le(s); info->mr = info->mg = info->mb = info->ma = 0; info->extra_read = 14; if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); if (hsz == 12) { s->img_x = stbi__get16le(s); s->img_y = stbi__get16le(s); } else { s->img_x = stbi__get32le(s); s->img_y = stbi__get32le(s); } if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); info->bpp = stbi__get16le(s); if (hsz != 12) { int compress = stbi__get32le(s); if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel stbi__get32le(s); // discard sizeof stbi__get32le(s); // discard hres stbi__get32le(s); // discard vres stbi__get32le(s); // discard colorsused stbi__get32le(s); // discard max important if (hsz == 40 || hsz == 56) { if (hsz == 56) { stbi__get32le(s); stbi__get32le(s); stbi__get32le(s); stbi__get32le(s); } if (info->bpp == 16 || info->bpp == 32) { if (compress == 0) { stbi__bmp_set_mask_defaults(info, compress); } else if (compress == 3) { info->mr = stbi__get32le(s); info->mg = stbi__get32le(s); info->mb = stbi__get32le(s); info->extra_read += 12; // not documented, but generated by photoshop and handled by mspaint if (info->mr == info->mg && info->mg == info->mb) { // ?!?!? 
return stbi__errpuc("bad BMP", "bad BMP"); } } else return stbi__errpuc("bad BMP", "bad BMP"); } } else { // V4/V5 header int i; if (hsz != 108 && hsz != 124) return stbi__errpuc("bad BMP", "bad BMP"); info->mr = stbi__get32le(s); info->mg = stbi__get32le(s); info->mb = stbi__get32le(s); info->ma = stbi__get32le(s); if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs stbi__bmp_set_mask_defaults(info, compress); stbi__get32le(s); // discard color space for (i=0; i < 12; ++i) stbi__get32le(s); // discard color space parameters if (hsz == 124) { stbi__get32le(s); // discard rendering intent stbi__get32le(s); // discard offset of profile data stbi__get32le(s); // discard size of profile data stbi__get32le(s); // discard reserved } } } return (void *) 1; } static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi_uc *out; unsigned int mr=0,mg=0,mb=0,ma=0, all_a; stbi_uc pal[256][4]; int psize=0,i,j,width; int flip_vertically, pad, target; stbi__bmp_data info; STBI_NOTUSED(ri); info.all_a = 255; if (stbi__bmp_parse_header(s, &info) == NULL) return NULL; // error code already set flip_vertically = ((int) s->img_y) > 0; s->img_y = abs((int) s->img_y); if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); mr = info.mr; mg = info.mg; mb = info.mb; ma = info.ma; all_a = info.all_a; if (info.hsz == 12) { if (info.bpp < 24) psize = (info.offset - info.extra_read - 24) / 3; } else { if (info.bpp < 16) psize = (info.offset - info.extra_read - info.hsz) >> 2; } if (psize == 0) { // accept some number of extra bytes after the header, but if the offset points either to before // the header ends or implies a large amount of extra data, reject the file as malformed int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); int header_limit = 1024; // max we actually read is below 256 bytes currently. int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { return stbi__errpuc("bad header", "Corrupt BMP"); } // we established that bytes_read_so_far is positive and sensible. // the first half of this test rejects offsets that are either too small positives, or // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn // ensures the number computed in the second half of the test can't overflow. if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { return stbi__errpuc("bad offset", "Corrupt BMP"); } else { stbi__skip(s, info.offset - bytes_read_so_far); } } if (info.bpp == 24 && ma == 0xff000000) s->img_n = 3; else s->img_n = ma ? 
4 : 3; if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 target = req_comp; else target = s->img_n; // if they want monochrome, we'll post-convert // sanity-check size if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) return stbi__errpuc("too large", "Corrupt BMP"); out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); if (!out) return stbi__errpuc("outofmem", "Out of memory"); if (info.bpp < 16) { int z=0; if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } for (i=0; i < psize; ++i) { pal[i][2] = stbi__get8(s); pal[i][1] = stbi__get8(s); pal[i][0] = stbi__get8(s); if (info.hsz != 12) stbi__get8(s); pal[i][3] = 255; } stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); if (info.bpp == 1) width = (s->img_x + 7) >> 3; else if (info.bpp == 4) width = (s->img_x + 1) >> 1; else if (info.bpp == 8) width = s->img_x; else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } pad = (-width)&3; if (info.bpp == 1) { for (j=0; j < (int) s->img_y; ++j) { int bit_offset = 7, v = stbi__get8(s); for (i=0; i < (int) s->img_x; ++i) { int color = (v>>bit_offset)&0x1; out[z++] = pal[color][0]; out[z++] = pal[color][1]; out[z++] = pal[color][2]; if (target == 4) out[z++] = 255; if (i+1 == (int) s->img_x) break; if((--bit_offset) < 0) { bit_offset = 7; v = stbi__get8(s); } } stbi__skip(s, pad); } } else { for (j=0; j < (int) s->img_y; ++j) { for (i=0; i < (int) s->img_x; i += 2) { int v=stbi__get8(s),v2=0; if (info.bpp == 4) { v2 = v & 15; v >>= 4; } out[z++] = pal[v][0]; out[z++] = pal[v][1]; out[z++] = pal[v][2]; if (target == 4) out[z++] = 255; if (i+1 == (int) s->img_x) break; v = (info.bpp == 8) ? stbi__get8(s) : v2; out[z++] = pal[v][0]; out[z++] = pal[v][1]; out[z++] = pal[v][2]; if (target == 4) out[z++] = 255; } stbi__skip(s, pad); } } } else { int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; int z = 0; int easy=0; stbi__skip(s, info.offset - info.extra_read - info.hsz); if (info.bpp == 24) width = 3 * s->img_x; else if (info.bpp == 16) width = 2*s->img_x; else /* bpp = 32 and pad = 0 */ width=0; pad = (-width) & 3; if (info.bpp == 24) { easy = 1; } else if (info.bpp == 32) { if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) easy = 2; } if (!easy) { if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } // right shift amt to put high bit in position #7 rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } } for (j=0; j < (int) s->img_y; ++j) { if (easy) { for (i=0; i < (int) s->img_x; ++i) { unsigned char a; out[z+2] = stbi__get8(s); out[z+1] = stbi__get8(s); out[z+0] = stbi__get8(s); z += 3; a = (easy == 2 ? stbi__get8(s) : 255); all_a |= a; if (target == 4) out[z++] = a; } } else { int bpp = info.bpp; for (i=0; i < (int) s->img_x; ++i) { stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); unsigned int a; out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); all_a |= a; if (target == 4) out[z++] = STBI__BYTECAST(a); } } stbi__skip(s, pad); } } // if alpha channel is all 0s, replace with all 255s if (target == 4 && all_a == 0) for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) out[i] = 255; if (flip_vertically) { stbi_uc t; for (j=0; j < (int) s->img_y>>1; ++j) { stbi_uc *p1 = out + j *s->img_x*target; stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; for (i=0; i < (int) s->img_x*target; ++i) { t = p1[i]; p1[i] = p2[i]; p2[i] = t; } } } if (req_comp && req_comp != target) { out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); if (out == NULL) return out; // stbi__convert_format frees input on failure } *x = s->img_x; *y = s->img_y; if (comp) *comp = s->img_n; return out; } #endif // Targa Truevision - TGA // by Jonathan Dummer #ifndef STBI_NO_TGA // returns STBI_rgb or whatever, 0 on error static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) { // only RGB or RGBA (incl. 16bit) or grey allowed if (is_rgb16) *is_rgb16 = 0; switch(bits_per_pixel) { case 8: return STBI_grey; case 16: if(is_grey) return STBI_grey_alpha; // fallthrough case 15: if(is_rgb16) *is_rgb16 = 1; return STBI_rgb; case 24: // fallthrough case 32: return bits_per_pixel/8; default: return 0; } } static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) { int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; int sz, tga_colormap_type; stbi__get8(s); // discard Offset tga_colormap_type = stbi__get8(s); // colormap type if( tga_colormap_type > 1 ) { stbi__rewind(s); return 0; // only RGB or indexed allowed } tga_image_type = stbi__get8(s); // image type if ( tga_colormap_type == 1 ) { // colormapped (paletted) image if (tga_image_type != 1 && tga_image_type != 9) { stbi__rewind(s); return 0; } stbi__skip(s,4); // skip index of first colormap entry and number of entries sz = stbi__get8(s); // check bits per palette color entry if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { stbi__rewind(s); return 0; } stbi__skip(s,4); // skip image x and y origin tga_colormap_bpp = sz; } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { stbi__rewind(s); return 0; // only RGB or grey allowed, +/- RLE } stbi__skip(s,9); // skip colormap specification and image x/y origin tga_colormap_bpp = 0; } tga_w = stbi__get16le(s); if( tga_w < 1 ) { stbi__rewind(s); return 0; // test width } tga_h = stbi__get16le(s); if( tga_h < 1 ) { stbi__rewind(s); return 0; // test height } tga_bits_per_pixel = stbi__get8(s); // bits per pixel stbi__get8(s); // ignore alpha bits if (tga_colormap_bpp != 0) { if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { // when using a colormap, tga_bits_per_pixel is the size of the indexes // I don't think anything but 8 or 16bit indexes makes sense stbi__rewind(s); return 0; } tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); } else { tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); } if(!tga_comp) { stbi__rewind(s); return 0; } if (x) *x = tga_w; if (y) *y = tga_h; if (comp) *comp = tga_comp; return 1; // seems to have passed everything } static int stbi__tga_test(stbi__context *s) { int res = 0; int sz, tga_color_type; stbi__get8(s); // discard Offset tga_color_type = stbi__get8(s); // color type if ( tga_color_type > 1 ) 
goto errorEnd; // only RGB or indexed allowed sz = stbi__get8(s); // image type if ( tga_color_type == 1 ) { // colormapped (paletted) image if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 stbi__skip(s,4); // skip index of first colormap entry and number of entries sz = stbi__get8(s); // check bits per palette color entry if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; stbi__skip(s,4); // skip image x and y origin } else { // "normal" image w/o colormap if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE stbi__skip(s,9); // skip colormap specification and image x/y origin } if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height sz = stbi__get8(s); // bits per pixel if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; res = 1; // if we got this far, everything's good and we can return 1 instead of 0 errorEnd: stbi__rewind(s); return res; } // read 16bit value and convert to 24bit RGB static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) { stbi__uint16 px = (stbi__uint16)stbi__get16le(s); stbi__uint16 fiveBitMask = 31; // we have 3 channels with 5bits each int r = (px >> 10) & fiveBitMask; int g = (px >> 5) & fiveBitMask; int b = px & fiveBitMask; // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later out[0] = (stbi_uc)((r * 255)/31); out[1] = (stbi_uc)((g * 255)/31); out[2] = (stbi_uc)((b * 255)/31); // some people claim that the most significant bit might be used for alpha // (possibly if an alpha-bit is set in the "image descriptor byte") // but that only made 16bit test images completely translucent.. // so let's treat all 15 and 16bit TGAs as RGB with no alpha. } static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { // read in the TGA header stuff int tga_offset = stbi__get8(s); int tga_indexed = stbi__get8(s); int tga_image_type = stbi__get8(s); int tga_is_RLE = 0; int tga_palette_start = stbi__get16le(s); int tga_palette_len = stbi__get16le(s); int tga_palette_bits = stbi__get8(s); int tga_x_origin = stbi__get16le(s); int tga_y_origin = stbi__get16le(s); int tga_width = stbi__get16le(s); int tga_height = stbi__get16le(s); int tga_bits_per_pixel = stbi__get8(s); int tga_comp, tga_rgb16=0; int tga_inverted = stbi__get8(s); // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
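// note: the fields read above are the standard 18-byte TGA header, in order: 1-byte id length (tga_offset), 1-byte colormap type,
// 1-byte image type, 5-byte colormap spec (first entry index, length, entry size) and 10-byte image spec
// (x/y origin, width, height, bits per pixel, descriptor byte -- bit 5 of the descriptor gives the vertical orientation).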
// image data unsigned char *tga_data; unsigned char *tga_palette = NULL; int i, j; unsigned char raw_data[4] = {0}; int RLE_count = 0; int RLE_repeating = 0; int read_next_pixel = 1; STBI_NOTUSED(ri); STBI_NOTUSED(tga_x_origin); // @TODO STBI_NOTUSED(tga_y_origin); // @TODO if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); // do a tiny bit of preprocessing if ( tga_image_type >= 8 ) { tga_image_type -= 8; tga_is_RLE = 1; } tga_inverted = 1 - ((tga_inverted >> 5) & 1); // If I'm paletted, then I'll use the number of bits from the palette if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); // tga info *x = tga_width; *y = tga_height; if (comp) *comp = tga_comp; if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) return stbi__errpuc("too large", "Corrupt TGA"); tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); // skip to the data's starting position (offset usually = 0) stbi__skip(s, tga_offset ); if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { for (i=0; i < tga_height; ++i) { int row = tga_inverted ? tga_height -i - 1 : i; stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; stbi__getn(s, tga_row, tga_width * tga_comp); } } else { // do I need to load a palette? if ( tga_indexed) { if (tga_palette_len == 0) { /* you have to have at least one entry! */ STBI_FREE(tga_data); return stbi__errpuc("bad palette", "Corrupt TGA"); } // any data to skip? (offset usually = 0) stbi__skip(s, tga_palette_start ); // load the palette tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); if (!tga_palette) { STBI_FREE(tga_data); return stbi__errpuc("outofmem", "Out of memory"); } if (tga_rgb16) { stbi_uc *pal_entry = tga_palette; STBI_ASSERT(tga_comp == STBI_rgb); for (i=0; i < tga_palette_len; ++i) { stbi__tga_read_rgb16(s, pal_entry); pal_entry += tga_comp; } } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { STBI_FREE(tga_data); STBI_FREE(tga_palette); return stbi__errpuc("bad palette", "Corrupt TGA"); } } // load the data for (i=0; i < tga_width * tga_height; ++i) { // if I'm in RLE mode, do I need to get a RLE chunk? if ( tga_is_RLE ) { if ( RLE_count == 0 ) { // yep, get the next byte as a RLE command int RLE_cmd = stbi__get8(s); RLE_count = 1 + (RLE_cmd & 127); RLE_repeating = RLE_cmd >> 7; read_next_pixel = 1; } else if ( !RLE_repeating ) { read_next_pixel = 1; } } else { read_next_pixel = 1; } // OK, if I need to read a pixel, do it now if ( read_next_pixel ) { // load however much data we did have if ( tga_indexed ) { // read in index, then perform the lookup int pal_idx = (tga_bits_per_pixel == 8) ?
stbi__get8(s) : stbi__get16le(s); if ( pal_idx >= tga_palette_len ) { // invalid index pal_idx = 0; } pal_idx *= tga_comp; for (j = 0; j < tga_comp; ++j) { raw_data[j] = tga_palette[pal_idx+j]; } } else if(tga_rgb16) { STBI_ASSERT(tga_comp == STBI_rgb); stbi__tga_read_rgb16(s, raw_data); } else { // read in the data raw for (j = 0; j < tga_comp; ++j) { raw_data[j] = stbi__get8(s); } } // clear the reading flag for the next pixel read_next_pixel = 0; } // end of reading a pixel // copy data for (j = 0; j < tga_comp; ++j) tga_data[i*tga_comp+j] = raw_data[j]; // in case we're in RLE mode, keep counting down --RLE_count; } // do I need to invert the image? if ( tga_inverted ) { for (j = 0; j*2 < tga_height; ++j) { int index1 = j * tga_width * tga_comp; int index2 = (tga_height - 1 - j) * tga_width * tga_comp; for (i = tga_width * tga_comp; i > 0; --i) { unsigned char temp = tga_data[index1]; tga_data[index1] = tga_data[index2]; tga_data[index2] = temp; ++index1; ++index2; } } } // clear my palette, if I had one if ( tga_palette != NULL ) { STBI_FREE( tga_palette ); } } // swap RGB - if the source data was RGB16, it already is in the right order if (tga_comp >= 3 && !tga_rgb16) { unsigned char* tga_pixel = tga_data; for (i=0; i < tga_width * tga_height; ++i) { unsigned char temp = tga_pixel[0]; tga_pixel[0] = tga_pixel[2]; tga_pixel[2] = temp; tga_pixel += tga_comp; } } // convert to target component count if (req_comp && req_comp != tga_comp) tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); // the things I do to get rid of an error message, and yet keep // Microsoft's C compilers happy... [8^( tga_palette_start = tga_palette_len = tga_palette_bits = tga_x_origin = tga_y_origin = 0; STBI_NOTUSED(tga_palette_start); // OK, done return tga_data; } #endif // ************************************************************************************************* // Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB #ifndef STBI_NO_PSD static int stbi__psd_test(stbi__context *s) { int r = (stbi__get32be(s) == 0x38425053); stbi__rewind(s); return r; } static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) { int count, nleft, len; count = 0; while ((nleft = pixelCount - count) > 0) { len = stbi__get8(s); if (len == 128) { // No-op. } else if (len < 128) { // Copy next len+1 bytes literally. len++; if (len > nleft) return 0; // corrupt data count += len; while (len) { *p = stbi__get8(s); p += 4; len--; } } else if (len > 128) { stbi_uc val; // Next -len+1 bytes in the dest are replicated from next source byte. // (Interpret len as a negative 8-bit int.) len = 257 - len; if (len > nleft) return 0; // corrupt data val = stbi__get8(s); count += len; while (len) { *p = val; p += 4; len--; } } } return 1; } static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) { int pixelCount; int channelCount, compression; int channel, i; int bitdepth; int w,h; stbi_uc *out; STBI_NOTUSED(ri); // Check identifier if (stbi__get32be(s) != 0x38425053) // "8BPS" return stbi__errpuc("not PSD", "Corrupt PSD image"); // Check file type version. if (stbi__get16be(s) != 1) return stbi__errpuc("wrong version", "Unsupported version of PSD image"); // Skip 6 reserved bytes. stbi__skip(s, 6 ); // Read the number of channels (R, G, B, A, etc). 
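// (header layout so far, all big-endian: 4-byte "8BPS" signature, 2-byte version, 6 reserved bytes; the remaining
// fields read below are the 2-byte channel count, 4-byte height, 4-byte width, 2-byte bit depth and 2-byte color mode.)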
channelCount = stbi__get16be(s); if (channelCount < 0 || channelCount > 16) return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); // Read the rows and columns of the image. h = stbi__get32be(s); w = stbi__get32be(s); if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); // Make sure the depth is 8 bits. bitdepth = stbi__get16be(s); if (bitdepth != 8 && bitdepth != 16) return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); // Make sure the color mode is RGB. // Valid options are: // 0: Bitmap // 1: Grayscale // 2: Indexed color // 3: RGB color // 4: CMYK color // 7: Multichannel // 8: Duotone // 9: Lab color if (stbi__get16be(s) != 3) return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) stbi__skip(s,stbi__get32be(s) ); // Skip the image resources. (resolution, pen tool paths, etc) stbi__skip(s, stbi__get32be(s) ); // Skip the reserved data. stbi__skip(s, stbi__get32be(s) ); // Find out if the data is compressed. // Known values: // 0: no compression // 1: RLE compressed compression = stbi__get16be(s); if (compression > 1) return stbi__errpuc("bad compression", "PSD has an unknown compression format"); // Check size if (!stbi__mad3sizes_valid(4, w, h, 0)) return stbi__errpuc("too large", "Corrupt PSD"); // Create the destination image. if (!compression && bitdepth == 16 && bpc == 16) { out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); ri->bits_per_channel = 16; } else out = (stbi_uc *) stbi__malloc(4 * w*h); if (!out) return stbi__errpuc("outofmem", "Out of memory"); pixelCount = w*h; // Initialize the data to zero. //memset( out, 0, pixelCount * 4 ); // Finally, the image data. if (compression) { // RLE as used by .PSD and .TIFF // Loop until you get the number of unpacked bytes you are expecting: // Read the next source byte into n. // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. // Else if n is 128, noop. // Endloop // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, // which we're going to just skip. stbi__skip(s, h * channelCount * 2 ); // Read the RLE data by channel. for (channel = 0; channel < 4; channel++) { stbi_uc *p; p = out+channel; if (channel >= channelCount) { // Fill this channel with default data. for (i = 0; i < pixelCount; i++, p += 4) *p = (channel == 3 ? 255 : 0); } else { // Read the RLE data. if (!stbi__psd_decode_rle(s, p, pixelCount)) { STBI_FREE(out); return stbi__errpuc("corrupt", "bad RLE data"); } } } } else { // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. // Read the data by channel. for (channel = 0; channel < 4; channel++) { if (channel >= channelCount) { // Fill this channel with default data. if (bitdepth == 16 && bpc == 16) { stbi__uint16 *q = ((stbi__uint16 *) out) + channel; stbi__uint16 val = channel == 3 ? 65535 : 0; for (i = 0; i < pixelCount; i++, q += 4) *q = val; } else { stbi_uc *p = out+channel; stbi_uc val = channel == 3 ? 
255 : 0; for (i = 0; i < pixelCount; i++, p += 4) *p = val; } } else { if (ri->bits_per_channel == 16) { // output bpc stbi__uint16 *q = ((stbi__uint16 *) out) + channel; for (i = 0; i < pixelCount; i++, q += 4) *q = (stbi__uint16) stbi__get16be(s); } else { stbi_uc *p = out+channel; if (bitdepth == 16) { // input bpc for (i = 0; i < pixelCount; i++, p += 4) *p = (stbi_uc) (stbi__get16be(s) >> 8); } else { for (i = 0; i < pixelCount; i++, p += 4) *p = stbi__get8(s); } } } } } // remove weird white matte from PSD if (channelCount >= 4) { if (ri->bits_per_channel == 16) { for (i=0; i < w*h; ++i) { stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; if (pixel[3] != 0 && pixel[3] != 65535) { float a = pixel[3] / 65535.0f; float ra = 1.0f / a; float inv_a = 65535.0f * (1 - ra); pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); } } } else { for (i=0; i < w*h; ++i) { unsigned char *pixel = out + 4*i; if (pixel[3] != 0 && pixel[3] != 255) { float a = pixel[3] / 255.0f; float ra = 1.0f / a; float inv_a = 255.0f * (1 - ra); pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); } } } } // convert to desired output format if (req_comp && req_comp != 4) { if (ri->bits_per_channel == 16) out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); else out = stbi__convert_format(out, 4, req_comp, w, h); if (out == NULL) return out; // stbi__convert_format frees input on failure } if (comp) *comp = 4; *y = h; *x = w; return out; } #endif // ************************************************************************************************* // Softimage PIC loader // by Tom Seddon // // See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format // See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ #ifndef STBI_NO_PIC static int stbi__pic_is4(stbi__context *s,const char *str) { int i; for (i=0; i<4; ++i) if (stbi__get8(s) != (stbi_uc)str[i]) return 0; return 1; } static int stbi__pic_test_core(stbi__context *s) { int i; if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) return 0; for(i=0;i<84;++i) stbi__get8(s); if (!stbi__pic_is4(s,"PICT")) return 0; return 1; } typedef struct { stbi_uc size,type,channel; } stbi__pic_packet; static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) { int mask=0x80, i; for (i=0; i<4; ++i, mask>>=1) { if (channel & mask) { if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); dest[i]=stbi__get8(s); } } return dest; } static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) { int mask=0x80,i; for (i=0;i<4; ++i, mask>>=1) if (channel&mask) dest[i]=src[i]; } static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) { int act_comp=0,num_packets=0,y,chained; stbi__pic_packet packets[10]; // this will (should...) cater for even some bizarre stuff like having data // for the same channel in multiple packets. 
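// each packet header is 4 bytes: a chained flag (non-zero means another packet header follows), the per-channel bit
// depth (only 8 is accepted here), the compression type (0 = uncompressed, 1 = pure RLE, 2 = mixed RLE) and a channel
// bitmask that is OR'd into act_comp to determine the overall component count.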
do { stbi__pic_packet *packet; if (num_packets==sizeof(packets)/sizeof(packets[0])) return stbi__errpuc("bad format","too many packets"); packet = &packets[num_packets++]; chained = stbi__get8(s); packet->size = stbi__get8(s); packet->type = stbi__get8(s); packet->channel = stbi__get8(s); act_comp |= packet->channel; if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); } while (chained); *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? for(y=0; y<height; ++y) { int packet_idx; for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { stbi__pic_packet *packet = &packets[packet_idx]; stbi_uc *dest = result+y*width*4; switch (packet->type) { default: return stbi__errpuc("bad format","packet has bad compression type"); case 0: {//uncompressed int x; for(x=0;x<width;++x, dest+=4) if (!stbi__readval(s,packet->channel,dest)) return 0; break; } case 1://Pure RLE { int left=width, i; while (left>0) { stbi_uc count,value[4]; count=stbi__get8(s); if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); if (count > left) count = (stbi_uc) left; if (!stbi__readval(s,packet->channel,value)) return 0; for(i=0; i<count; ++i,dest+=4) stbi__copyval(packet->channel,dest,value); left -= count; } } break; case 2: {//Mixed RLE int left=width; while (left>0) { int count = stbi__get8(s), i; if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); if (count >= 128) { // Repeated stbi_uc value[4]; if (count==128) count = stbi__get16be(s); else count -= 127; if (count > left) return stbi__errpuc("bad file","scanline overrun"); if (!stbi__readval(s,packet->channel,value)) return 0; for(i=0;i<count;++i, dest += 4) stbi__copyval(packet->channel,dest,value); } else { // Raw ++count; if (count>left) return stbi__errpuc("bad file","scanline overrun"); for(i=0;i<count;++i, dest+=4) if (!stbi__readval(s,packet->channel,dest)) return 0; } left-=count; } break; } } } } return result; } static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) { stbi_uc *result; int i, x,y, internal_comp; STBI_NOTUSED(ri); if (!comp) comp = &internal_comp; for (i=0; i<92; ++i) stbi__get8(s); x = stbi__get16be(s); y = stbi__get16be(s); if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); stbi__get32be(s); //skip `ratio' stbi__get16be(s); //skip `fields' stbi__get16be(s); //skip `pad' // intermediate buffer is RGBA result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); if (!result) return stbi__errpuc("outofmem", "Out of memory"); memset(result, 0xff, x*y*4); if (!stbi__pic_load_core(s,x,y,comp, result)) { STBI_FREE(result); result=0; } *px = x; *py = y; if (req_comp == 0) req_comp = *comp; result=stbi__convert_format(result,4,req_comp,x,y); return result; } static int stbi__pic_test(stbi__context *s) { int r = stbi__pic_test_core(s); stbi__rewind(s); return r; } #endif // ************************************************************************************************* // GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb #ifndef STBI_NO_GIF typedef struct { stbi__int16 prefix; stbi_uc first; stbi_uc suffix; } stbi__gif_lzw; typedef struct { int w,h; stbi_uc *out; // output buffer (always 4 components) stbi_uc *background; // The current "background" as far as a gif is concerned stbi_uc *history; int flags, bgindex, ratio, transparent, eflags; stbi_uc pal[256][4]; stbi_uc lpal[256][4]; stbi__gif_lzw codes[8192]; stbi_uc *color_table; int
parse, step; int lflags; int start_x, start_y; int max_x, max_y; int cur_x, cur_y; int line_size; int delay; } stbi__gif; static int stbi__gif_test_raw(stbi__context *s) { int sz; if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; sz = stbi__get8(s); if (sz != '9' && sz != '7') return 0; if (stbi__get8(s) != 'a') return 0; return 1; } static int stbi__gif_test(stbi__context *s) { int r = stbi__gif_test_raw(s); stbi__rewind(s); return r; } static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) { int i; for (i=0; i < num_entries; ++i) { pal[i][2] = stbi__get8(s); pal[i][1] = stbi__get8(s); pal[i][0] = stbi__get8(s); pal[i][3] = transp == i ? 0 : 255; } } static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) { stbi_uc version; if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return stbi__err("not GIF", "Corrupt GIF"); version = stbi__get8(s); if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); stbi__g_failure_reason = ""; g->w = stbi__get16le(s); g->h = stbi__get16le(s); g->flags = stbi__get8(s); g->bgindex = stbi__get8(s); g->ratio = stbi__get8(s); g->transparent = -1; if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments if (is_info) return 1; if (g->flags & 0x80) stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); return 1; } static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) { stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); if (!g) return stbi__err("outofmem", "Out of memory"); if (!stbi__gif_header(s, g, comp, 1)) { STBI_FREE(g); stbi__rewind( s ); return 0; } if (x) *x = g->w; if (y) *y = g->h; STBI_FREE(g); return 1; } static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) { stbi_uc *p, *c; int idx; // recurse to decode the prefixes, since the linked-list is backwards, // and working backwards through an interleaved image would be nasty if (g->codes[code].prefix >= 0) stbi__out_gif_code(g, g->codes[code].prefix); if (g->cur_y >= g->max_y) return; idx = g->cur_x + g->cur_y; p = &g->out[idx]; g->history[idx / 4] = 1; c = &g->color_table[g->codes[code].suffix * 4]; if (c[3] > 128) { // don't render transparent pixels; p[0] = c[2]; p[1] = c[1]; p[2] = c[0]; p[3] = c[3]; } g->cur_x += 4; if (g->cur_x >= g->max_x) { g->cur_x = g->start_x; g->cur_y += g->step; while (g->cur_y >= g->max_y && g->parse > 0) { g->step = (1 << g->parse) * g->line_size; g->cur_y = g->start_y + (g->step >> 1); --g->parse; } } } static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) { stbi_uc lzw_cs; stbi__int32 len, init_code; stbi__uint32 first; stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; stbi__gif_lzw *p; lzw_cs = stbi__get8(s); if (lzw_cs > 12) return NULL; clear = 1 << lzw_cs; first = 1; codesize = lzw_cs + 1; codemask = (1 << codesize) - 1; bits = 0; valid_bits = 0; for (init_code = 0; init_code < clear; init_code++) { g->codes[init_code].prefix = -1; g->codes[init_code].first = (stbi_uc) init_code; g->codes[init_code].suffix = (stbi_uc) init_code; } // support no starting clear code avail = clear+2; 
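// main LZW decode loop: refill the bit buffer from the GIF data sub-blocks until a full code is available, then handle it --
// the clear code resets the code size and table, clear+1 terminates the stream, and any other code emits its string via
// stbi__out_gif_code and (once oldcode is valid) appends one prefix+suffix entry to the table, bumping the code size
// whenever the table fills the current code mask.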
oldcode = -1; len = 0; for(;;) { if (valid_bits < codesize) { if (len == 0) { len = stbi__get8(s); // start new block if (len == 0) return g->out; } --len; bits |= (stbi__int32) stbi__get8(s) << valid_bits; valid_bits += 8; } else { stbi__int32 code = bits & codemask; bits >>= codesize; valid_bits -= codesize; // @OPTIMIZE: is there some way we can accelerate the non-clear path? if (code == clear) { // clear code codesize = lzw_cs + 1; codemask = (1 << codesize) - 1; avail = clear + 2; oldcode = -1; first = 0; } else if (code == clear + 1) { // end of stream code stbi__skip(s, len); while ((len = stbi__get8(s)) > 0) stbi__skip(s,len); return g->out; } else if (code <= avail) { if (first) { return stbi__errpuc("no clear code", "Corrupt GIF"); } if (oldcode >= 0) { p = &g->codes[avail++]; if (avail > 8192) { return stbi__errpuc("too many codes", "Corrupt GIF"); } p->prefix = (stbi__int16) oldcode; p->first = g->codes[oldcode].first; p->suffix = (code == avail) ? p->first : g->codes[code].first; } else if (code == avail) return stbi__errpuc("illegal code in raster", "Corrupt GIF"); stbi__out_gif_code(g, (stbi__uint16) code); if ((avail & codemask) == 0 && avail <= 0x0FFF) { codesize++; codemask = (1 << codesize) - 1; } oldcode = code; } else { return stbi__errpuc("illegal code in raster", "Corrupt GIF"); } } } } // this function is designed to support animated gifs, although stb_image doesn't support it // two back is the image from two frames ago, used for a very specific disposal format static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) { int dispose; int first_frame; int pi; int pcount; STBI_NOTUSED(req_comp); // on first frame, any non-written pixels get the background colour (non-transparent) first_frame = 0; if (g->out == 0) { if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) return stbi__errpuc("too large", "GIF image is too large"); pcount = g->w * g->h; g->out = (stbi_uc *) stbi__malloc(4 * pcount); g->background = (stbi_uc *) stbi__malloc(4 * pcount); g->history = (stbi_uc *) stbi__malloc(pcount); if (!g->out || !g->background || !g->history) return stbi__errpuc("outofmem", "Out of memory"); // image is treated as "transparent" at the start - ie, nothing overwrites the current background; // background colour is only used for pixels that are not rendered first frame, after that "background" // color refers to the color that was there the previous frame. memset(g->out, 0x00, 4 * pcount); memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) memset(g->history, 0x00, pcount); // pixels that were affected previous frame first_frame = 1; } else { // second frame - how do we dispose of the previous one? 
dispose = (g->eflags & 0x1C) >> 2; pcount = g->w * g->h; if ((dispose == 3) && (two_back == 0)) { dispose = 2; // if I don't have an image to revert back to, default to the old background } if (dispose == 3) { // use previous graphic for (pi = 0; pi < pcount; ++pi) { if (g->history[pi]) { memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); } } } else if (dispose == 2) { // restore what was changed last frame to background before that frame; for (pi = 0; pi < pcount; ++pi) { if (g->history[pi]) { memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); } } } else { // This is a non-disposal case either way, so just // leave the pixels as is, and they will become the new background // 1: do not dispose // 0: not specified. } // background is what out is after the undoing of the previous frame; memcpy( g->background, g->out, 4 * g->w * g->h ); } // clear my history; memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame for (;;) { int tag = stbi__get8(s); switch (tag) { case 0x2C: /* Image Descriptor */ { stbi__int32 x, y, w, h; stbi_uc *o; x = stbi__get16le(s); y = stbi__get16le(s); w = stbi__get16le(s); h = stbi__get16le(s); if (((x + w) > (g->w)) || ((y + h) > (g->h))) return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); g->line_size = g->w * 4; g->start_x = x * 4; g->start_y = y * g->line_size; g->max_x = g->start_x + w * 4; g->max_y = g->start_y + h * g->line_size; g->cur_x = g->start_x; g->cur_y = g->start_y; // if the width of the specified rectangle is 0, that means // we may not see *any* pixels or the image is malformed; // to make sure this is caught, move the current y down to // max_y (which is what out_gif_code checks). if (w == 0) g->cur_y = g->max_y; g->lflags = stbi__get8(s); if (g->lflags & 0x40) { g->step = 8 * g->line_size; // first interlaced spacing g->parse = 3; } else { g->step = g->line_size; g->parse = 0; } if (g->lflags & 0x80) { stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); g->color_table = (stbi_uc *) g->lpal; } else if (g->flags & 0x80) { g->color_table = (stbi_uc *) g->pal; } else return stbi__errpuc("missing color table", "Corrupt GIF"); o = stbi__process_gif_raster(s, g); if (!o) return NULL; // if this was the first frame, pcount = g->w * g->h; if (first_frame && (g->bgindex > 0)) { // if first frame, any pixel not drawn to gets the background color for (pi = 0; pi < pcount; ++pi) { if (g->history[pi] == 0) { g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); } } } return o; } case 0x21: // Comment Extension. { int len; int ext = stbi__get8(s); if (ext == 0xF9) { // Graphic Control Extension. len = stbi__get8(s); if (len == 4) { g->eflags = stbi__get8(s); g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths.
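// (the 4-byte Graphic Control Extension body is: 1 byte of packed flags (eflags), a 2-byte little-endian delay in 1/100ths
// of a second, and a transparent palette index byte, which is read or skipped below depending on the transparency flag in eflags.)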
// unset old transparent if (g->transparent >= 0) { g->pal[g->transparent][3] = 255; } if (g->eflags & 0x01) { g->transparent = stbi__get8(s); if (g->transparent >= 0) { g->pal[g->transparent][3] = 0; } } else { // don't need transparent stbi__skip(s, 1); g->transparent = -1; } } else { stbi__skip(s, len); break; } } while ((len = stbi__get8(s)) != 0) { stbi__skip(s, len); } break; } case 0x3B: // gif stream termination code return (stbi_uc *) s; // using '1' causes warning on some compilers default: return stbi__errpuc("unknown code", "Corrupt GIF"); } } } static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) { STBI_FREE(g->out); STBI_FREE(g->history); STBI_FREE(g->background); if (out) STBI_FREE(out); if (delays && *delays) STBI_FREE(*delays); return stbi__errpuc("outofmem", "Out of memory"); } static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) { if (stbi__gif_test(s)) { int layers = 0; stbi_uc *u = 0; stbi_uc *out = 0; stbi_uc *two_back = 0; stbi__gif g; int stride; int out_size = 0; int delays_size = 0; STBI_NOTUSED(out_size); STBI_NOTUSED(delays_size); memset(&g, 0, sizeof(g)); if (delays) { *delays = 0; } do { u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); if (u == (stbi_uc *) s) u = 0; // end of animated gif marker if (u) { *x = g.w; *y = g.h; ++layers; stride = g.w * g.h * 4; if (out) { void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); if (!tmp) return stbi__load_gif_main_outofmem(&g, out, delays); else { out = (stbi_uc*) tmp; out_size = layers * stride; } if (delays) { int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); if (!new_delays) return stbi__load_gif_main_outofmem(&g, out, delays); *delays = new_delays; delays_size = layers * sizeof(int); } } else { out = (stbi_uc*)stbi__malloc( layers * stride ); if (!out) return stbi__load_gif_main_outofmem(&g, out, delays); out_size = layers * stride; if (delays) { *delays = (int*) stbi__malloc( layers * sizeof(int) ); if (!*delays) return stbi__load_gif_main_outofmem(&g, out, delays); delays_size = layers * sizeof(int); } } memcpy( out + ((layers - 1) * stride), u, stride ); if (layers >= 2) { two_back = out - 2 * stride; } if (delays) { (*delays)[layers - 1U] = g.delay; } } } while (u != 0); // free temp buffer; STBI_FREE(g.out); STBI_FREE(g.history); STBI_FREE(g.background); // do the final conversion after loading everything; if (req_comp && req_comp != 4) out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); *z = layers; return out; } else { return stbi__errpuc("not GIF", "Image was not as a gif type."); } } static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi_uc *u = 0; stbi__gif g; memset(&g, 0, sizeof(g)); STBI_NOTUSED(ri); u = stbi__gif_load_next(s, &g, comp, req_comp, 0); if (u == (stbi_uc *) s) u = 0; // end of animated gif marker if (u) { *x = g.w; *y = g.h; // moved conversion to after successful load so that the same // can be done for multiple frames. if (req_comp && req_comp != 4) u = stbi__convert_format(u, 4, req_comp, g.w, g.h); } else if (g.out) { // if there was an error and we allocated an image buffer, free it! 
STBI_FREE(g.out); } // free buffers needed for multiple frame loading; STBI_FREE(g.history); STBI_FREE(g.background); return u; } static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) { return stbi__gif_info_raw(s,x,y,comp); } #endif // ************************************************************************************************* // Radiance RGBE HDR loader // originally by Nicolas Schulz #ifndef STBI_NO_HDR static int stbi__hdr_test_core(stbi__context *s, const char *signature) { int i; for (i=0; signature[i]; ++i) if (stbi__get8(s) != signature[i]) return 0; stbi__rewind(s); return 1; } static int stbi__hdr_test(stbi__context* s) { int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); stbi__rewind(s); if(!r) { r = stbi__hdr_test_core(s, "#?RGBE\n"); stbi__rewind(s); } return r; } #define STBI__HDR_BUFLEN 1024 static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) { int len=0; char c = '\0'; c = (char) stbi__get8(z); while (!stbi__at_eof(z) && c != '\n') { buffer[len++] = c; if (len == STBI__HDR_BUFLEN-1) { // flush to end of line while (!stbi__at_eof(z) && stbi__get8(z) != '\n') ; break; } c = (char) stbi__get8(z); } buffer[len] = 0; return buffer; } static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) { if ( input[3] != 0 ) { float f1; // Exponent f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); if (req_comp <= 2) output[0] = (input[0] + input[1] + input[2]) * f1 / 3; else { output[0] = input[0] * f1; output[1] = input[1] * f1; output[2] = input[2] * f1; } if (req_comp == 2) output[1] = 1; if (req_comp == 4) output[3] = 1; } else { switch (req_comp) { case 4: output[3] = 1; /* fallthrough */ case 3: output[0] = output[1] = output[2] = 0; break; case 2: output[1] = 1; /* fallthrough */ case 1: output[0] = 0; break; } } } static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { char buffer[STBI__HDR_BUFLEN]; char *token; int valid = 0; int width, height; stbi_uc *scanline; float *hdr_data; int len; unsigned char count, value; int i, j, k, c1,c2, z; const char *headerToken; STBI_NOTUSED(ri); // Check identifier headerToken = stbi__hdr_gettoken(s,buffer); if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) return stbi__errpf("not HDR", "Corrupt HDR image"); // Parse header for(;;) { token = stbi__hdr_gettoken(s,buffer); if (token[0] == 0) break; if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; } if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); // Parse width and height // can't use sscanf() if we're not using stdio! 
token = stbi__hdr_gettoken(s,buffer); if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); token += 3; height = (int) strtol(token, &token, 10); while (*token == ' ') ++token; if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); token += 3; width = (int) strtol(token, NULL, 10); if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); *x = width; *y = height; if (comp) *comp = 3; if (req_comp == 0) req_comp = 3; if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) return stbi__errpf("too large", "HDR image is too large"); // Read data hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); if (!hdr_data) return stbi__errpf("outofmem", "Out of memory"); // Load image data // image data is stored as some number of sca if ( width < 8 || width >= 32768) { // Read flat data for (j=0; j < height; ++j) { for (i=0; i < width; ++i) { stbi_uc rgbe[4]; main_decode_loop: stbi__getn(s, rgbe, 4); stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); } } } else { // Read RLE-encoded data scanline = NULL; for (j = 0; j < height; ++j) { c1 = stbi__get8(s); c2 = stbi__get8(s); len = stbi__get8(s); if (c1 != 2 || c2 != 2 || (len & 0x80)) { // not run-length encoded, so we have to actually use THIS data as a decoded // pixel (note this can't be a valid pixel--one of RGB must be >= 128) stbi_uc rgbe[4]; rgbe[0] = (stbi_uc) c1; rgbe[1] = (stbi_uc) c2; rgbe[2] = (stbi_uc) len; rgbe[3] = (stbi_uc) stbi__get8(s); stbi__hdr_convert(hdr_data, rgbe, req_comp); i = 1; j = 0; STBI_FREE(scanline); goto main_decode_loop; // yes, this makes no sense } len <<= 8; len |= stbi__get8(s); if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } if (scanline == NULL) { scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); if (!scanline) { STBI_FREE(hdr_data); return stbi__errpf("outofmem", "Out of memory"); } } for (k = 0; k < 4; ++k) { int nleft; i = 0; while ((nleft = width - i) > 0) { count = stbi__get8(s); if (count > 128) { // Run value = stbi__get8(s); count -= 128; if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = value; } else { // Dump if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = stbi__get8(s); } } } for (i=0; i < width; ++i) stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); } if (scanline) STBI_FREE(scanline); } return hdr_data; } static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) { char buffer[STBI__HDR_BUFLEN]; char *token; int valid = 0; int dummy; if (!x) x = &dummy; if (!y) y = &dummy; if (!comp) comp = &dummy; if (stbi__hdr_test(s) == 0) { stbi__rewind( s ); return 0; } for(;;) { token = stbi__hdr_gettoken(s,buffer); if (token[0] == 0) break; if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; } if (!valid) { stbi__rewind( s ); return 0; } token = stbi__hdr_gettoken(s,buffer); if (strncmp(token, "-Y ", 3)) { stbi__rewind( s ); return 0; } token += 3; *y = (int) strtol(token, &token, 10); while 
(*token == ' ') ++token; if (strncmp(token, "+X ", 3)) { stbi__rewind( s ); return 0; } token += 3; *x = (int) strtol(token, NULL, 10); *comp = 3; return 1; } #endif // STBI_NO_HDR #ifndef STBI_NO_BMP static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) { void *p; stbi__bmp_data info; info.all_a = 255; p = stbi__bmp_parse_header(s, &info); if (p == NULL) { stbi__rewind( s ); return 0; } if (x) *x = s->img_x; if (y) *y = s->img_y; if (comp) { if (info.bpp == 24 && info.ma == 0xff000000) *comp = 3; else *comp = info.ma ? 4 : 3; } return 1; } #endif #ifndef STBI_NO_PSD static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) { int channelCount, dummy, depth; if (!x) x = &dummy; if (!y) y = &dummy; if (!comp) comp = &dummy; if (stbi__get32be(s) != 0x38425053) { stbi__rewind( s ); return 0; } if (stbi__get16be(s) != 1) { stbi__rewind( s ); return 0; } stbi__skip(s, 6); channelCount = stbi__get16be(s); if (channelCount < 0 || channelCount > 16) { stbi__rewind( s ); return 0; } *y = stbi__get32be(s); *x = stbi__get32be(s); depth = stbi__get16be(s); if (depth != 8 && depth != 16) { stbi__rewind( s ); return 0; } if (stbi__get16be(s) != 3) { stbi__rewind( s ); return 0; } *comp = 4; return 1; } static int stbi__psd_is16(stbi__context *s) { int channelCount, depth; if (stbi__get32be(s) != 0x38425053) { stbi__rewind( s ); return 0; } if (stbi__get16be(s) != 1) { stbi__rewind( s ); return 0; } stbi__skip(s, 6); channelCount = stbi__get16be(s); if (channelCount < 0 || channelCount > 16) { stbi__rewind( s ); return 0; } STBI_NOTUSED(stbi__get32be(s)); STBI_NOTUSED(stbi__get32be(s)); depth = stbi__get16be(s); if (depth != 16) { stbi__rewind( s ); return 0; } return 1; } #endif #ifndef STBI_NO_PIC static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) { int act_comp=0,num_packets=0,chained,dummy; stbi__pic_packet packets[10]; if (!x) x = &dummy; if (!y) y = &dummy; if (!comp) comp = &dummy; if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { stbi__rewind(s); return 0; } stbi__skip(s, 88); *x = stbi__get16be(s); *y = stbi__get16be(s); if (stbi__at_eof(s)) { stbi__rewind( s); return 0; } if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { stbi__rewind( s ); return 0; } stbi__skip(s, 8); do { stbi__pic_packet *packet; if (num_packets==sizeof(packets)/sizeof(packets[0])) return 0; packet = &packets[num_packets++]; chained = stbi__get8(s); packet->size = stbi__get8(s); packet->type = stbi__get8(s); packet->channel = stbi__get8(s); act_comp |= packet->channel; if (stbi__at_eof(s)) { stbi__rewind( s ); return 0; } if (packet->size != 8) { stbi__rewind( s ); return 0; } } while (chained); *comp = (act_comp & 0x10 ? 
4 : 3); return 1; } #endif // ************************************************************************************************* // Portable Gray Map and Portable Pixel Map loader // by Ken Miller // // PGM: http://netpbm.sourceforge.net/doc/pgm.html // PPM: http://netpbm.sourceforge.net/doc/ppm.html // // Known limitations: // Does not support comments in the header section // Does not support ASCII image data (formats P2 and P3) #ifndef STBI_NO_PNM static int stbi__pnm_test(stbi__context *s) { char p, t; p = (char) stbi__get8(s); t = (char) stbi__get8(s); if (p != 'P' || (t != '5' && t != '6')) { stbi__rewind( s ); return 0; } return 1; } static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi_uc *out; STBI_NOTUSED(ri); ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); if (ri->bits_per_channel == 0) return 0; if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); *x = s->img_x; *y = s->img_y; if (comp) *comp = s->img_n; if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) return stbi__errpuc("too large", "PNM too large"); out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); if (!out) return stbi__errpuc("outofmem", "Out of memory"); if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { STBI_FREE(out); return stbi__errpuc("bad PNM", "PNM file truncated"); } if (req_comp && req_comp != s->img_n) { if (ri->bits_per_channel == 16) { out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); } else { out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); } if (out == NULL) return out; // stbi__convert_format frees input on failure } return out; } static int stbi__pnm_isspace(char c) { return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; } static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) { for (;;) { while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) *c = (char) stbi__get8(s); if (stbi__at_eof(s) || *c != '#') break; while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) *c = (char) stbi__get8(s); } } static int stbi__pnm_isdigit(char c) { return c >= '0' && c <= '9'; } static int stbi__pnm_getinteger(stbi__context *s, char *c) { int value = 0; while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { value = value*10 + (*c - '0'); *c = (char) stbi__get8(s); if((value > 214748364) || (value == 214748364 && *c > '7')) return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); } return value; } static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) { int maxv, dummy; char c, p, t; if (!x) x = &dummy; if (!y) y = &dummy; if (!comp) comp = &dummy; stbi__rewind(s); // Get identifier p = (char) stbi__get8(s); t = (char) stbi__get8(s); if (p != 'P' || (t != '5' && t != '6')) { stbi__rewind(s); return 0; } *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm c = (char) stbi__get8(s); stbi__pnm_skip_whitespace(s, &c); *x = stbi__pnm_getinteger(s, &c); // read width if(*x == 0) return stbi__err("invalid width", "PPM image header had zero or overflowing width"); stbi__pnm_skip_whitespace(s, &c); *y = stbi__pnm_getinteger(s, &c); // read height if (*y == 0) return stbi__err("invalid width", "PPM image header had zero or overflowing width"); stbi__pnm_skip_whitespace(s, &c); maxv = stbi__pnm_getinteger(s, &c); // read max value if (maxv > 65535) return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); else if (maxv > 255) return 16; else return 8; } static int stbi__pnm_is16(stbi__context *s) { if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) return 1; return 0; } #endif static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) { #ifndef STBI_NO_JPEG if (stbi__jpeg_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_PNG if (stbi__png_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_GIF if (stbi__gif_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_BMP if (stbi__bmp_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_PSD if (stbi__psd_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_PIC if (stbi__pic_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_PNM if (stbi__pnm_info(s, x, y, comp)) return 1; #endif #ifndef STBI_NO_HDR if (stbi__hdr_info(s, x, y, comp)) return 1; #endif // test tga last because it's a crappy test! #ifndef STBI_NO_TGA if (stbi__tga_info(s, x, y, comp)) return 1; #endif return stbi__err("unknown image type", "Image not of any known type, or corrupt"); } static int stbi__is_16_main(stbi__context *s) { #ifndef STBI_NO_PNG if (stbi__png_is16(s)) return 1; #endif #ifndef STBI_NO_PSD if (stbi__psd_is16(s)) return 1; #endif #ifndef STBI_NO_PNM if (stbi__pnm_is16(s)) return 1; #endif return 0; } #ifndef STBI_NO_STDIO STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) { FILE *f = stbi__fopen(filename, "rb"); int result; if (!f) return stbi__err("can't fopen", "Unable to open file"); result = stbi_info_from_file(f, x, y, comp); fclose(f); return result; } STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) { int r; stbi__context s; long pos = ftell(f); stbi__start_file(&s, f); r = stbi__info_main(&s,x,y,comp); fseek(f,pos,SEEK_SET); return r; } STBIDEF int stbi_is_16_bit(char const *filename) { FILE *f = stbi__fopen(filename, "rb"); int result; if (!f) return stbi__err("can't fopen", "Unable to open file"); result = stbi_is_16_bit_from_file(f); fclose(f); return result; } STBIDEF int stbi_is_16_bit_from_file(FILE *f) { int r; stbi__context s; long pos = ftell(f); stbi__start_file(&s, f); r = stbi__is_16_main(&s); fseek(f,pos,SEEK_SET); return r; } #endif // !STBI_NO_STDIO STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) { stbi__context s; stbi__start_mem(&s,buffer,len); return stbi__info_main(&s,x,y,comp); } STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) { stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); return stbi__info_main(&s,x,y,comp); } STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) { stbi__context s; stbi__start_mem(&s,buffer,len); return stbi__is_16_main(&s); } STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) { stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); 
return stbi__is_16_main(&s); } #endif // STB_IMAGE_IMPLEMENTATION /* revision history: 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs 2.19 (2018-02-11) fix warning 2.18 (2018-01-30) fix warnings 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug 1-bit BMP *_is_16_bit api avoid warnings 2.16 (2017-07-23) all functions have 16-bit variants; STBI_NO_STDIO works again; compilation fixes; fix rounding in unpremultiply; optimize vertical flip; disable raw_len validation; documentation fixes 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; warning fixes; disable run-time SSE detection on gcc; uniform handling of optional "return" values; thread-safe initialization of zlib tables 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes 2.11 (2016-04-02) allocate large structures on the stack remove white matting for transparent PSD fix reported channel count for PNG & BMP re-enable SSE2 in non-gcc 64-bit support RGB-formatted JPEG read 16-bit PNGs (only as 8-bit) 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED 2.09 (2016-01-16) allow comments in PNM files 16-bit-per-pixel TGA (not bit-per-component) info() for TGA could break due to .hdr handling info() for BMP to shares code instead of sloppy parse can use STBI_REALLOC_SIZED if allocator doesn't support realloc code cleanup 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA 2.07 (2015-09-13) fix compiler warnings partial animated GIF support limited 16-bpc PSD support #ifdef unused functions bug with < 92 byte PIC,PNM,HDR,TGA 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit 2.03 (2015-04-12) extra corruption checking (mmozeiko) stbi_set_flip_vertically_on_load (nguillemot) fix NEON support; fix mingw support 2.02 (2015-01-19) fix incorrect assert, fix warning 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) progressive JPEG (stb) PGM/PPM support (Ken Miller) STBI_MALLOC,STBI_REALLOC,STBI_FREE GIF bugfix -- seemingly never worked STBI_NO_*, STBI_ONLY_* 1.48 (2014-12-14) fix incorrectly-named assert() 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) optimize PNG (ryg) fix bug in interlaced PNG with user-specified channel count (stb) 1.46 (2014-08-26) fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG 1.45 (2014-08-16) fix MSVC-ARM internal compiler error by wrapping malloc 1.44 (2014-08-07) various warning fixes from Ronny Chevalier 1.43 (2014-07-15) fix MSVC-only compiler problem in code changed in 1.42 1.42 (2014-07-09) don't define _CRT_SECURE_NO_WARNINGS (affects user code) fixes to stbi__cleanup_jpeg path added STBI_ASSERT to avoid requiring assert.h 1.41 (2014-06-25) fix search&replace from 1.36 that messed up comments/error messages 1.40 (2014-06-22) fix gcc struct-initialization warning 1.39 (2014-06-15) fix to TGA optimization when req_comp != number of components in TGA; fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) add support for BMP version 5 (more ignored fields) 1.38 (2014-06-06) suppress MSVC 
warnings on integer casts truncating values fix accidental rename of 'skip' field of I/O 1.37 (2014-06-04) remove duplicate typedef 1.36 (2014-06-03) convert to header file single-file library if de-iphone isn't set, load iphone images color-swapped instead of returning NULL 1.35 (2014-05-27) various warnings fix broken STBI_SIMD path fix bug where stbi_load_from_file no longer left file pointer in correct place fix broken non-easy path for 32-bit BMP (possibly never used) TGA optimization by Arseny Kapoulkine 1.34 (unknown) use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case 1.33 (2011-07-14) make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements 1.32 (2011-07-13) support for "info" function for all supported filetypes (SpartanJ) 1.31 (2011-06-20) a few more leak fixes, bug in PNG handling (SpartanJ) 1.30 (2011-06-11) added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) removed deprecated format-specific test/load functions removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) fix inefficiency in decoding 32-bit BMP (David Woo) 1.29 (2010-08-16) various warning fixes from Aurelien Pocheville 1.28 (2010-08-01) fix bug in GIF palette transparency (SpartanJ) 1.27 (2010-08-01) cast-to-stbi_uc to fix warnings 1.26 (2010-07-24) fix bug in file buffering for PNG reported by SpartanJ 1.25 (2010-07-17) refix trans_data warning (Won Chun) 1.24 (2010-07-12) perf improvements reading from files on platforms with lock-heavy fgetc() minor perf improvements for jpeg deprecated type-specific functions so we'll get feedback if they're needed attempt to fix trans_data warning (Won Chun) 1.23 fixed bug in iPhone support 1.22 (2010-07-10) removed image *writing* support stbi_info support from Jetro Lauha GIF support from Jean-Marc Lienher iPhone PNG-extensions from James Brown warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) 1.21 fix use of 'stbi_uc' in header (reported by jon blow) 1.20 added support for Softimage PIC, by Tom Seddon 1.19 bug in interlaced PNG corruption check (found by ryg) 1.18 (2008-08-02) fix a threading bug (local mutable static) 1.17 support interlaced PNG 1.16 major bugfix - stbi__convert_format converted one too many pixels 1.15 initialize some fields for thread safety 1.14 fix threadsafe conversion bug header-file-only version (#define STBI_HEADER_FILE_ONLY before including) 1.13 threadsafe 1.12 const qualifiers in the API 1.11 Support installable IDCT, colorspace conversion routines 1.10 Fixes for 64-bit (don't use "unsigned long") optimized upsampling by Fabian "ryg" Giesen 1.09 Fix format-conversion for PSD code (bad global variables!) 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz 1.07 attempt to fix C++ warning/errors again 1.06 attempt to fix C++ warning/errors again 1.05 fix TGA loading to return correct *comp and use good luminance calc 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR 1.02 support for (subset of) HDR files, float interface for preferred access to them 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all 1.00 interface to zlib that skips zlib header 0.99 correct handling of alpha in palette 0.98 TGA loader by lonesock; dynamically add loaders (untested) 0.97 jpeg errors on too large a file; also catch another malloc failure 0.96 fix detection of invalid v value - particleman@mollyrocket forum 0.95 during header scan, seek to markers in case of padding 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same 0.93 handle jpegtran output; verbose errors 0.92 read 4,8,16,24,32-bit BMP files of several formats 0.91 output 24-bit Windows 3.0 BMP files 0.90 fix a few more warnings; bump version number to approach 1.0 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd 0.60 fix compiling as c++ 0.59 fix warnings: merge Dave Moore's -Wall fixes 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available 0.56 fix bug: zlib uncompressed mode len vs. nlen 0.55 fix bug: restart_interval not initialized to 0 0.54 allow NULL for 'int *comp' 0.53 fix bug in png 3->4; speedup png decoding 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments 0.51 obey req_comp requests, 1-component jpegs return as 1-component, on 'test' only check type, not whether we support this variant 0.50 (2006-11-19) first released version */ /* ------------------------------------------------------------------------------ This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ ALTERNATIVE A - MIT License Copyright (c) 2017 Sean Barrett Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ ALTERNATIVE B - Public Domain (www.unlicense.org) This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. 
We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ */ ggml-org-ggml-3678254/examples/stb_image_write.h000066400000000000000000002130651512524704700215260ustar00rootroot00000000000000/* stb_image_write - v1.16 - public domain - http://nothings.org/stb writes out PNG/BMP/TGA/JPEG/HDR images to C stdio - Sean Barrett 2010-2015 no warranty implied; use at your own risk Before #including, #define STB_IMAGE_WRITE_IMPLEMENTATION in the file that you want to have the implementation. Will probably not work correctly with strict-aliasing optimizations. ABOUT: This header file is a library for writing images to C stdio or a callback. The PNG output is not optimal; it is 20-50% larger than the file written by a decent optimizing implementation; though providing a custom zlib compress function (see STBIW_ZLIB_COMPRESS) can mitigate that. This library is designed for source code compactness and simplicity, not optimal image file size or run-time performance. BUILDING: You can #define STBIW_ASSERT(x) before the #include to avoid using assert.h. You can #define STBIW_MALLOC(), STBIW_REALLOC(), and STBIW_FREE() to replace malloc,realloc,free. You can #define STBIW_MEMMOVE() to replace memmove() You can #define STBIW_ZLIB_COMPRESS to use a custom zlib-style compress function for PNG compression (instead of the builtin one), it must have the following signature: unsigned char * my_compress(unsigned char *data, int data_len, int *out_len, int quality); The returned data will be freed with STBIW_FREE() (free() by default), so it must be heap allocated with STBIW_MALLOC() (malloc() by default), UNICODE: If compiling for Windows and you wish to use Unicode filenames, compile with #define STBIW_WINDOWS_UTF8 and pass utf8-encoded filenames. Call stbiw_convert_wchar_to_utf8 to convert Windows wchar_t filenames to utf8. USAGE: There are five functions, one for each image file format: int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); int stbi_write_jpg(char const *filename, int w, int h, int comp, const void *data, int quality); int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); void stbi_flip_vertically_on_write(int flag); // flag is non-zero to flip data vertically There are also five equivalent functions that use an arbitrary write function. 
You are expected to open/close your file-equivalent before and after calling these: int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); where the callback is: void stbi_write_func(void *context, void *data, int size); You can configure it with these global variables: int stbi_write_tga_with_rle; // defaults to true; set to 0 to disable RLE int stbi_write_png_compression_level; // defaults to 8; set to higher for more compression int stbi_write_force_png_filter; // defaults to -1; set to 0..5 to force a filter mode You can define STBI_WRITE_NO_STDIO to disable the file variant of these functions, so the library will not use stdio.h at all. However, this will also disable HDR writing, because it requires stdio for formatted output. Each function returns 0 on failure and non-0 on success. The functions create an image file defined by the parameters. The image is a rectangle of pixels stored from left-to-right, top-to-bottom. Each pixel contains 'comp' channels of data stored interleaved with 8-bits per channel, in the following order: 1=Y, 2=YA, 3=RGB, 4=RGBA. (Y is monochrome color.) The rectangle is 'w' pixels wide and 'h' pixels tall. The *data pointer points to the first byte of the top-left-most pixel. For PNG, "stride_in_bytes" is the distance in bytes from the first byte of a row of pixels to the first byte of the next row of pixels. PNG creates output files with the same number of components as the input. The BMP format expands Y to RGB in the file format and does not output alpha. PNG supports writing rectangles of data even when the bytes storing rows of data are not consecutive in memory (e.g. sub-rectangles of a larger image), by supplying the stride between the beginning of adjacent rows. The other formats do not. (Thus you cannot write a native-format BMP through the BMP writer, both because it is in BGR order and because it may have padding at the end of the line.) PNG allows you to set the deflate compression level by setting the global variable 'stbi_write_png_compression_level' (it defaults to 8). HDR expects linear float data. Since the format is always 32-bit rgb(e) data, alpha (if provided) is discarded, and for monochrome data it is replicated across all three channels. TGA supports RLE or non-RLE compressed data. To use non-RLE-compressed data, set the global variable 'stbi_write_tga_with_rle' to 0. JPEG does ignore alpha channels in input data; quality is between 1 and 100. Higher quality looks better but results in a bigger image. JPEG baseline (no JPEG progressive). 
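EXAMPLE (a minimal sketch added for illustration, not part of the upstream
   documentation; the file name, buffer contents and helper name are assumptions):

      #define STB_IMAGE_WRITE_IMPLEMENTATION
      #include "stb_image_write.h"

      // Write a 2x2 RGB image. Pixels are tightly packed left-to-right,
      // top-to-bottom, so stride_in_bytes is width*comp = 2*3.
      static int write_example(void)
      {
         unsigned char rgb[2*2*3] = {
            255,0,0,    0,255,0,      // row 0: red, green
            0,0,255,    255,255,255,  // row 1: blue, white
         };
         return stbi_write_png("example.png", 2, 2, 3, rgb, 2*3); // non-0 on success
      }

   The *_to_func variants take the same image parameters but hand the encoded
   bytes to a user-supplied stbi_write_func callback instead of touching stdio.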
CREDITS: Sean Barrett - PNG/BMP/TGA Baldur Karlsson - HDR Jean-Sebastien Guay - TGA monochrome Tim Kelsey - misc enhancements Alan Hickman - TGA RLE Emmanuel Julien - initial file IO callback implementation Jon Olick - original jo_jpeg.cpp code Daniel Gibson - integrate JPEG, allow external zlib Aarni Koskela - allow choosing PNG filter bugfixes: github:Chribba Guillaume Chereau github:jry2 github:romigrou Sergio Gonzalez Jonas Karlsson Filip Wasil Thatcher Ulrich github:poppolopoppo Patrick Boettcher github:xeekworx Cap Petschulat Simon Rodriguez Ivan Tikhonov github:ignotion Adam Schackart Andrew Kensler LICENSE See end of file for license information. */ #ifndef INCLUDE_STB_IMAGE_WRITE_H #define INCLUDE_STB_IMAGE_WRITE_H #include // if STB_IMAGE_WRITE_STATIC causes problems, try defining STBIWDEF to 'inline' or 'static inline' #ifndef STBIWDEF #ifdef STB_IMAGE_WRITE_STATIC #define STBIWDEF static #else #ifdef __cplusplus #define STBIWDEF extern "C" #else #define STBIWDEF extern #endif #endif #endif #ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations STBIWDEF int stbi_write_tga_with_rle; STBIWDEF int stbi_write_png_compression_level; STBIWDEF int stbi_write_force_png_filter; #endif #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality); #ifdef STBIW_WINDOWS_UTF8 STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); #endif #endif typedef void stbi_write_func(void *context, void *data, int size); STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean); #endif//INCLUDE_STB_IMAGE_WRITE_H #ifdef STB_IMAGE_WRITE_IMPLEMENTATION #ifdef _WIN32 #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifndef _CRT_NONSTDC_NO_DEPRECATE #define _CRT_NONSTDC_NO_DEPRECATE #endif #endif #ifndef STBI_WRITE_NO_STDIO #include #endif // STBI_WRITE_NO_STDIO #include #include #include #include #if defined(STBIW_MALLOC) && defined(STBIW_FREE) && (defined(STBIW_REALLOC) || defined(STBIW_REALLOC_SIZED)) // ok #elif !defined(STBIW_MALLOC) && !defined(STBIW_FREE) && !defined(STBIW_REALLOC) && !defined(STBIW_REALLOC_SIZED) // ok #else #error "Must define all or none of STBIW_MALLOC, STBIW_FREE, and STBIW_REALLOC (or STBIW_REALLOC_SIZED)." 
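// Added note: a complete allocator override looks like the following sketch (my_alloc,
// my_realloc and my_free are placeholder names, not part of this library). All of the
// macros must be defined together, before including this file with
// STB_IMAGE_WRITE_IMPLEMENTATION defined, or the check above fails:
//    #define STBIW_MALLOC(sz)        my_alloc(sz)
//    #define STBIW_REALLOC(p,newsz)  my_realloc(p,newsz)
//    #define STBIW_FREE(p)           my_free(p)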
#endif #ifndef STBIW_MALLOC #define STBIW_MALLOC(sz) malloc(sz) #define STBIW_REALLOC(p,newsz) realloc(p,newsz) #define STBIW_FREE(p) free(p) #endif #ifndef STBIW_REALLOC_SIZED #define STBIW_REALLOC_SIZED(p,oldsz,newsz) STBIW_REALLOC(p,newsz) #endif #ifndef STBIW_MEMMOVE #define STBIW_MEMMOVE(a,b,sz) memmove(a,b,sz) #endif #ifndef STBIW_ASSERT #include #define STBIW_ASSERT(x) assert(x) #endif #define STBIW_UCHAR(x) (unsigned char) ((x) & 0xff) #ifdef STB_IMAGE_WRITE_STATIC static int stbi_write_png_compression_level = 8; static int stbi_write_tga_with_rle = 1; static int stbi_write_force_png_filter = -1; #else int stbi_write_png_compression_level = 8; int stbi_write_tga_with_rle = 1; int stbi_write_force_png_filter = -1; #endif static int stbi__flip_vertically_on_write = 0; STBIWDEF void stbi_flip_vertically_on_write(int flag) { stbi__flip_vertically_on_write = flag; } typedef struct { stbi_write_func *func; void *context; unsigned char buffer[64]; int buf_used; } stbi__write_context; // initialize a callback-based context static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context) { s->func = c; s->context = context; } #ifndef STBI_WRITE_NO_STDIO static void stbi__stdio_write(void *context, void *data, int size) { fwrite(data,1,size,(FILE*) context); } #if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) #ifdef __cplusplus #define STBIW_EXTERN extern "C" #else #define STBIW_EXTERN extern #endif STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) { return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); } #endif static FILE *stbiw__fopen(char const *filename, char const *mode) { FILE *f; #if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) wchar_t wMode[64]; wchar_t wFilename[1024]; if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) return 0; if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) return 0; #if defined(_MSC_VER) && _MSC_VER >= 1400 if (0 != _wfopen_s(&f, wFilename, wMode)) f = 0; #else f = _wfopen(wFilename, wMode); #endif #elif defined(_MSC_VER) && _MSC_VER >= 1400 if (0 != fopen_s(&f, filename, mode)) f=0; #else f = fopen(filename, mode); #endif return f; } static int stbi__start_write_file(stbi__write_context *s, const char *filename) { FILE *f = stbiw__fopen(filename, "wb"); stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f); return f != NULL; } static void stbi__end_write_file(stbi__write_context *s) { fclose((FILE *)s->context); } #endif // !STBI_WRITE_NO_STDIO typedef unsigned int stbiw_uint32; typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 
1 : -1]; static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v) { while (*fmt) { switch (*fmt++) { case ' ': break; case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int)); s->func(s->context,&x,1); break; } case '2': { int x = va_arg(v,int); unsigned char b[2]; b[0] = STBIW_UCHAR(x); b[1] = STBIW_UCHAR(x>>8); s->func(s->context,b,2); break; } case '4': { stbiw_uint32 x = va_arg(v,int); unsigned char b[4]; b[0]=STBIW_UCHAR(x); b[1]=STBIW_UCHAR(x>>8); b[2]=STBIW_UCHAR(x>>16); b[3]=STBIW_UCHAR(x>>24); s->func(s->context,b,4); break; } default: STBIW_ASSERT(0); return; } } } static void stbiw__writef(stbi__write_context *s, const char *fmt, ...) { va_list v; va_start(v, fmt); stbiw__writefv(s, fmt, v); va_end(v); } static void stbiw__write_flush(stbi__write_context *s) { if (s->buf_used) { s->func(s->context, &s->buffer, s->buf_used); s->buf_used = 0; } } static void stbiw__putc(stbi__write_context *s, unsigned char c) { s->func(s->context, &c, 1); } static void stbiw__write1(stbi__write_context *s, unsigned char a) { if ((size_t)s->buf_used + 1 > sizeof(s->buffer)) stbiw__write_flush(s); s->buffer[s->buf_used++] = a; } static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c) { int n; if ((size_t)s->buf_used + 3 > sizeof(s->buffer)) stbiw__write_flush(s); n = s->buf_used; s->buf_used = n+3; s->buffer[n+0] = a; s->buffer[n+1] = b; s->buffer[n+2] = c; } static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d) { unsigned char bg[3] = { 255, 0, 255}, px[3]; int k; if (write_alpha < 0) stbiw__write1(s, d[comp - 1]); switch (comp) { case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case case 1: if (expand_mono) stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp else stbiw__write1(s, d[0]); // monochrome TGA break; case 4: if (!write_alpha) { // composite against pink background for (k = 0; k < 3; ++k) px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]); break; } /* FALLTHROUGH */ case 3: stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); break; } if (write_alpha > 0) stbiw__write1(s, d[comp - 1]); } static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono) { stbiw_uint32 zero = 0; int i,j, j_end; if (y <= 0) return; if (stbi__flip_vertically_on_write) vdir *= -1; if (vdir < 0) { j_end = -1; j = y-1; } else { j_end = y; j = 0; } for (; j != j_end; j += vdir) { for (i=0; i < x; ++i) { unsigned char *d = (unsigned char *) data + (j*x+i)*comp; stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); } stbiw__write_flush(s); s->func(s->context, &zero, scanline_pad); } } static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...) 
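// Note on the "fmt" mini-language consumed by stbiw__writefv() above (descriptive comment
// added for clarity): each '1', '2' or '4' pulls one int-sized vararg and emits it as 1, 2
// or 4 bytes in little-endian order; spaces are ignored and only group the fields visually.
// For example, the BMP file header below uses "11 4 22 4" for the 'B','M' magic bytes, the
// 4-byte file size, two 2-byte reserved fields and the 4-byte pixel-data offset.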
{ if (y < 0 || x < 0) { return 0; } else { va_list v; va_start(v, fmt); stbiw__writefv(s, fmt, v); va_end(v); stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono); return 1; } } static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data) { if (comp != 4) { // write RGB bitmap int pad = (-x*3) & 3; return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad, "11 4 22 4" "4 44 22 444444", 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header } else { // RGBA bitmaps need a v4 header // use BI_BITFIELDS mode with 32bpp and alpha mask // (straight BI_RGB with alpha mask doesn't work in most readers) return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *)data,1,0, "11 4 22 4" "4 44 22 444444 4444 4 444 444 444 444", 'B', 'M', 14+108+x*y*4, 0, 0, 14+108, // file header 108, x,y, 1,32, 3,0,0,0,0,0, 0xff0000,0xff00,0xff,0xff000000u, 0, 0,0,0, 0,0,0, 0,0,0, 0,0,0); // bitmap V4 header } } STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) { stbi__write_context s = { 0 }; stbi__start_write_callbacks(&s, func, context); return stbi_write_bmp_core(&s, x, y, comp, data); } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data) { stbi__write_context s = { 0 }; if (stbi__start_write_file(&s,filename)) { int r = stbi_write_bmp_core(&s, x, y, comp, data); stbi__end_write_file(&s); return r; } else return 0; } #endif //!STBI_WRITE_NO_STDIO static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data) { int has_alpha = (comp == 2 || comp == 4); int colorbytes = has_alpha ? comp-1 : comp; int format = colorbytes < 2 ? 
3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 if (y < 0 || x < 0) return 0; if (!stbi_write_tga_with_rle) { return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0, "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); } else { int i,j,k; int jend, jdir; stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8); if (stbi__flip_vertically_on_write) { j = 0; jend = y; jdir = 1; } else { j = y-1; jend = -1; jdir = -1; } for (; j != jend; j += jdir) { unsigned char *row = (unsigned char *) data + j * x * comp; int len; for (i = 0; i < x; i += len) { unsigned char *begin = row + i * comp; int diff = 1; len = 1; if (i < x - 1) { ++len; diff = memcmp(begin, row + (i + 1) * comp, comp); if (diff) { const unsigned char *prev = begin; for (k = i + 2; k < x && len < 128; ++k) { if (memcmp(prev, row + k * comp, comp)) { prev += comp; ++len; } else { --len; break; } } } else { for (k = i + 2; k < x && len < 128; ++k) { if (!memcmp(begin, row + k * comp, comp)) { ++len; } else { break; } } } } if (diff) { unsigned char header = STBIW_UCHAR(len - 1); stbiw__write1(s, header); for (k = 0; k < len; ++k) { stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); } } else { unsigned char header = STBIW_UCHAR(len - 129); stbiw__write1(s, header); stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); } } } stbiw__write_flush(s); } return 1; } STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) { stbi__write_context s = { 0 }; stbi__start_write_callbacks(&s, func, context); return stbi_write_tga_core(&s, x, y, comp, (void *) data); } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const void *data) { stbi__write_context s = { 0 }; if (stbi__start_write_file(&s,filename)) { int r = stbi_write_tga_core(&s, x, y, comp, (void *) data); stbi__end_write_file(&s); return r; } else return 0; } #endif // ************************************************************************************************* // Radiance RGBE HDR writer // by Baldur Karlsson #define stbiw__max(a, b) ((a) > (b) ? 
(a) : (b)) #ifndef STBI_WRITE_NO_STDIO static void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear) { int exponent; float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); if (maxcomp < 1e-32f) { rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; } else { float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp; rgbe[0] = (unsigned char)(linear[0] * normalize); rgbe[1] = (unsigned char)(linear[1] * normalize); rgbe[2] = (unsigned char)(linear[2] * normalize); rgbe[3] = (unsigned char)(exponent + 128); } } static void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte) { unsigned char lengthbyte = STBIW_UCHAR(length+128); STBIW_ASSERT(length+128 <= 255); s->func(s->context, &lengthbyte, 1); s->func(s->context, &databyte, 1); } static void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data) { unsigned char lengthbyte = STBIW_UCHAR(length); STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code s->func(s->context, &lengthbyte, 1); s->func(s->context, data, length); } static void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline) { unsigned char scanlineheader[4] = { 2, 2, 0, 0 }; unsigned char rgbe[4]; float linear[3]; int x; scanlineheader[2] = (width&0xff00)>>8; scanlineheader[3] = (width&0x00ff); /* skip RLE for images too small or large */ if (width < 8 || width >= 32768) { for (x=0; x < width; x++) { switch (ncomp) { case 4: /* fallthrough */ case 3: linear[2] = scanline[x*ncomp + 2]; linear[1] = scanline[x*ncomp + 1]; linear[0] = scanline[x*ncomp + 0]; break; default: linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; break; } stbiw__linear_to_rgbe(rgbe, linear); s->func(s->context, rgbe, 4); } } else { int c,r; /* encode into scratch buffer */ for (x=0; x < width; x++) { switch(ncomp) { case 4: /* fallthrough */ case 3: linear[2] = scanline[x*ncomp + 2]; linear[1] = scanline[x*ncomp + 1]; linear[0] = scanline[x*ncomp + 0]; break; default: linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; break; } stbiw__linear_to_rgbe(rgbe, linear); scratch[x + width*0] = rgbe[0]; scratch[x + width*1] = rgbe[1]; scratch[x + width*2] = rgbe[2]; scratch[x + width*3] = rgbe[3]; } s->func(s->context, scanlineheader, 4); /* RLE each component separately */ for (c=0; c < 4; c++) { unsigned char *comp = &scratch[width*c]; x = 0; while (x < width) { // find first run r = x; while (r+2 < width) { if (comp[r] == comp[r+1] && comp[r] == comp[r+2]) break; ++r; } if (r+2 >= width) r = width; // dump up to first run while (x < r) { int len = r-x; if (len > 128) len = 128; stbiw__write_dump_data(s, len, &comp[x]); x += len; } // if there's a run, output it if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd // find next byte after run while (r < width && comp[r] == comp[x]) ++r; // output run up to r while (x < r) { int len = r-x; if (len > 127) len = 127; stbiw__write_run_data(s, len, comp[x]); x += len; } } } } } } static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data) { if (y <= 0 || x <= 0 || data == NULL) return 0; else { // Each component is stored separately. Allocate scratch space for full output scanline. 
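// Added note: the scratch buffer holds one scanline split into four planes of x bytes each --
// R, G, B and the shared exponent E -- at offsets 0, x, 2*x and 3*x. stbiw__write_hdr_scanline()
// fills the planes via stbiw__linear_to_rgbe() and then RLE-encodes each plane separately,
// which is why x*4 bytes are needed per scanline.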
unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4); int i, len; char buffer[128]; char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n"; s->func(s->context, header, sizeof(header)-1); #ifdef __STDC_LIB_EXT1__ len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); #else len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); #endif s->func(s->context, buffer, len); for(i=0; i < y; i++) stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? y-1-i : i)); STBIW_FREE(scratch); return 1; } } STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data) { stbi__write_context s = { 0 }; stbi__start_write_callbacks(&s, func, context); return stbi_write_hdr_core(&s, x, y, comp, (float *) data); } STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data) { stbi__write_context s = { 0 }; if (stbi__start_write_file(&s,filename)) { int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data); stbi__end_write_file(&s); return r; } else return 0; } #endif // STBI_WRITE_NO_STDIO ////////////////////////////////////////////////////////////////////////////// // // PNG writer // #ifndef STBIW_ZLIB_COMPRESS // stretchy buffer; stbiw__sbpush() == vector<>::push_back() -- stbiw__sbcount() == vector<>::size() #define stbiw__sbraw(a) ((int *) (void *) (a) - 2) #define stbiw__sbm(a) stbiw__sbraw(a)[0] #define stbiw__sbn(a) stbiw__sbraw(a)[1] #define stbiw__sbneedgrow(a,n) ((a)==0 || stbiw__sbn(a)+n >= stbiw__sbm(a)) #define stbiw__sbmaybegrow(a,n) (stbiw__sbneedgrow(a,(n)) ? stbiw__sbgrow(a,n) : 0) #define stbiw__sbgrow(a,n) stbiw__sbgrowf((void **) &(a), (n), sizeof(*(a))) #define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a,1), (a)[stbiw__sbn(a)++] = (v)) #define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0) #define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0) static void *stbiw__sbgrowf(void **arr, int increment, int itemsize) { int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1; void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? 
(stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2); STBIW_ASSERT(p); if (p) { if (!*arr) ((int *) p)[1] = 0; *arr = (void *) ((int *) p + 2); stbiw__sbm(*arr) = m; } return *arr; } static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount) { while (*bitcount >= 8) { stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); *bitbuffer >>= 8; *bitcount -= 8; } return data; } static int stbiw__zlib_bitrev(int code, int codebits) { int res=0; while (codebits--) { res = (res << 1) | (code & 1); code >>= 1; } return res; } static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit) { int i; for (i=0; i < limit && i < 258; ++i) if (a[i] != b[i]) break; return i; } static unsigned int stbiw__zhash(unsigned char *data) { stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); hash ^= hash << 3; hash += hash >> 5; hash ^= hash << 4; hash += hash >> 17; hash ^= hash << 25; hash += hash >> 6; return hash; } #define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount)) #define stbiw__zlib_add(code,codebits) \ (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush()) #define stbiw__zlib_huffa(b,c) stbiw__zlib_add(stbiw__zlib_bitrev(b,c),c) // default huffman tables #define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8) #define stbiw__zlib_huff2(n) stbiw__zlib_huffa(0x190 + (n)-144, 9) #define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256,7) #define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280,8) #define stbiw__zlib_huff(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : (n) <= 255 ? stbiw__zlib_huff2(n) : (n) <= 279 ? stbiw__zlib_huff3(n) : stbiw__zlib_huff4(n)) #define stbiw__zlib_huffb(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : stbiw__zlib_huff2(n)) #define stbiw__ZHASH 16384 #endif // STBIW_ZLIB_COMPRESS STBIWDEF unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality) { #ifdef STBIW_ZLIB_COMPRESS // user provided a zlib compress implementation, use that return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); #else // use builtin static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 }; static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 }; static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 }; static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; unsigned int bitbuf=0; int i,j, bitcount=0; unsigned char *out = NULL; unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char**)); if (hash_table == NULL) return NULL; if (quality < 5) quality = 5; stbiw__sbpush(out, 0x78); // DEFLATE 32K window stbiw__sbpush(out, 0x5e); // FLEVEL = 1 stbiw__zlib_add(1,1); // BFINAL = 1 stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman for (i=0; i < stbiw__ZHASH; ++i) hash_table[i] = NULL; i=0; while (i < data_len-3) { // hash next 3 bytes of data to be compressed int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3; unsigned char *bestloc = 0; unsigned char **hlist = hash_table[h]; int n = stbiw__sbcount(hlist); for (j=0; j < n; ++j) { if (hlist[j]-data > i-32768) { // if entry lies within window int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i); if (d >= best) { best=d; bestloc=hlist[j]; } } } // when hash 
table entry is too long, delete half the entries if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) { STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality); stbiw__sbn(hash_table[h]) = quality; } stbiw__sbpush(hash_table[h],data+i); if (bestloc) { // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1); hlist = hash_table[h]; n = stbiw__sbcount(hlist); for (j=0; j < n; ++j) { if (hlist[j]-data > i-32767) { int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1); if (e > best) { // if next match is better, bail on current match bestloc = NULL; break; } } } } if (bestloc) { int d = (int) (data+i - bestloc); // distance back STBIW_ASSERT(d <= 32767 && best <= 258); for (j=0; best > lengthc[j+1]-1; ++j); stbiw__zlib_huff(j+257); if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]); for (j=0; d > distc[j+1]-1; ++j); stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5); if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]); i += best; } else { stbiw__zlib_huffb(data[i]); ++i; } } // write out final bytes for (;i < data_len; ++i) stbiw__zlib_huffb(data[i]); stbiw__zlib_huff(256); // end of block // pad with 0 bits to byte boundary while (bitcount) stbiw__zlib_add(0,1); for (i=0; i < stbiw__ZHASH; ++i) (void) stbiw__sbfree(hash_table[i]); STBIW_FREE(hash_table); // store uncompressed instead if compression was worse if (stbiw__sbn(out) > data_len + 2 + ((data_len+32766)/32767)*5) { stbiw__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1 for (j = 0; j < data_len;) { int blocklen = data_len - j; if (blocklen > 32767) blocklen = 32767; stbiw__sbpush(out, data_len - j == blocklen); // BFINAL = ?, BTYPE = 0 -- no compression stbiw__sbpush(out, STBIW_UCHAR(blocklen)); // LEN stbiw__sbpush(out, STBIW_UCHAR(blocklen >> 8)); stbiw__sbpush(out, STBIW_UCHAR(~blocklen)); // NLEN stbiw__sbpush(out, STBIW_UCHAR(~blocklen >> 8)); memcpy(out+stbiw__sbn(out), data+j, blocklen); stbiw__sbn(out) += blocklen; j += blocklen; } } { // compute adler32 on input unsigned int s1=1, s2=0; int blocklen = (int) (data_len % 5552); j=0; while (j < data_len) { for (i=0; i < blocklen; ++i) { s1 += data[j+i]; s2 += s1; } s1 %= 65521; s2 %= 65521; j += blocklen; blocklen = 5552; } stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); stbiw__sbpush(out, STBIW_UCHAR(s2)); stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); stbiw__sbpush(out, STBIW_UCHAR(s1)); } *out_len = stbiw__sbn(out); // make returned pointer freeable STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); return (unsigned char *) stbiw__sbraw(out); #endif // STBIW_ZLIB_COMPRESS } static unsigned int stbiw__crc32(unsigned char *buffer, int len) { #ifdef STBIW_CRC32 return STBIW_CRC32(buffer, len); #else static unsigned int crc_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; unsigned int crc = ~0u; int i; for (i=0; i < len; ++i) crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; return ~crc; #endif } #define stbiw__wpng4(o,a,b,c,d) ((o)[0]=STBIW_UCHAR(a),(o)[1]=STBIW_UCHAR(b),(o)[2]=STBIW_UCHAR(c),(o)[3]=STBIW_UCHAR(d),(o)+=4) #define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v)); #define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3]) static void stbiw__wpcrc(unsigned char **data, int len) { unsigned int crc = stbiw__crc32(*data - len - 4, len+4); stbiw__wp32(*data, crc); } static unsigned char stbiw__paeth(int a, int b, int c) { int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c); if (pa <= pb && pa <= pc) return STBIW_UCHAR(a); if (pb <= pc) return STBIW_UCHAR(b); return STBIW_UCHAR(c); } // @OPTIMIZE: provide an option that always forces left-predict or paeth predict static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer) { static int mapping[] = { 0,1,2,3,4 }; static int firstmap[] = { 0,1,0,5,6 }; int *mymap = (y != 0) ? 
mapping : firstmap; int i; int type = mymap[filter_type]; unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y); int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes; if (type==0) { memcpy(line_buffer, z, width*n); return; } // first loop isn't optimized since it's just one pixel for (i = 0; i < n; ++i) { switch (type) { case 1: line_buffer[i] = z[i]; break; case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break; case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break; case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break; case 5: line_buffer[i] = z[i]; break; case 6: line_buffer[i] = z[i]; break; } } switch (type) { case 1: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-n]; break; case 2: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-signed_stride]; break; case 3: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break; case 4: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break; case 5: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - (z[i-n]>>1); break; case 6: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break; } } STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len) { int force_filter = stbi_write_force_png_filter; int ctype[5] = { -1, 0, 4, 2, 6 }; unsigned char sig[8] = { 137,80,78,71,13,10,26,10 }; unsigned char *out,*o, *filt, *zlib; signed char *line_buffer; int j,zlen; if (stride_bytes == 0) stride_bytes = x * n; if (force_filter >= 5) { force_filter = -1; } filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0; line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; } for (j=0; j < y; ++j) { int filter_type; if (force_filter > -1) { filter_type = force_filter; stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, force_filter, line_buffer); } else { // Estimate the best filter by running through all of them: int best_filter = 0, best_filter_val = 0x7fffffff, est, i; for (filter_type = 0; filter_type < 5; filter_type++) { stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer); // Estimate the entropy of the line using this filter; the less, the better. 
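// Added note: the "entropy" estimate is the minimum-sum-of-absolute-differences heuristic --
// the filtered bytes are reinterpreted as signed, their absolute values are summed, and the
// filter with the smallest sum wins. This is the simple selection heuristic suggested by the
// PNG specification; it is cheap and usually compresses well, but it is not guaranteed optimal.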
est = 0; for (i = 0; i < x*n; ++i) { est += abs((signed char) line_buffer[i]); } if (est < best_filter_val) { best_filter_val = est; best_filter = filter_type; } } if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer); filter_type = best_filter; } } // when we get here, filter_type contains the filter type, and line_buffer contains the data filt[j*(x*n+1)] = (unsigned char) filter_type; STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n); } STBIW_FREE(line_buffer); zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level); STBIW_FREE(filt); if (!zlib) return 0; // each tag requires 12 bytes of overhead out = (unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12); if (!out) return 0; *out_len = 8 + 12+13 + 12+zlen + 12; o=out; STBIW_MEMMOVE(o,sig,8); o+= 8; stbiw__wp32(o, 13); // header length stbiw__wptag(o, "IHDR"); stbiw__wp32(o, x); stbiw__wp32(o, y); *o++ = 8; *o++ = STBIW_UCHAR(ctype[n]); *o++ = 0; *o++ = 0; *o++ = 0; stbiw__wpcrc(&o,13); stbiw__wp32(o, zlen); stbiw__wptag(o, "IDAT"); STBIW_MEMMOVE(o, zlib, zlen); o += zlen; STBIW_FREE(zlib); stbiw__wpcrc(&o, zlen); stbiw__wp32(o,0); stbiw__wptag(o, "IEND"); stbiw__wpcrc(&o,0); STBIW_ASSERT(o == out + *out_len); return out; } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void *data, int stride_bytes) { FILE *f; int len; unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); if (png == NULL) return 0; f = stbiw__fopen(filename, "wb"); if (!f) { STBIW_FREE(png); return 0; } fwrite(png, 1, len, f); fclose(f); STBIW_FREE(png); return 1; } #endif STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes) { int len; unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); if (png == NULL) return 0; func(context, png, len); STBIW_FREE(png); return 1; } /* *************************************************************************** * * JPEG writer * * This is based on Jon Olick's jo_jpeg.cpp: * public domain Simple, Minimalistic JPEG writer - http://www.jonolick.com/code.html */ static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18, 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 }; static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) { int bitBuf = *bitBufP, bitCnt = *bitCntP; bitCnt += bs[1]; bitBuf |= bs[0] << (24 - bitCnt); while(bitCnt >= 8) { unsigned char c = (bitBuf >> 16) & 255; stbiw__putc(s, c); if(c == 255) { stbiw__putc(s, 0); } bitBuf <<= 8; bitCnt -= 8; } *bitBufP = bitBuf; *bitCntP = bitCnt; } static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) { float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p; float z1, z2, z3, z4, z5, z11, z13; float tmp0 = d0 + d7; float tmp7 = d0 - d7; float tmp1 = d1 + d6; float tmp6 = d1 - d6; float tmp2 = d2 + d5; float tmp5 = d2 - d5; float tmp3 = d3 + d4; float tmp4 = d3 - d4; // Even part float tmp10 = tmp0 + tmp3; // phase 2 float tmp13 = tmp0 - tmp3; float tmp11 = tmp1 + tmp2; float tmp12 = 
tmp1 - tmp2; d0 = tmp10 + tmp11; // phase 3 d4 = tmp10 - tmp11; z1 = (tmp12 + tmp13) * 0.707106781f; // c4 d2 = tmp13 + z1; // phase 5 d6 = tmp13 - z1; // Odd part tmp10 = tmp4 + tmp5; // phase 2 tmp11 = tmp5 + tmp6; tmp12 = tmp6 + tmp7; // The rotator is modified from fig 4-8 to avoid extra negations. z5 = (tmp10 - tmp12) * 0.382683433f; // c6 z2 = tmp10 * 0.541196100f + z5; // c2-c6 z4 = tmp12 * 1.306562965f + z5; // c2+c6 z3 = tmp11 * 0.707106781f; // c4 z11 = tmp7 + z3; // phase 5 z13 = tmp7 - z3; *d5p = z13 + z2; // phase 6 *d3p = z13 - z2; *d1p = z11 + z4; *d7p = z11 - z4; *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6; } static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) { int tmp1 = val < 0 ? -val : val; val = val < 0 ? val-1 : val; bits[1] = 1; while(tmp1 >>= 1) { ++bits[1]; } bits[0] = val & ((1<0)&&(DU[end0pos]==0); --end0pos) { } // end0pos = first element in reverse order !=0 if(end0pos == 0) { stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); return DU[0]; } for(i = 1; i <= end0pos; ++i) { int startpos = i; int nrzeroes; unsigned short bits[2]; for (; DU[i]==0 && i<=end0pos; ++i) { } nrzeroes = i-startpos; if ( nrzeroes >= 16 ) { int lng = nrzeroes>>4; int nrmarker; for (nrmarker=1; nrmarker <= lng; ++nrmarker) stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes); nrzeroes &= 15; } stbiw__jpg_calcBits(DU[i], bits); stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]); stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits); } if(end0pos != 63) { stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); } return DU[0]; } static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) { // Constants that don't pollute global namespace static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0}; static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d}; static const unsigned char std_ac_luminance_values[] = { 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08, 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28, 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59, 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6, 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2, 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa }; static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0}; static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77}; static const unsigned char std_ac_chrominance_values[] = { 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26, 
0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58, 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4, 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda, 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa }; // Huffman tables static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}}; static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}}; static const unsigned short YAC_HT[256][2] = { {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0}, {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} }; static const unsigned short UVAC_HT[256][2] = { {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, 
{26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0}, {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} }; static const int YQT[] = {16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22, 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99}; static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99, 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99}; static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; int row, col, i, k, subsample; float fdtbl_Y[64], fdtbl_UV[64]; unsigned char YTable[64], UVTable[64]; if(!data || !width || !height || comp > 4 || comp < 1) { return 0; } quality = quality ? quality : 90; subsample = quality <= 90 ? 1 : 0; quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; quality = quality < 50 ? 5000 / quality : 200 - quality * 2; for(i = 0; i < 64; ++i) { int uvti, yti = (YQT[i]*quality+50)/100; YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 255 : yti); uvti = (UVQT[i]*quality+50)/100; UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 
255 : uvti); } for(row = 0, k = 0; row < 8; ++row) { for(col = 0; col < 8; ++col, ++k) { fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); } } // Write Headers { static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 }; static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 }; const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width), 3,1,(unsigned char)(subsample?0x22:0x11),0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 }; s->func(s->context, (void*)head0, sizeof(head0)); s->func(s->context, (void*)YTable, sizeof(YTable)); stbiw__putc(s, 1); s->func(s->context, UVTable, sizeof(UVTable)); s->func(s->context, (void*)head1, sizeof(head1)); s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1); s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values)); stbiw__putc(s, 0x10); // HTYACinfo s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1); s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values)); stbiw__putc(s, 1); // HTUDCinfo s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1); s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); stbiw__putc(s, 0x11); // HTUACinfo s->func(s->context, (void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1); s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values)); s->func(s->context, (void*)head2, sizeof(head2)); } // Encode 8x8 macroblocks { static const unsigned short fillBits[] = {0x7F, 7}; int DCY=0, DCU=0, DCV=0; int bitBuf=0, bitCnt=0; // comp == 2 is grey+alpha (alpha is ignored) int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; const unsigned char *dataR = (const unsigned char *)data; const unsigned char *dataG = dataR + ofsG; const unsigned char *dataB = dataR + ofsB; int x, y, pos; if(subsample) { for(y = 0; y < height; y += 16) { for(x = 0; x < width; x += 16) { float Y[256], U[256], V[256]; for(row = y, pos = 0; row < y+16; ++row) { // row >= height => use last input row int clamped_row = (row < height) ? row : height - 1; int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; for(col = x; col < x+16; ++col, ++pos) { // if col >= width => use pixel from last input column int p = base_p + ((col < width) ? 
col : (width-1))*comp; float r = dataR[p], g = dataG[p], b = dataB[p]; Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; } } DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); // subsample U,V { float subU[64], subV[64]; int yy, xx; for(yy = 0, pos = 0; yy < 8; ++yy) { for(xx = 0; xx < 8; ++xx, ++pos) { int j = yy*32+xx*2; subU[pos] = (U[j+0] + U[j+1] + U[j+16] + U[j+17]) * 0.25f; subV[pos] = (V[j+0] + V[j+1] + V[j+16] + V[j+17]) * 0.25f; } } DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); } } } } else { for(y = 0; y < height; y += 8) { for(x = 0; x < width; x += 8) { float Y[64], U[64], V[64]; for(row = y, pos = 0; row < y+8; ++row) { // row >= height => use last input row int clamped_row = (row < height) ? row : height - 1; int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; for(col = x; col < x+8; ++col, ++pos) { // if col >= width => use pixel from last input column int p = base_p + ((col < width) ? col : (width-1))*comp; float r = dataR[p], g = dataG[p], b = dataB[p]; Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; } } DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT); DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); } } } // Do the bit alignment of the EOI marker stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); } // EOI stbiw__putc(s, 0xFF); stbiw__putc(s, 0xD9); return 1; } STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality) { stbi__write_context s = { 0 }; stbi__start_write_callbacks(&s, func, context); return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality); } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality) { stbi__write_context s = { 0 }; if (stbi__start_write_file(&s,filename)) { int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); stbi__end_write_file(&s); return r; } else return 0; } #endif #endif // STB_IMAGE_WRITE_IMPLEMENTATION /* Revision history 1.16 (2021-07-11) make Deflate code emit uncompressed blocks when it would otherwise expand support writing BMPs with alpha channel 1.15 (2020-07-13) unknown 1.14 (2020-02-02) updated JPEG writer to downsample chroma channels 1.13 1.12 1.11 (2019-08-11) 1.10 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs 1.09 (2018-02-11) fix typo in zlib quality API, improve STB_I_W_STATIC in C++ 1.08 (2018-01-29) add stbi__flip_vertically_on_write, external zlib, zlib quality, choose PNG filter 1.07 (2017-07-24) doc fix 1.06 (2017-07-23) writing JPEG (using Jon Olick's code) 1.05 ??? 1.04 (2017-03-03) monochrome BMP expansion 1.03 ??? 
1.02 (2016-04-02) avoid allocating large structures on the stack 1.01 (2016-01-16) STBIW_REALLOC_SIZED: support allocators with no realloc support avoid race-condition in crc initialization minor compile issues 1.00 (2015-09-14) installable file IO function 0.99 (2015-09-13) warning fixes; TGA rle support 0.98 (2015-04-08) added STBIW_MALLOC, STBIW_ASSERT etc 0.97 (2015-01-18) fixed HDR asserts, rewrote HDR rle logic 0.96 (2015-01-17) add HDR output fix monochrome BMP 0.95 (2014-08-17) add monochrome TGA output 0.94 (2014-05-31) rename private functions to avoid conflicts with stb_image.h 0.93 (2014-05-27) warning fixes 0.92 (2010-08-01) casts to unsigned char to fix warnings 0.91 (2010-07-17) first public release 0.90 first internal release */ /* ------------------------------------------------------------------------------ This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ ALTERNATIVE A - MIT License Copyright (c) 2017 Sean Barrett Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ ALTERNATIVE B - Public Domain (www.unlicense.org) This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
------------------------------------------------------------------------------ */ ggml-org-ggml-3678254/examples/test-cmake/000077500000000000000000000000001512524704700202375ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/test-cmake/CMakeLists.txt000066400000000000000000000003661512524704700230040ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.14) project(ggml-simple) set(CMAKE_CXX_STANDARD 17) find_package(ggml CONFIG REQUIRED) set(TEST_TARGET test-cmake) add_executable(test-cmake test-cmake.cpp) target_link_libraries(test-cmake PRIVATE ggml::ggml) ggml-org-ggml-3678254/examples/test-cmake/README.md000066400000000000000000000001311512524704700215110ustar00rootroot00000000000000## cmake-test This directory can be built as a separate project with an installed ggml. ggml-org-ggml-3678254/examples/test-cmake/test-cmake.cpp000066400000000000000000000001311512524704700227730ustar00rootroot00000000000000#include "ggml-backend.h" int main(void) { ggml_backend_load_all(); return 0; } ggml-org-ggml-3678254/examples/yolo/000077500000000000000000000000001512524704700171645ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/yolo/CMakeLists.txt000066400000000000000000000002461512524704700217260ustar00rootroot00000000000000# # yolov3-tiny set(TEST_TARGET yolov3-tiny) add_executable(${TEST_TARGET} yolov3-tiny.cpp yolo-image.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml common) ggml-org-ggml-3678254/examples/yolo/README.md000066400000000000000000000040401512524704700204410ustar00rootroot00000000000000This example shows how to implement YOLO object detection with ggml using pretrained model. # YOLOv3-tiny Download the model weights: ```bash $ wget https://pjreddie.com/media/files/yolov3-tiny.weights $ sha1sum yolov3-tiny.weights 40f3c11883bef62fd850213bc14266632ed4414f yolov3-tiny.weights ``` Convert the weights to GGUF format: ```bash $ ./convert-yolov3-tiny.py yolov3-tiny.weights yolov3-tiny.weights converted to yolov3-tiny.gguf ``` Alternatively, you can download the converted model from [HuggingFace](https://huggingface.co/rgerganov/yolo-gguf/resolve/main/yolov3-tiny.gguf) Object detection: ```bash $ wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/dog.jpg $ ./yolov3-tiny -m yolov3-tiny.gguf -i dog.jpg load_model: using CUDA backend ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no ggml_cuda_init: found 1 CUDA devices: Device 0: NVIDIA T1200 Laptop GPU, compute capability 7.5, VMM: yes Layer 0 output shape: 416 x 416 x 16 x 1 Layer 1 output shape: 208 x 208 x 16 x 1 Layer 2 output shape: 208 x 208 x 32 x 1 Layer 3 output shape: 104 x 104 x 32 x 1 Layer 4 output shape: 104 x 104 x 64 x 1 Layer 5 output shape: 52 x 52 x 64 x 1 Layer 6 output shape: 52 x 52 x 128 x 1 Layer 7 output shape: 26 x 26 x 128 x 1 Layer 8 output shape: 26 x 26 x 256 x 1 Layer 9 output shape: 13 x 13 x 256 x 1 Layer 10 output shape: 13 x 13 x 512 x 1 Layer 11 output shape: 13 x 13 x 512 x 1 Layer 12 output shape: 13 x 13 x 1024 x 1 Layer 13 output shape: 13 x 13 x 256 x 1 Layer 14 output shape: 13 x 13 x 512 x 1 Layer 15 output shape: 13 x 13 x 255 x 1 Layer 18 output shape: 13 x 13 x 128 x 1 Layer 19 output shape: 26 x 26 x 128 x 1 Layer 20 output shape: 26 x 26 x 384 x 1 Layer 21 output shape: 26 x 26 x 256 x 1 Layer 22 output shape: 26 x 26 x 255 x 1 dog: 57% car: 52% truck: 56% car: 62% bicycle: 59% Detected objects saved in 'predictions.jpg' (time: 0.057000 sec.) 
```ggml-org-ggml-3678254/examples/yolo/convert-yolov3-tiny.py000077500000000000000000000046471512524704700234460ustar00rootroot00000000000000#!/usr/bin/env python3 import sys import gguf import numpy as np def save_conv2d_layer(f, gguf_writer, prefix, inp_c, filters, size, batch_normalize=True): biases = np.fromfile(f, dtype=np.float32, count=filters) gguf_writer.add_tensor(prefix + "_biases", biases, raw_shape=(1, filters, 1, 1)) if batch_normalize: scales = np.fromfile(f, dtype=np.float32, count=filters) gguf_writer.add_tensor(prefix + "_scales", scales, raw_shape=(1, filters, 1, 1)) rolling_mean = np.fromfile(f, dtype=np.float32, count=filters) gguf_writer.add_tensor(prefix + "_rolling_mean", rolling_mean, raw_shape=(1, filters, 1, 1)) rolling_variance = np.fromfile(f, dtype=np.float32, count=filters) gguf_writer.add_tensor(prefix + "_rolling_variance", rolling_variance, raw_shape=(1, filters, 1, 1)) weights_count = filters * inp_c * size * size l0_weights = np.fromfile(f, dtype=np.float32, count=weights_count) ## ggml doesn't support f32 convolution yet, use f16 instead l0_weights = l0_weights.astype(np.float16) gguf_writer.add_tensor(prefix + "_weights", l0_weights, raw_shape=(filters, inp_c, size, size)) if __name__ == '__main__': if len(sys.argv) != 2: print("Usage: %s " % sys.argv[0]) sys.exit(1) outfile = 'yolov3-tiny.gguf' gguf_writer = gguf.GGUFWriter(outfile, 'yolov3-tiny') f = open(sys.argv[1], 'rb') f.read(20) # skip header save_conv2d_layer(f, gguf_writer, "l0", 3, 16, 3) save_conv2d_layer(f, gguf_writer, "l1", 16, 32, 3) save_conv2d_layer(f, gguf_writer, "l2", 32, 64, 3) save_conv2d_layer(f, gguf_writer, "l3", 64, 128, 3) save_conv2d_layer(f, gguf_writer, "l4", 128, 256, 3) save_conv2d_layer(f, gguf_writer, "l5", 256, 512, 3) save_conv2d_layer(f, gguf_writer, "l6", 512, 1024, 3) save_conv2d_layer(f, gguf_writer, "l7", 1024, 256, 1) save_conv2d_layer(f, gguf_writer, "l8", 256, 512, 3) save_conv2d_layer(f, gguf_writer, "l9", 512, 255, 1, batch_normalize=False) save_conv2d_layer(f, gguf_writer, "l10", 256, 128, 1) save_conv2d_layer(f, gguf_writer, "l11", 384, 256, 3) save_conv2d_layer(f, gguf_writer, "l12", 256, 255, 1, batch_normalize=False) f.close() gguf_writer.write_header_to_file() gguf_writer.write_kv_data_to_file() gguf_writer.write_tensors_to_file() gguf_writer.close() print("{} converted to {}".format(sys.argv[1], outfile)) ggml-org-ggml-3678254/examples/yolo/data/000077500000000000000000000000001512524704700200755ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/yolo/data/coco.names000066400000000000000000000011611512524704700220440ustar00rootroot00000000000000person bicycle car motorbike aeroplane bus train truck boat traffic light fire hydrant stop sign parking meter bench bird cat dog horse sheep cow elephant bear zebra giraffe backpack umbrella handbag tie suitcase frisbee skis snowboard sports ball kite baseball bat baseball glove skateboard surfboard tennis racket bottle wine glass cup fork knife spoon bowl banana apple sandwich orange broccoli carrot hot dog pizza donut cake chair sofa pottedplant bed diningtable toilet tvmonitor laptop mouse remote keyboard cell phone microwave oven toaster sink refrigerator book clock vase scissors teddy bear hair drier toothbrush ggml-org-ggml-3678254/examples/yolo/data/labels/000077500000000000000000000000001512524704700213375ustar00rootroot00000000000000ggml-org-ggml-3678254/examples/yolo/data/labels/100_0.png000066400000000000000000000005001512524704700225570ustar00rootroot00000000000000PNG  IHDR ggAMA a 
[examples/yolo/data/labels/*.png — binary PNG data for the small label glyph images (100_0.png, 100_1.png, … 120_4.png, several scaled variants per glyph) bundled with the YOLO example for drawing detection labels; raw image bytes not reproducible as text]
?VUW\a KO~VeD}J[qQ$H%tEXtdate:create2016-11-05T07:33:09-07:00&j%tEXtdate:modify2016-11-05T07:33:09-07:00`tEXtlabelx2EIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/120_5.png000066400000000000000000000011721512524704700225740ustar00rootroot00000000000000PNG  IHDR"@ZOgAMA a cHRMz&u0`:pQ<bKGD̿tIME !&9oIDATHcO0*U2J Q:7)@Lk1!E>5E0%~[^Bu0AUU/>ʀ)!hk!*04+XB/U0Cfе@4wuAXB XJLb| p{-`9+CaJp ׸2KI¹K+9„P"J`IN0dUySF7B`t%Du9ДAKAd U2"yL!JB?!u%B%0%!"aJA$]70!NZ-ü"QR̃љhU29I %tEXtdate:create2016-11-05T07:33:18-07:00{9-@%tEXtdate:modify2016-11-05T07:33:18-07:00 dtEXtlabelx2EIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/120_6.png000066400000000000000000000013021512524704700225700ustar00rootroot00000000000000PNG  IHDR(JIgAMA a cHRMz&u0`:pQ<bKGD̿tIME !IDATHcO$`U8pT!r` ?*0@@&'PiHVf1E(Fqc%TTn, PPMh|.#xAe?x< KsC, QʝAXcfTV9hhYмpW58)$' ~G:hP+La MO}^ (k}v {\ ϳAT@UFPCjI3ʕ :e =Ŧp/#D*]a*|/ lr^C91 1-P5t+2\ P/TOъJP*\paX|YD,Q SPxKЅ 2g1ʰ * ڠb/! 2CxZcU鋞Q Bkz\*.%tEXtdate:create2016-11-05T07:33:28-07:00*%tEXtdate:modify2016-11-05T07:33:28-07:00tEXtlabelx2EIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/120_7.png000066400000000000000000000014051512524704700225750ustar00rootroot00000000000000PNG  IHDR.TkZLgAMA a cHRMz&u0`:pQ<bKGD̿tIME !&ΒIDATHK(DQo3a&ȳ&yll,'RJY` ïsTN[j<0>{&4JB8 |ΈN6dIGE @CY%|Y>oq7SLhq[1}ťkEqH_2,~Ǚxn?!}i)C\ʶvT-jGSérTp]sNE~:}r VuBO `~$$P6̞ |"d7C] ݂ZOk0#`n6G%A\/ɫ`g#B!H "bG +1+[Fo1 @,;Ȳ@b1@[n+5Ab<_o1n>cCDG`0H4b֫費A]@T(FS%̴XRkv(8-]EA%}CPSXe `b8]H?v`q$cֺ,X;p$cֺ,X첐|1HRvY˚$ r+TZ%tEXtdate:create2016-11-05T07:33:09-07:00&j%tEXtdate:modify2016-11-05T07:33:09-07:00`tEXtlabelyIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/121_5.png000066400000000000000000000013261512524704700225760ustar00rootroot00000000000000PNG  IHDR"@ZOgAMA a cHRMz&u0`:pQ<bKGD̿tIME !! 6IDATHO(am?i$"9:99,_Ie` Q\)qUJN.(Jqp˶&3?qq}y~}fȐ I3Zvo ~A7G8Fj$@ga(.MU|4&'%>Kxh޵=%qBۗk#u1,-uXt9vw[E<,~M !d*Z٤w4p ^mNv=s0Br;Cw 2QE*kʉ)%tEXtdate:create2016-11-05T07:33:19-07:00N&%tEXtdate:modify2016-11-05T07:33:19-07:00HtEXtlabelyIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/121_6.png000066400000000000000000000014361512524704700226010ustar00rootroot00000000000000PNG  IHDR(JIgAMA a cHRMz&u0`:pQ<bKGD̿tIME !IDATH]HSa9u)2J3PZ^B aUtE "(FA!Cb ]FD*#EC"Qx1 T#L{b{;;ٞtf{<=elYYgHR5Z1ņES7!4IA;wkzrjy4\C ] ș8Ůt7%QaPjH?i H!Jtw4RuPz8%@򢧄xE;|nEv[|$K 6Iα!xx]_+x.o]- ~$e #^'*ONLVن2J<%tEXtdate:create2016-11-05T07:33:28-07:00*%tEXtdate:modify2016-11-05T07:33:28-07:00tEXtlabelyIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/121_7.png000066400000000000000000000015421512524704700226000ustar00rootroot00000000000000PNG  IHDR.TkZLgAMA a cHRMz&u0`:pQ<bKGD̿tIME !&ΒWIDATHOHaTdiY$졢EEBy萡VR^,:ԡB A菇CHAHb:Oȼ73;n]v Lk%ȋ78-MOȪѓ 1+>ZΨ9c`>FI7 7g,w9d})EɀŻV8~b}3Q}8==1Keg6;7lZUg*x9Ms9do:XI2z?Kd 96:Om7";RzM}ؿڽn=MS#8YgcޛBǁ֙Fj!Z7En<_J=SD%wPJn5y܄޺|zU.ϴyb8[zD,x7b[kt҉h5 >D]Ptꌆqx;>,]>( 8A\|T-%tEXtdate:create2016-11-05T07:32:30-07:001d%tEXtdate:modify2016-11-05T07:32:30-07:00ltEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/122_1.png000066400000000000000000000005401512524704700225700ustar00rootroot00000000000000PNG  IHDR S"gAMA a cHRMz&u0`:pQ<bKGD̿tIME  (01SUIDATc(3@|'A63H1L`qf'96= a l@|UZM/  h%tEXtdate:create2016-11-05T07:32:40-07:00}%tEXtdate:modify2016-11-05T07:32:40-07:00tEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/122_2.png000066400000000000000000000006041512524704700225720ustar00rootroot00000000000000PNG  IHDR  .gAMA a cHRMz&u0`:pQ<bKGD̿tIME  1TZyyIDAT(co`9.VGl >ɏ מ!V"'Y" C%!``ກ쯇 PkbH3 6 
"]B%tEXtdate:create2016-11-05T07:32:49-07:00zlC%tEXtdate:modify2016-11-05T07:32:49-07:00 1tEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/122_3.png000066400000000000000000000006451512524704700226000ustar00rootroot00000000000000PNG  IHDR+Y;'gAMA a cHRMz&u0`:pQ<bKGD̿tIME  ;IDAT(c0 c^̀ c^UuԐ0 ~nf(.t `8( ]xP4wnq} (j](u =L3geF&E *~.8,{ll9 D:,۲=%tEXtdate:create2016-11-05T07:32:59-07:00C0%tEXtdate:modify2016-11-05T07:32:59-07:00ǛtEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/122_4.png000066400000000000000000000007121512524704700225740ustar00rootroot00000000000000PNG  IHDR5gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! eCLIDAT8c0 9oLqp&Wf )ܹI,` Iq``  )Xa I`b ``& )ї&'Y@RHJ=6l.lib'#H.K1<8 ǒ`C1{"S+n}u~yA'%tEXtdate:create2016-11-05T07:33:09-07:00&j%tEXtdate:modify2016-11-05T07:33:09-07:00`tEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/122_5.png000066400000000000000000000007641512524704700226040ustar00rootroot00000000000000PNG  IHDR!@'ALgAMA a cHRMz&u0`:pQ<bKGD̿tIME !! 6IDATHcO0UAspT'd,Bxr(b1Na+d D_Tc 0]Q3_HébV17 g>+K?Gƽ8G?ĎT\+PKoSqhQ3,`?pVr?.;Rq(~RIb2*x W#?@@kqB*X@HB`THV\3T%tEXtdate:create2016-11-05T07:33:19-07:00N&%tEXtdate:modify2016-11-05T07:33:19-07:00HtEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/122_6.png000066400000000000000000000010351512524704700225750ustar00rootroot00000000000000PNG  IHDR%JWPgAMA a cHRMz&u0`:pQ<bKGD̿tIME !IDATHcO `U5jTnU ebTu'B_"TqOPOx(/TF-s}^"їxT_lwAK!QSjN|(?UG QY ϦD+ޮOp9(ֈcPמ(ֈ%(ֈAFJF`88jd kFlY:;ֈ 8؀j_ü/t i%tEXtdate:create2016-11-05T07:33:38-07:009*=%tEXtdate:modify2016-11-05T07:33:38-07:00HAtEXtlabelz-SiIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_0.png000066400000000000000000000005001512524704700225640ustar00rootroot00000000000000PNG  IHDR y9JgAMA a cHRMz&u0`:pQ<bKGD̿tIME  $9x5IDATc WmQ2< 9s % $*GT!}5*#BF%tEXtdate:create2016-11-05T07:32:36-07:00:^%tEXtdate:modify2016-11-05T07:32:36-07:00tEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_1.png000066400000000000000000000006001512524704700225660ustar00rootroot00000000000000PNG  IHDR 39gAMA a cHRMz&u0`:pQ<bKGD̿tIME  -@[%uIDATc @9W%ʮf`+mX3,gXpWfjڕ`x L?)psl6ˑang`*$s3_ȿ ]R%tEXtdate:create2016-11-05T07:32:45-07:00)%tEXtdate:modify2016-11-05T07:32:45-07:00̑ftEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_2.png000066400000000000000000000006761512524704700226040ustar00rootroot00000000000000PNG  IHDR G}ugAMA a cHRMz&u0`:pQ<bKGD̿tIME  79ܦIDATc ? 11?@_  w20$#Є̏g`h'Ё``%/24)g? }YAO o0 Efd~ C2A 1ɬp8M ml&h9%tEXtdate:create2016-11-05T07:32:55-07:00qf)D%tEXtdate:modify2016-11-05T07:32:55-07:00;tEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_3.png000066400000000000000000000007671512524704700226060ustar00rootroot00000000000000PNG  IHDR+gAMA a cHRMz&u0`:pQ<bKGD̿tIME !lgIDAT(c }wV`dX$vY j3` H%Auؑ joP-@h,bT%ܒ&f@ ["v( .,ӚYpS]Ltw 1+Hq$F_TL<*G(օ0X.XP, MK, M(f&6(Ul+æD{&Fň!ά $D5cK%tEXtdate:create2016-11-05T07:33:05-07:00DL%tEXtdate:modify2016-11-05T07:33:05-07:00tEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_4.png000066400000000000000000000010471512524704700225770ustar00rootroot00000000000000PNG  IHDR5~.tgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
UyIDAT8c0!gE@spI$H$ܰH؃$1%C C%8p#KH$X?txj>*?6 c7i 2 o%?Hׂ$ , o,cEbH‚*qNm Z2LP܇)q,{C'$;-G$El%.FD* ]|0uC%tEXtdate:create2016-11-05T07:33:14-07:00G4%tEXtdate:modify2016-11-05T07:33:14-07:00tEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_5.png000066400000000000000000000011311512524704700225720ustar00rootroot00000000000000PNG  IHDR@#gAMA a cHRMz&u0`:pQ<bKGD̿tIME !оNIDAT8+DQzLb?lllԔXd4++YHGD)AR~ %+KV &uSz vk@=}/--.scǵFW5tv h QYVAs>֠ymr&#z[v¶ }?8!u SxY@;7Gʾ綇Sv &~aGy6A]fAw z'^֩j}zBK[^ߨ|SȄJN/tD/,MaRUXOk /n%)j,-se%tEXtdate:create2016-11-05T07:33:24-07:002@%tEXtdate:modify2016-11-05T07:33:24-07:00CKktEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_6.png000066400000000000000000000012251512524704700225770ustar00rootroot00000000000000PNG  IHDR!Jb*gAMA a cHRMz&u0`:pQ<bKGD̿tIME !" IDATHO(Q[ݓOQkS)\\v/Anprكl({mvwV5GLy]-{}\ğR4iڣZ|BFSIzXw/q"81^h9y,0n;GP_~Yr(vIq"E 4+ qŊ3ϤGabA?ORzكJޜCElЪը.fJ&‹Y7鍿9!D)n.6mr؈"X$l+g6B hRTb_A k|!E E m{V\EO(b0QRćt(K.ŷkeS0 sup[M( H˕%tEXtdate:create2016-11-05T07:33:34-07:00@I%tEXtdate:modify2016-11-05T07:33:34-07:00tEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/123_7.png000066400000000000000000000013101512524704700225730ustar00rootroot00000000000000PNG  IHDR&T˼gAMA a cHRMz&u0`:pQ<bKGD̿tIME !,.G$ IDATH+ak iܦDpANv9P+F[(sYVA}?Zg0syϫ<Ij=}]-c̩rLelQXNbaFb*K;M Mol(+`ChO/͡,leY`(;`'(vS`q{@ٸupaQEm3i"j$>]&\q E^YWe7 5SXBr~̪ h1y|WRsCY&N)X@YVMLyY(K;G3cO6J[eY99`sk(QR/!,&;LhHeh268H/nS O Uf. oBK[a%tEXtdate:create2016-11-05T07:33:44-07:00yIP%tEXtdate:modify2016-11-05T07:33:44-07:00$tEXtlabel{ZcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_0.png000066400000000000000000000004361512524704700225750ustar00rootroot00000000000000PNG  IHDR }swgAMA a cHRMz&u0`:pQ<bKGD̿tIME  $9xIDATc?I"v ~%tEXtdate:create2016-11-05T07:32:36-07:00:^%tEXtdate:modify2016-11-05T07:32:36-07:00tEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_1.png000066400000000000000000000004401512524704700225710ustar00rootroot00000000000000PNG  IHDRqgAMA a cHRMz&u0`:pQ<bKGD̿tIME  -@[%IDATcW _/}%tEXtdate:create2016-11-05T07:32:45-07:00)%tEXtdate:modify2016-11-05T07:32:45-07:00̑ftEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_2.png000066400000000000000000000004371512524704700226000ustar00rootroot00000000000000PNG  IHDR =gAMA a cHRMz&u0`:pQ<bKGD̿tIME  79ܦIDATcg #ElQ%tEXtdate:create2016-11-05T07:32:55-07:00qf)D%tEXtdate:modify2016-11-05T07:32:55-07:00;tEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_3.png000066400000000000000000000004461512524704700226010ustar00rootroot00000000000000PNG  IHDR +gAMA a cHRMz&u0`:pQ<bKGD̿tIME !lgIDATc误_GY'%tEXtdate:create2016-11-05T07:33:05-07:00DL%tEXtdate:modify2016-11-05T07:33:05-07:00tEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_4.png000066400000000000000000000004451512524704700226010ustar00rootroot00000000000000PNG  IHDR 5N'gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
UyIDAT(cPg``H Q6gS^ q%tEXtdate:create2016-11-05T07:33:15-07:00L%tEXtdate:modify2016-11-05T07:33:15-07:00k<tEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_5.png000066400000000000000000000004531512524704700226010ustar00rootroot00000000000000PNG  IHDR @FgAMA a cHRMz&u0`:pQ<bKGD̿tIME !о IDAT(c^{C>TFyQN%tEXtdate:create2016-11-05T07:33:34-07:00@I%tEXtdate:modify2016-11-05T07:33:34-07:00tEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/124_7.png000066400000000000000000000004551512524704700226050ustar00rootroot00000000000000PNG  IHDRTAgAMA a cHRMz&u0`:pQ<bKGD̿tIME !,.G$ "IDAT8c_=,"M>W>>%tEXtdate:create2016-11-05T07:33:44-07:00yIP%tEXtdate:modify2016-11-05T07:33:44-07:00$tEXtlabel|ĵ\IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/125_0.png000066400000000000000000000004761512524704700226020ustar00rootroot00000000000000PNG  IHDR y9JgAMA a cHRMz&u0`:pQ<bKGD̿tIME  $9x3IDATc ;AS `YdfuяE! D.X=]#Xvn!W ]CV,"X)XlX5vمN.I*Д׼dAtmL!? :qnm'َee*$政,Jd] yՔ%tEXtdate:create2016-11-05T07:33:24-07:002@%tEXtdate:modify2016-11-05T07:33:24-07:00CKktEXtlabel}IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/125_6.png000066400000000000000000000012171512524704700226020ustar00rootroot00000000000000PNG  IHDR!Jb*gAMA a cHRMz&u0`:pQ<bKGD̿tIME !" IDATHO(aVӖh;Er`7)7WH9a%9r9Hю+F)Qi~=?weJy吏y}I!O@q8w})I($T) 'rxlD3YL dd""E- E(8P|Tr ;寸@RoW,0{Er1uoTE=TW]`MdƭW/23`o9qtm1 Q~H`V~}scS^\=_% )f wrh{gB1w5x4g@ 5hL Q`K] )V,OU>|TQhו)`%tEXtdate:create2016-11-05T07:33:25-07:00aKc%tEXtdate:modify2016-11-05T07:33:25-07:00<tEXtlabel~*pIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/126_6.png000066400000000000000000000010141512524704700225760ustar00rootroot00000000000000PNG  IHDR6JgAMA a cHRMz&u0`:pQ<bKGD̿tIME !" IDATHcO`6mTۨQmCXǨ~*tUcV/Y ב`@V۾tVd@v 0ZM|AHa4&0`rv6\ 8a_a 3-4pƗΘ+TUOb2ytMHwHIt rT2stnLO4dgh؍5Mj6mTۨQmSwc:%tEXtdate:create2016-11-05T07:33:34-07:00@I%tEXtdate:modify2016-11-05T07:33:34-07:00tEXtlabel~*pIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/126_7.png000066400000000000000000000010611512524704700226010ustar00rootroot00000000000000PNG  IHDR>TۤgAMA a cHRMz&u0`:pQ<bKGD̿tIME !,.G$ &IDATXcO`>}TQGҶk)/r]yU}˖=ƧM"C;?b5!ĀS & S-O-B 8UdC$c 'x џW a18Q0ҍ`hp.`ū;4K!E4C:vX"z9#fCȉ`'lf?^‹i~?ЌRxxCFd8?`QGj>}T!qj#%tEXtdate:create2016-11-05T07:33:44-07:00yIP%tEXtdate:modify2016-11-05T07:33:44-07:00$tEXtlabel~*pIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_0.png000066400000000000000000000004301512524704700225050ustar00rootroot00000000000000PNG  IHDR t);gAMA a cHRMz&u0`:pQ<bKGD݊tIME  $9x IDATcÀw2 .Q%tEXtdate:create2016-11-05T07:32:36-07:00:^%tEXtdate:modify2016-11-05T07:32:36-07:00tEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_1.png000066400000000000000000000004311512524704700225070ustar00rootroot00000000000000PNG  IHDR ]0gAMA a cHRMz&u0`:pQ<bKGD݊tIME  .RtfIDATc2&ka'H%tEXtdate:create2016-11-05T07:32:46-07:00$3G%tEXtdate:modify2016-11-05T07:32:46-07:00ytEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_2.png000066400000000000000000000004311512524704700225100ustar00rootroot00000000000000PNG  IHDR  t:gAMA a cHRMz&u0`:pQ<bKGD݊tIME  79ܦIDATc5?a}%tEXtdate:create2016-11-05T07:32:55-07:00qf)D%tEXtdate:modify2016-11-05T07:32:55-07:00;tEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_3.png000066400000000000000000000004321512524704700225120ustar00rootroot00000000000000PNG  IHDR+]gAMA a cHRMz&u0`:pQ<bKGD݊tIME !lgIDATc&uy/%tEXtdate:create2016-11-05T07:33:05-07:00DL%tEXtdate:modify2016-11-05T07:33:05-07:00tEXtlabel 
hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_4.png000066400000000000000000000004321512524704700225130ustar00rootroot00000000000000PNG  IHDR55M0gAMA a cHRMz&u0`:pQ<bKGD݊tIME ! UyIDATc0-ˎX,%tEXtdate:create2016-11-05T07:33:15-07:00L%tEXtdate:modify2016-11-05T07:33:15-07:00k<tEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_5.png000066400000000000000000000004351512524704700225170ustar00rootroot00000000000000PNG  IHDR@1gAMA a cHRMz&u0`:pQ<bKGD݊tIME !x(IDATcQ\A%tEXtdate:create2016-11-05T07:33:25-07:00aKc%tEXtdate:modify2016-11-05T07:33:25-07:00<tEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_6.png000066400000000000000000000004351512524704700225200ustar00rootroot00000000000000PNG  IHDRJkgAMA a cHRMz&u0`:pQ<bKGD݊tIME !#9IDATc?Q~&?ׯ%tEXtdate:create2016-11-05T07:33:35-07:00XK%tEXtdate:modify2016-11-05T07:33:35-07:00)AtEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/32_7.png000066400000000000000000000004351512524704700225210ustar00rootroot00000000000000PNG  IHDR$TYgAMA a cHRMz&u0`:pQ<bKGD݊tIME !-Y@IDATcFF靀LY%tEXtdate:create2016-11-05T07:33:45-07:00RB%tEXtdate:modify2016-11-05T07:33:45-07:00#SXtEXtlabel hIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_0.png000066400000000000000000000004541512524704700225140ustar00rootroot00000000000000PNG  IHDR }swgAMA a cHRMz&u0`:pQ<bKGD̿tIME  !Ii!IDATcp?6 r%Y@ #.8%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_1.png000066400000000000000000000004711512524704700225140ustar00rootroot00000000000000PNG  IHDRqgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?.IDATc Szen~Ho) 10Rt!bPr%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_2.png000066400000000000000000000005041512524704700225120ustar00rootroot00000000000000PNG  IHDR  f gAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$09IDATc F ܩ8?jAm@F=7bG*yj%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_3.png000066400000000000000000000005231512524704700225140ustar00rootroot00000000000000PNG  IHDR +k-gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)HIDAT(c ~~!lخOP?ŧ\v PSea1s`DR %tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_4.png000066400000000000000000000005341512524704700225170ustar00rootroot00000000000000PNG  IHDR 5>`gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
)QIDAT(c Py@ ~7xA`;+ lJ Cy ` f +<4e%tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_5.png000066400000000000000000000005551512524704700225230ustar00rootroot00000000000000PNG  IHDR@r/gAMA a cHRMz&u0`:pQ<bKGD̿tIME !qBbIDAT(c ~?'d9ŕ@_d0n& po]qL``P53< \yj`j}#tkKTo%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_6.png000066400000000000000000000005761512524704700225270ustar00rootroot00000000000000PNG  IHDRJxOgAMA a cHRMz&u0`:pQ<bKGD̿tIME !EsIDAT8c0E3@ȨȰyuNE"O.Y\Ng%]k"PD" ga"J00"?>{~6QD9N @`%tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/33_7.png000066400000000000000000000006111512524704700225160ustar00rootroot00000000000000PNG  IHDRTHgAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-Є~IDAT8c0`t  NfQQQa$ IԸE( I$A b ;\!( Gw!1A !"&rG[]YR "vvvS#M%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabel!oIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_0.png000066400000000000000000000004471512524704700225170ustar00rootroot00000000000000PNG  IHDR IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  !IiIDATc ݋7=G3@ĉ  2)//W%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_1.png000066400000000000000000000004541512524704700225160ustar00rootroot00000000000000PNG  IHDRظgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?!IDATc 3dɸm< }@Mk%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_2.png000066400000000000000000000004611512524704700225150ustar00rootroot00000000000000PNG  IHDR gAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0&IDATc 3(ӊ}al&4ٕ%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_3.png000066400000000000000000000004741512524704700225220ustar00rootroot00000000000000PNG  IHDR+gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)1IDAT(c wWիW30((H7/%tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_4.png000066400000000000000000000004761512524704700225250ustar00rootroot00000000000000PNG  IHDR5Ώ;gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
)3IDAT8c {}@C *V dXQ1%<*6*6b]vY^%tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_5.png000066400000000000000000000005001512524704700225120ustar00rootroot00000000000000PNG  IHDR@; gAMA a cHRMz&u0`:pQ<bKGD̿tIME !qB5IDAT8c0PE8YA 7p:lփΣ4e 09(wg+=%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_6.png000066400000000000000000000005001512524704700225130ustar00rootroot00000000000000PNG  IHDRJkgAMA a cHRMz&u0`:pQ<bKGD̿tIME !E5IDAT8c 0K&Wؾ&Hƛ~8*3*3*3*Cg0N- |%tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/34_7.png000066400000000000000000000005121512524704700225170ustar00rootroot00000000000000PNG  IHDRT[cwgAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-Є?IDATHc0 lU89;sYcps& GeGeR_r,c O5 %tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabel"HfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/35_0.png000066400000000000000000000005221512524704700225120ustar00rootroot00000000000000PNG  IHDR cCgAMA a cHRMz&u0`:pQ<bKGD̿tIME  !IiGIDATc @|W w/Xt< 'Xtw' ;b¼ @d~؜0;>0$s1ZX%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabel#?a9IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/35_1.png000066400000000000000000000006751512524704700225240ustar00rootroot00000000000000PNG  IHDR4gAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8IDATc 6# MbhC+!+4^`pߋa/L(^Aޞ>,`2p Av&aBLbZ,Πl GHB ?bC <?1`2p+w%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel#?a9IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/35_2.png000066400000000000000000000010271512524704700225150ustar00rootroot00000000000000PNG  IHDR )7gAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0 IDAT(Ւ?(70]2.)?%%np $HJg`ҍ )-tYRnA.J7|3ߨV~:PWӤfizN5RL km0N|3{X/%t #O"*LX3Xvo w>3d5-Ú1ulb6RIu0 G`BBAXljG=_O 5XWޅrMf++žWPS6LgyV%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabel#?a9IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/35_3.png000066400000000000000000000011741512524704700225210ustar00rootroot00000000000000PNG  IHDR!+gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)qIDAT8+DQƟ,WR6&5%5 e&YXXL4 R d#&4isN%e]9=t8?~i䚁`rq &pDfLrJ`p5@[Т〝K;9ƭ@p@@^5²f@?XO[84ƅPn . 
LݥmMcf}@çPPUSvN \Z>ܸ%yL­zڅF1Ʈ[a?#nttfg0I16;BR1$2nv^.G1mv$D "<(;CO2%4,F(6Fl|4)d!jY`A|5s.Σlnu0c9ޥ !ܲb0KN^q`NN3!N#Yqa0U}<.be; ƭyϬ:"[T^XbSf⣃խz3t `Q|܁T wǾSq7Uq']`d6 gF!nP\ ~oRWT N9/6Uiem8UV̈́+Sh:c/R:kpq@r* W67!VQ,ڇU .|qƪQڎ1!yU7Xhw3Z$qI^|kWW"%tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabel#?a9IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/35_5.png000066400000000000000000000014761512524704700225300ustar00rootroot00000000000000PNG  IHDR2@}{gAMA a cHRMz&u0`:pQ<bKGD̿tIME !K3IDATHMHTQǟ&?(̅BHc… Bl.Z C0x&s[E)"$k wN0DG7B(A})3rG3U7<ǜL1sw9NgVWe=]iЙ !z/3(tG(eQW^"4ŰCPdj)tna +k EtyiFU(Tez?+"H֕v(aU@Q?*k4I#*t4)d+zf+m5KU[_qm/e9"bP WٿȨBǠϺbkFW)'7A]YAPİˬ$RpJr {_c+_ʊE*^}=dVn1ҶVLj7ʄ|6H@u_ e!kTe'BUB}PBυו& |aXߨ 2 僮 #2AaTW(_.*))cuel"Aѕ C3]]V"lbAѕթr_]Zm~%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel#?a9IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/35_6.png000066400000000000000000000016111512524704700225200ustar00rootroot00000000000000PNG  IHDR9J$ţ6gAMA a cHRMz&u0`:pQ<bKGD̿tIME !E~IDATXMHTQǟ̠҇FFTiQ*((!E- 2 L .FAj!} AYh#jB"X87Q,`WV%nN7%70703;ɯ/k>7~1 k\la~w-nVFRv=\Gɕ x~o1e#✏‡U-'jm\Q1Y Gԡ^DJ긄ISLAnW(ȤH$]qǶM k61TlH6[yg:Z+|{I֎{W=gr _#;ZGdΰqd%X~ ̤ ִԡ-"R NbG5d!2;8ZJ글{D7AUnnf6r"(Nӱ]oc㾵ܰ -|sZQ?᪷LvN.L}yTR8i Oq2OEVKǘ Qk{gx cvYNv\Ǥ'x@bz*ah;FTyGޑwyR_lGh%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabel#?a9IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/36_0.png000066400000000000000000000005301512524704700225120ustar00rootroot00000000000000PNG  IHDR tgAMA a cHRMz&u0`:pQ<bKGD̿tIME  !IiMIDATcm(J}a@_u )  BK3 >/21zV0@9f M%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabel$/IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/36_1.png000066400000000000000000000006711512524704700225210ustar00rootroot00000000000000PNG  IHDR >IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8IDATc;C?Kk3@2WG'$pazn20򠦼ccP120(pD1w7P.͌ )p  kAXAz Q a>f`}܀2{ޅ=vH<8 I7P&%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel$/IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/36_2.png000066400000000000000000000010051512524704700225120ustar00rootroot00000000000000PNG  IHDR gAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0IDAT(c?(U]!+  &8A~9 GXCv>f*X) !49YLPtS,ş{,r% Pe"H_԰ <~@g!b _1tA}&~ןasC P[ fjQG%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabel$/IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/36_3.png000066400000000000000000000011271512524704700225200ustar00rootroot00000000000000PNG  IHDR+G'gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)LIDAT8c }ɉ!?tG" x E>"$/7<'*c-#PATb#H\ ĹӸJ^hID"DNIXcvLb`hZCz 2B>yhpp4|G ݏP)3h("> UU0$ R%eɴbJIxa0I0Hg K1#A$>)x* Y -Db![r2 BV:ԃ|QwϿ!|E ],QfhF55/1Ab3ͩ_6%tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabel$/IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/36_4.png000066400000000000000000000012571512524704700225250ustar00rootroot00000000000000PNG  IHDR5s0gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! )IDAT8c W 3MRMz&# -E~g̀&"I3Ge`>nH2ȥ@`|rOJ0JG؝ A K ?.j7ҋ!NK]+D>˄V%Ya2DLѥ{31'g%)+! 5Jo*Dv`iإhCXzBkdiQ! 
-H _HV#!3 Dr_ ɢ]* 11JucMfeՂ_Qt5X$g>tHܺO(eM5ۿ[9$moczK9%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel$/IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/36_6.png000066400000000000000000000015471512524704700225310ustar00rootroot00000000000000PNG  IHDR(JIgAMA a cHRMz&u0`:pQ<bKGD̿tIME !E\IDATH_LQ2Zv6b7bؔ%5SKц$J1 B'-ڲxky-0lFL3~ϽM<~9{@F{%1Bő:(U [@*v5x*|ՠö ^M-VBAeSWſ/%| ]Zq߳2;KYvk), ZLWZ;a= aeId lEfn1^1ߙa?F89ɚ'=s]HjAH0xbLnhwe[ ]`5\W,u:J8~:$m_1uBYR K(N@x^1e1wFN g:.zGx+)=<f4c fRn*J,` &rStyhqgs?;[\R|3<=}`bpݚ"\t\3VLmΤ~ubZXzH7X 1tlYȣp̣ Wy)lHY9ݾ89nawYsY#,oS/ovx`^}Z{۠Mn=cЮ|Y;mA+_=VK7$)kgWi?3Cs+i7`6<%?E ^;bx>!Av1h,&En'Rt|Q=n2ۛ6<\CU3N5wa\ D9&3<oDN#|I#-%,.:Ά%[}j|^]~coF^%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabel$/IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_0.png000066400000000000000000000005371512524704700225220ustar00rootroot00000000000000PNG  IHDR cCgAMA a cHRMz&u0`:pQ<bKGD̿tIME  !IiTIDATcb`x>gg ڸ ^ҢrEzOf֝t ļ^O3xZX\ {%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabel% IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_1.png000066400000000000000000000007531512524704700225230ustar00rootroot00000000000000PNG  IHDRgAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8IDATc ,bbW_Ͱ\*7˷z2|Ef0ds3x½n^o #[Ȝ8˅?ߙ zb U7CIIp/R$ofXR 0[2tĂ݁*- ۠bX ='BcWgUt9E%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel% IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_2.png000066400000000000000000000012221512524704700225140ustar00rootroot00000000000000PNG  IHDR \gAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0IDAT(ՒO(aǿ?mֆ$ȟZOjKsT`M+)).JE$YqܦaNf[x~M=y>}^ NHelXC% *E`kPx{ ; TD04¡Ib-OMK qeLDAFC5Dǀhci+ވSH`(D&g+r뷉NP%q3) ``k0ڹ,3FqGVh+ni)sɴA & ]Rֆ-6 P'a00w|HEd2Z}r`"[%N=Uɩ?RU3_ix_ :b}eZ%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabel% IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_3.png000066400000000000000000000015321512524704700225210ustar00rootroot00000000000000PNG  IHDR$+5=ugAMA a cHRMz&u0`:pQ<bKGD̿tIME !)OIDAT8OHTQ?1,,Т?F]H,bMDAS(B4 Er ?.BBФp"4;}`YHHyr ?(!#$e%tR҂>fRϯjͳi}rn1< ׷D9XSζ2efpu;]s@+Cy+lyJq);|2װw7Se\(#oTYod^঩7;~Ky!0L2.7X3[r :,U)`[,`-sI9,Th %\$}W+*e'%r7xUy%0QK,K$rW1ۘXTC,S5z?|YA2p.r6nϟ)h"ʛ U/_FʊXrQ@)%tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabel% IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_5.png000066400000000000000000000022051512524704700225210ustar00rootroot00000000000000PNG  IHDR4@p gAMA a cHRMz&u0`:pQ<bKGD̿tIME !KzIDATHYHTQǯ;nieQX-H/Y!փ䤥PbPR(XQXR{82wOu`03"قXW;~Apl uzQ] 0+qT8| GN{ma6ͲمX^"׀%K%5#<ĘјWGr8->纾-H-DU=F{&R7=S=iʄF:eaꁹtSI[w(mBy _)Uizڂ^Fft!|1RP4*6 f0D(]iBtG-tOӄ6_.Wr -!--U"Hyׂ5$Fj&PG/RrBPm(feJ>QCok>}VܣGbI9O鹣 A:hI STQ4bס 7qP.TGhF_.J D\æBPz>OnSPM)_$ 'ƕ+]uI >+qx%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel% IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_6.png000066400000000000000000000023531512524704700225260ustar00rootroot00000000000000PNG  IHDR=J-.LgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
'h IDATXYHTQϸ5NiVXXj9P=ddQa -neD!eXZJIڃFQiP˝=wsgΝ z's!"TNx_;MqZL/%ژ\%31B#F-Y\ ~ wS% m}2kܴ4 t A~hzz{ /=$IHNEK )#qcQ2M?\oQ n)fTHninܴ%XCb4kz;5TL!GWV ]z'\, &3:̛N 4)<Ԍ.jыw*rٔ Z:TX]LM60k4){i:j0V_hx&EKs` NY>Zꉦi8J"@l,jVP4!AiÅVV۬ΡhZj#rfb:;Mz9#MQmsk%@G+/=]^XWWv_V(4UA< .#Ʊ+ z(#3,TXj\œ;]LW2A(mVcd i.;| _@/Huh;|gSAox!C)LK=7ozMҜ^®5 H&N2魼 [=:ʌ=W:t3LTmH6'9&K7^wqqwF l1 ]Jb-P rP!͞ju` r!}#ʸ  ;(;,]^**O֨6<!J*fR9X4"uݠ0Vlȩm,Sp;p(`{F 3~#=ptV%tEXtdate:create2016-11-05T07:33:32-07:00lus%tEXtdate:modify2016-11-05T07:33:32-07:001tEXtlabel% IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/37_7.png000066400000000000000000000027061512524704700225310ustar00rootroot00000000000000PNG  IHDRFT[gAMA a cHRMz&u0`:pQ<bKGD̿tIME !*$>IDATXkLWLJ],"iWT@LlZRM1hc(FتY,1b4jFD464H}EZ1!yVq;{gC?h49ss/M{A1uRP7ӡ{ ຬ\z%θJjYX8 )תT`٠tR--p 3 V/:W͙KVanɰcڔXϗ>”cxA}StJX@x'Љ窒['  MaOc&ScFc!4Z k'_ 6/Ikh^>ے:ZbZւ| hDF' +ª u 9b,Yj%;cؗX[PynquV[,^|" amR4NeN:5wY9cKQ!O1Q}SUחf0^ɯ!1Dv{۶ڦ?»` |vEGsXB+`t~b;֥$6 qX97梨]587~' Hc D8CxZIvD3t8`BA7i4򻩏T&PN=&r=KY GENs5;M9S̄~w>WpphG* ^+-ne'+&WIDAT(c0a̪]po@ Էfb`z"(>@ EAVa`F``ZP$vh*ETWKќLܼ[i  1aD&PL~#FH^&﵀auu < ^ _ Op308%~20Lk>f`x4i!ľ ' u karΆI\Z”h70@1Ȱ$[,^VOMlt5$*2lm %tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabel&O NIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/38_3.png000066400000000000000000000012721512524704700225230ustar00rootroot00000000000000PNG  IHDR+Jg`gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)IDAT8c0'ɒQ%V ;t)aH`(A ѥ@F HM//P)dyI?s, 4 d0W A[(yXR8 ]z*HT@Z+ 1$,PDNb ԃ,Yӻw93X{j 5:[^G=~k0Z~p&jœic _|g҅ Y@BK>Sx V[ vK1Alxn4XzH ԡĂ| el?KPhʾh>@3?8_ C K2]-,P{Pߜ e%]LI#0K!ޓ&hjy{c&6J&SA%tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabel&O NIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/38_4.png000066400000000000000000000014671512524704700225320ustar00rootroot00000000000000PNG  IHDR&5GgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! ),IDATHcO`OZ[fWمXV0SٯlF8ۂCٿ@d 6e$GfVbS,e fcSV Q)+7,ؔŃ]8ؔe1ءؔ=bI1n2ln`9+Y؃3DV:l#kqz),1TSo8SoPE+ǩ.T-[DCٿDH KFVQĿ*7B*@^&u,u,\$$l?x@{5 K TIg!w[ǡ s3YH7* bz/! +~:ݠnAȪؠF/,.AQ bmxa1DHE3D|gFU~ XrCT<,w%U5@7!> D8P}lAd:J̀rߩ~CJ@՟`V!"HʠV0.ANXƷд̶9lĢ6Am⿌9t[.I/ ȕpAKB?!| <:Q]~#E5Q`TV |n%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel&O NIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/38_6.png000066400000000000000000000020351512524704700225240ustar00rootroot00000000000000PNG  IHDR5J>CgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 'h IDATHkHQ蚚fihV*RYJ;DBD%AQFiЃr?ZJ=(M "(R#1Z~ ik! ݛ̽wO7{gdQo}Y^/e/&JDPmTރ᱘^B&V!>craeqiW`3m yj{c 'bP^ }) @)PS-NrG@%\8sr! -k)1Wgy/ŏeexCBj 4TQ!E߸55XnbA. o*}SmF1v*Ƙ9LuQO)PXz tQ߹kmC06lTEW ±bwd8Tlօ+zX>}L:id7[YPiuJdF3mTQFim@>BZG8fȏ"fO;!;Pa VoX%U2JȈ5zź qGK{"h7?ggx;YHz>{mp2e:pǬ*:۽QSb.\Vz,f 25pTKY Kr)abKZXGEIO`LbV#u+bLf#|:Kѫdys|ÿ@Rq C=7rT{~{/%tEXtdate:create2016-11-05T07:33:32-07:00lus%tEXtdate:modify2016-11-05T07:33:32-07:001tEXtlabel&O NIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/38_7.png000066400000000000000000000022071512524704700225260ustar00rootroot00000000000000PNG  IHDR=T`gAMA a cHRMz&u0`:pQ<bKGD̿tIME !*$>|IDATXkHQΙgSLR D%1-+? 
a*M$bb`D 0 H3MLh?L#l^I{wO}.!7'nkkOtF-rGxVLĄI1(tW m/to8UC #yt y\=ڜ`Hz7{IHt6錶-5<F:4Ar8Fi Iqt+i`jI(G?&v$ 8 o\5G;3PՓηNm')'8ӵp둞UQKγ#ǹUaƸ;Fꇘ 4tPrSUil bm=VqdS?Ê5pgCk5œxxՎ?mc?įJ~icHnha2]g8D_gIk_1Askcؒ{es*>zhDH$R%;ŔNs&[NSZWu;ur͞Tz'4crdqg/wm"m=j< cO!;`5vl\OY->Astx"қ:M~oX[2KHSEm#dދ\m2WQ)ɝÑ ,XtxY~*ATŧ+7$9L=Q;ܿO"5)gc)DdZYY{C5~Ȕ5v% /k\fkD6gŦvnWFA"jP1IC-?g!.ؚ"c*.kd @`r@H5?jsMjEoʽ=z>>/o0Iʇ%tEXtdate:create2016-11-05T07:33:42-07:00|j%tEXtdate:modify2016-11-05T07:33:42-07:00tEXtlabel&O NIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_0.png000066400000000000000000000004541512524704700225220ustar00rootroot00000000000000PNG  IHDR }swgAMA a cHRMz&u0`:pQ<bKGD̿tIME  "8M!IDATc63<] ?!EC)hCX%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_1.png000066400000000000000000000004771512524704700225300ustar00rootroot00000000000000PNG  IHDRظgAMA a cHRMz&u0`:pQ<bKGD̿tIME  +84IDATc`/"xQ3?T^%I%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_2.png000066400000000000000000000005241512524704700225220ustar00rootroot00000000000000PNG  IHDR n1gAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S7IIDATc @|"r'(3a&[A; 6 !jj{Pf C$p+~"6o=)3eS%tEXtdate:create2016-11-05T07:32:53-07:00~%tEXtdate:modify2016-11-05T07:32:53-07:00ctEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_3.png000066400000000000000000000005601512524704700225230ustar00rootroot00000000000000PNG  IHDR +i6gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)eIDAT(c@HCPuCL- Qa` JL +P 3z '! 7`K wšsV۝#`%tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_4.png000066400000000000000000000006041512524704700225230ustar00rootroot00000000000000PNG  IHDR5\]gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! )yIDAT(c ?T;_8|G uDIoC\@  Ww!s%a``}J!ȿ߳S>1פ%tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_5.png000066400000000000000000000006421512524704700225260ustar00rootroot00000000000000PNG  IHDR@2xwgAMA a cHRMz&u0`:pQ<bKGD̿tIME !KIDAT8cp 輅.S{Y;?pDZaHCVa$$l6cω QQQ! P[%>!Y%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_6.png000066400000000000000000000006741512524704700225340ustar00rootroot00000000000000PNG  IHDRJf+VgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
'h IDAT8c0 @/h'o=`t# BQXbfX)O`"bWQC( <",fMjW, 1(S 5HLF`j ĸޠ Q*H.9n, afexTpTpTp LJ%tEXtdate:create2016-11-05T07:33:32-07:00lus%tEXtdate:modify2016-11-05T07:33:32-07:001tEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/39_7.png000066400000000000000000000007261512524704700225330ustar00rootroot00000000000000PNG  IHDRT3gAMA a cHRMz&u0`:pQ<bKGD̿tIME !*$>IDATHc0-¶.V 1%@8_4&,t4!?$C5[^ ~&O"q(tHC wH.t/`q}`_ ѰC,.]-+X#g?cH؁%01#H9DXC }a@o0갨$QQQQQ JD>)?Q%tEXtdate:create2016-11-05T07:33:42-07:00|j%tEXtdate:modify2016-11-05T07:33:42-07:00tEXtlabel'8 ~ IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_0.png000066400000000000000000000005011512524704700225030ustar00rootroot00000000000000PNG  IHDR IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  "8M6IDATc=diyD}@+Dά@?-%tEXtdate:create2016-11-05T07:32:34-07:00~+w%tEXtdate:modify2016-11-05T07:32:34-07:00`#tEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_1.png000066400000000000000000000006061512524704700225120ustar00rootroot00000000000000PNG  IHDRظgAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8{IDATcպgY@1?apx˨XǐaT1̀0@F  E,/C=a SphaafH0>sBC+!ҼADI-}%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_2.png000066400000000000000000000007011512524704700225070ustar00rootroot00000000000000PNG  IHDR n1gAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S7IDATE! p២`1 hD@h2d`1j8-& aX&wwxpr0/WD=4.䂘'km[A%:g-8]:b?;h'a#a*>SDS%8Ѱ-*?W< Y~f-IU_l * %tEXtdate:create2016-11-05T07:32:53-07:00~%tEXtdate:modify2016-11-05T07:32:53-07:00ctEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_3.png000066400000000000000000000010101512524704700225020ustar00rootroot00000000000000PNG  IHDR +u]gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)IDAT(u?HBQA!9ɥAEZE "j%"$u; ~˽, foRXy(hf f `תךGTj6~ڌ=RIkzftE]J>u'S}z/i=j*RRJTGjR7HP6vM{czH0Co H91I=Խt H~KQGu4Ȝ?̸>Ԗ %tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_4.png000066400000000000000000000011101512524704700225040ustar00rootroot00000000000000PNG  IHDR5m48gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! )=IDAT(]ұ+aN$%Ăd\20P\e2+&܀le8lDQ)EW]~z|?gx~|kw®s>6h ͣd]:atP`P= 6cQŝK%V;cqS  U M[҂`Zr-|<7-)H(2ϘA/à0e: p& &>XS9V]\LAs pܦc>o}x|Oe %tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_5.png000066400000000000000000000012011512524704700225060ustar00rootroot00000000000000PNG  IHDR@!IgAMA a cHRMz&u0`:pQ<bKGD̿tIME !KvIDAT8mO(DQ;X`7e%eXH)[ fAL1bj,LRR`afBӈ Qʈ7?y=͸[=羧tRJTf6 Sve1V!_Ѵ4ʦ轓iuBtC͑NL+5 5ئeQ6F3X yLkw4Y' .z_d8q ݑ,B%J풹ݦ"kXE6 {'}aod<,G %ۂRmGS+6L:ن-;dx 1l X7۬X+YEw >1CEY =d1Ȟ1A2KΓ?"/%Q$;{|g?l-%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_6.png000066400000000000000000000012511512524704700225140ustar00rootroot00000000000000PNG  IHDRJq@hgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 'h IDAT8mM(Da4;MMYؘ(vX)?HPR2J3Ib!ʆB6&RL)jĂp3^s{:au lXX3,hO戶qb?.25ΰ] F k#M)gm`)C=k` MGub @#^h-tVV8t5  3gCk? 
b~@YIq u^@gYߡpu|w'h((-TtWVEE]Y0"XvVVH%[:D{Y fR4S ZjUܩFhz)VZ'/#V:wEo6L%tEXtdate:create2016-11-05T07:33:32-07:00lus%tEXtdate:modify2016-11-05T07:33:32-07:001tEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/40_7.png000066400000000000000000000013661512524704700225240ustar00rootroot00000000000000PNG  IHDRTRD gAMA a cHRMz&u0`:pQ<bKGD̿tIME !*$>IDATHuKHQQ$JZ.j!In ](JЃ ѢDAڈ -*CBD$KK|07濘j9w8s=(~kW#"2Y @ 2vY)mXA꯰5(A~X҈I6E&Q$O2؟ y=,Io*Vy!\5va{ $؁~k, eRh &m-s iLJz!o$#qHdDiA$|nAKbQ8璟 {J2 y/y OK!.I5̤`r'fI7ܠT<5ق3#dXE=;M.Q g܁Z,LK7պ;1l_5!c*wڤnA\$x}HF!S&%h,l5³.v,oQXyxڔW%tEXtdate:create2016-11-05T07:33:42-07:00|j%tEXtdate:modify2016-11-05T07:33:42-07:00tEXtlabel(cIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/41_0.png000066400000000000000000000005021512524704700225050ustar00rootroot00000000000000PNG  IHDR IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  "8M7IDATcOb߁ b"W$ 2%4|$/zty $-+%tEXtdate:create2016-11-05T07:32:34-07:00~+w%tEXtdate:modify2016-11-05T07:32:34-07:00`#tEXtlabel)ߴS'IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/41_1.png000066400000000000000000000006011512524704700225060ustar00rootroot00000000000000PNG  IHDRظgAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8vIDATc?01,02C@ =^#$_3ap a8a20A Ɵ`w& 9!5[0&'= +A]sUe+%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel)ߴS'IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/41_2.png000066400000000000000000000006711512524704700225160ustar00rootroot00000000000000PNG  IHDR gAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S7IDATM1 qle%$^IyFe3@VxI=xO=?Oeccme3E"l:3l p۰.54* 2~|>x{ #y ن@ΒV r5}ҞIJ=}g//E+ O3C%tEXtdate:create2016-11-05T07:32:53-07:00~%tEXtdate:modify2016-11-05T07:32:53-07:00ctEXtlabel)ߴS'IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/41_3.png000066400000000000000000000010121512524704700225050ustar00rootroot00000000000000PNG  IHDR+gAMA a cHRMz&u0`:pQ<bKGD̿tIME !RIDAT(]!KQWdͲ"M(h~1Ų4X2 CPA 3 IX4d1wN{r8G!K|~ )ۅO8?, p^Ôxkd/ý4Ipn=\pCn.|eWᆽ5Ⱦ7gx~L9q x+i<KG;xE~V;-q?7~f} ְ6%tEXtdate:create2016-11-05T07:33:03-07:00y$%tEXtdate:modify2016-11-05T07:33:03-07:00tEXtlabel)ߴS'IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/41_4.png000066400000000000000000000011011512524704700225050ustar00rootroot00000000000000PNG  IHDR5;_gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! )6IDAT(eӱ+agRNYȕLJII )bl 7X$reHH aE1\6s~~->|yo?TvqJU)_ibNY+ʭd&:eUCʹdhėlK*%n1^(+#ʎdr*Y'i]ںj>٧if€cve`WuQ$g?GuD<_f ;nnc]k^)co@Gvwg%} ߠUv|};Bg4 ;An;{p/tr^*ŝdô3)\w2nogHc?Ŵv{x"^؏ ("D+~nIDATH}OHTQ Ӱ&pc &E89Df1b!RQčF+7b M)HEt ATZ%43:?ݼx7pι3sg?V2@u'JlzfvPhAoxdvjl^I߀=Q`( XA7&dc`{YfZ8X~ ʂ`4XD`#`s`f0WfvQd`òIOo`?da?j0WlVEvgH0;t(%tEXtdate:create2016-11-05T07:32:34-07:00~+w%tEXtdate:modify2016-11-05T07:32:34-07:00`#tEXtlabel*FIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/42_1.png000066400000000000000000000005401512524704700225110ustar00rootroot00000000000000PNG  IHDR 39gAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8UIDATc @e? 
r}JSN*Xͷ `5E5y VU03?Gl`bRؿ%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel*FIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/42_2.png000066400000000000000000000006201512524704700225110ustar00rootroot00000000000000PNG  IHDR G}ugAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S7IDATc Sj0t?a`~7 ?"S& @fw[ ?ԼsA K `S3&c1x/Q>N>(%tEXtdate:create2016-11-05T07:32:53-07:00~%tEXtdate:modify2016-11-05T07:32:53-07:00ctEXtlabel*FIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/42_3.png000066400000000000000000000007101512524704700225120ustar00rootroot00000000000000PNG  IHDR+gAMA a cHRMz&u0`:pQ<bKGD̿tIME !RIDAT(c p20,C I_20P.Tl.[ϕ >f02섈300h00tJu P̎;^`HE~3D+k X++C$P"V`ys*Ӻ s be;``8W#o@ 辭@/Dw?'&FFɜD%tEXtdate:create2016-11-05T07:33:03-07:00y$%tEXtdate:modify2016-11-05T07:33:03-07:00tEXtlabel*FIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/42_4.png000066400000000000000000000010101512524704700225050ustar00rootroot00000000000000PNG  IHDR5~.tgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! b.4UIDAT8c0-E[[6 7$~=@ w/ƟJ|]=\A+@W30J 1f^b`z d2(X$ 300[!V8DX'?NK%ϣzJ#$;;1L@!5,@59THRs ̹Y 3owQo 3:3DHJ<^D-l 8G8B[%%tEXtdate:create2016-11-05T07:33:13-07:00y>y%tEXtdate:modify2016-11-05T07:33:13-07:00ctEXtlabel*FIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/42_5.png000066400000000000000000000010641512524704700225170ustar00rootroot00000000000000PNG  IHDR@#gAMA a cHRMz&u0`:pQ<bKGD̿tIME !K)IDAT8c0PCl K%{ (Ɏdq"Ȳ@Y]p$% R\ GᲟ$A ~cLv'?S̑7*iMHr/&s KhHz| /m0"?PeLGv܂߳e oKpᲛ :Rn {`}@t=L q?U IDATHcO`pe@02 %L,2E4\ʾr@ʊ?Дe݀p@!5117qAz@' |/IjrB@b#+Y'K_1_ARrAԚFvH!,0aA8Q}zɖޅh a9Ȫ⊬VpE'pl28)D#ejy=*Mٗ6A,@n_de` Z ڎ,*(^8sks? *<G$9 +(^,)k3/2TȊYP8@eL-;Ede )VDHE27BQeF*U6lT`Sn[%tEXtdate:create2016-11-05T07:33:42-07:00|j%tEXtdate:modify2016-11-05T07:33:42-07:00tEXtlabel*FIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/43_0.png000066400000000000000000000004551512524704700225160ustar00rootroot00000000000000PNG  IHDR 2gAMA a cHRMz&u0`:pQ<bKGD̿tIME  "8M"IDATc @| ΒGgu!ʾXh=%tEXtdate:create2016-11-05T07:32:34-07:00~+w%tEXtdate:modify2016-11-05T07:32:34-07:00`#tEXtlabel+12 IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/43_1.png000066400000000000000000000004701512524704700225140ustar00rootroot00000000000000PNG  IHDR:dgAMA a cHRMz&u0`:pQ<bKGD̿tIME  +8-IDATc Y֦60/ADF>O4eK!(;%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel+12 IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/43_2.png000066400000000000000000000005001512524704700225070ustar00rootroot00000000000000PNG  IHDR 3WgAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S75IDAT(c0C0 4^Z Pa7:h#CF覻{בP8Y Q5TVZjT)EuU*%tEXtdate:create2016-11-05T07:33:13-07:00y>y%tEXtdate:modify2016-11-05T07:33:13-07:00ctEXtlabel+12 IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/43_5.png000066400000000000000000000005231512524704700225170ustar00rootroot00000000000000PNG  IHDR)@gAMA a cHRMz&u0`:pQ<bKGD̿tIME !KHIDATHcO,`U9XT^< Qi*GUAr&3*K1U948QW !G5%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel+12 IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/43_6.png000066400000000000000000000005331512524704700225210ustar00rootroot00000000000000PNG  IHDR/JuRgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 'h PIDATHcO`U? 
ph 0{TQ7F\`RB`QG FՏUO<e%tEXtdate:create2016-11-05T07:33:32-07:00lus%tEXtdate:modify2016-11-05T07:33:32-07:001tEXtlabel+12 IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/43_7.png000066400000000000000000000005421512524704700225220ustar00rootroot00000000000000PNG  IHDR7TngAMA a cHRMz&u0`:pQ<bKGD̿tIME !*$>WIDATXcO`7osgͣF7H[HB8`JbeTߨQ}#\a0oTߨQ} Rk%tEXtdate:create2016-11-05T07:33:42-07:00|j%tEXtdate:modify2016-11-05T07:33:42-07:00tEXtlabel+12 IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_0.png000066400000000000000000000004501512524704700225120ustar00rootroot00000000000000PNG  IHDR IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  "8MIDATc _@d}{ $ߴe4*W%tEXtdate:create2016-11-05T07:32:34-07:00~+w%tEXtdate:modify2016-11-05T07:32:34-07:00`#tEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_1.png000066400000000000000000000004751512524704700225220ustar00rootroot00000000000000PNG  IHDR)1gAMA a cHRMz&u0`:pQ<bKGD̿tIME  +82IDATc ^ A%? aX\I'>0 ywlSt>3%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_2.png000066400000000000000000000005241512524704700225160ustar00rootroot00000000000000PNG  IHDR gAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S7IIDATc # }9]Wc`8 ee`-a`8eepya/`eBdF٩%tEXtdate:create2016-11-05T07:32:53-07:00~%tEXtdate:modify2016-11-05T07:32:53-07:00ctEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_3.png000066400000000000000000000005601512524704700225170ustar00rootroot00000000000000PNG  IHDR +u]gAMA a cHRMz&u0`:pQ<bKGD̿tIME !ReIDAT(c FyCݑ)0008y@^-w y Uyep} /a@< y lO` ip00܃\ht-`h%tEXtdate:create2016-11-05T07:33:03-07:00y$%tEXtdate:modify2016-11-05T07:33:03-07:00tEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_4.png000066400000000000000000000006201512524704700225150ustar00rootroot00000000000000PNG  IHDR5m48gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! b.4UIDAT(cFF }&.o'B lDT U3N EkJJ@ f!9,ȗ% @w$X@ouF>&.@Z ʓ#%tEXtdate:create2016-11-05T07:33:13-07:00y>y%tEXtdate:modify2016-11-05T07:33:13-07:00ctEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_5.png000066400000000000000000000006401512524704700225200ustar00rootroot00000000000000PNG  IHDR@2xwgAMA a cHRMz&u0`:pQ<bKGD̿tIME !L/IDAT8cFFFЎ}хd+dHB `!{HB`8$"L7BaH0^D: V)$? wd+:,GOnd  mF1@ ‡L BQ?:#K&%tEXtdate:create2016-11-05T07:33:22-07:00Qu%tEXtdate:modify2016-11-05T07:33:22-07:00 QtEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_6.png000066400000000000000000000007031512524704700225210ustar00rootroot00000000000000PNG  IHDRJkgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
'h IDAT8c0 N]Ѕ[@79XA'e($TC Q ]V F·7$X=^*4_E_  &(34(pZ~>D^V Qh@Q6$x];F lQ|0:Z%tEXtdate:create2016-11-05T07:33:32-07:00lus%tEXtdate:modify2016-11-05T07:33:32-07:001tEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/44_7.png000066400000000000000000000007401512524704700225230ustar00rootroot00000000000000PNG  IHDRTRD gAMA a cHRMz&u0`:pQ<bKGD̿tIME !*$>IDATHc 0ʌʌʌʌPG'.Gab``EƐ `wt=02jP {t{´F)JX,Tf3a!O2ke~B$jYn ?2 2s0ba-DBLDf*F}KHYҋ۞` 2Y21d%>bʴsdjc8ӂ-6 ?^28NJ%tEXtdate:create2016-11-05T07:33:42-07:00|j%tEXtdate:modify2016-11-05T07:33:42-07:00tEXtlabel,ާIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_0.png000066400000000000000000000004411512524704700225130ustar00rootroot00000000000000PNG  IHDR y9JgAMA a cHRMz&u0`:pQ<bKGD̿tIME  "8MIDATc |@(yf?a%tEXtdate:create2016-11-05T07:32:34-07:00~+w%tEXtdate:modify2016-11-05T07:32:34-07:00`#tEXtlabel-ٗ>IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_1.png000066400000000000000000000004521512524704700225160ustar00rootroot00000000000000PNG  IHDRظgAMA a cHRMz&u0`:pQ<bKGD̿tIME  ,7\JIDATc Tgan3nXc7%tEXtdate:create2016-11-05T07:32:43-07:00%tEXtdate:modify2016-11-05T07:32:43-07:00A\tEXtlabel-ٗ>IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_2.png000066400000000000000000000004541512524704700225210ustar00rootroot00000000000000PNG  IHDR pvgAMA a cHRMz&u0`:pQ<bKGD̿tIME  5S7!IDATc&'8 03%Lf86%tEXtdate:create2016-11-05T07:32:53-07:00~%tEXtdate:modify2016-11-05T07:32:53-07:00ctEXtlabel-ٗ>IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_3.png000066400000000000000000000004561512524704700225240ustar00rootroot00000000000000PNG  IHDR+mBgAMA a cHRMz&u0`:pQ<bKGD̿tIME !R#IDAT(cFx ;-6S9%tEXtdate:create2016-11-05T07:33:03-07:00y$%tEXtdate:modify2016-11-05T07:33:03-07:00tEXtlabel-ٗ>IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_4.png000066400000000000000000000004611512524704700225210ustar00rootroot00000000000000PNG  IHDR5i gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! b.4U&IDAT(cF)?H 3 pʄGH2sQ.%tEXtdate:create2016-11-05T07:33:13-07:00y>y%tEXtdate:modify2016-11-05T07:33:13-07:00ctEXtlabel-ٗ>IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_5.png000066400000000000000000000004631512524704700225240ustar00rootroot00000000000000PNG  IHDR@?0gAMA a cHRMz&u0`:pQ<bKGD̿tIME !L/(IDAT8c0 Tp0*p Ng@ƴ2*84Q&%tEXtdate:create2016-11-05T07:33:23-07:00~Y%tEXtdate:modify2016-11-05T07:33:23-07:00tEXtlabel-ٗ>IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/45_6.png000066400000000000000000000004671512524704700225310ustar00rootroot00000000000000PNG  IHDRJZgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
[Binary content omitted: raw PNG image data for ggml-org-ggml-3678254/examples/yolo/data/labels/45_7.png through .../77_3.png — small label glyph bitmaps shipped with the YOLO example. The embedded PNG byte streams are not reproducible as text and are elided here; only the archive file paths are meaningful.]
x s]~ 8u86op?rB_jYN>X{L<_aYޢպ]qeW3f4U~F˻q߀w4Qer.O4VJ5?ĨVǂ ԌlSXcSym*}..G;QyivQ*W яAB ^G8~8ަ*;ݰ}cCaG3Җo,Չ|3":l!!x qoA7 ^In%3|d-?aŸK*6a.]jEryq~+h7v?I aeX&ǗQ'RqOmTyrrXAMؤ6~?r )ٟëH=K?97SY#Y`?=ÝZu"DqgNWĭ.iUJuxo=}1"&L 1QO%tEXtdate:create2016-11-05T07:33:10-07:00Hc'%tEXtdate:modify2016-11-05T07:33:10-07:009ۛtEXtlabelMkfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/77_5.png000066400000000000000000000021121512524704700225220ustar00rootroot00000000000000PNG  IHDR9@^PgAMA a cHRMz&u0`:pQ<bKGD̿tIME !E?IDATH[HUALeԇ.%Tڃj=@J*Ą ).Y a=t2P*SC2L4Mss߳g<8kƽ5{$d͙j%9Q9r'B{ABq"+ j+Dd"'r8 >=jÖrϤG`G,'Os2cKs3͞M¸7۞ 2Ύ,GmvdfC+8yWȫLׄ׎.5ZeŠ#d|[^+?N>ݢVRKg똕<_m'L$h+V.Rh舙sv"(d!V+CԛI]ڳ14HcTb&/),[$v!($l|Q>2\^Y.jvvQ~@m^7.{P+})+#sѷ,BA/웂wH< chu\`*q:'?9JFsjŹsqIJ2x^7;އmC 3 DBЛH{#V荘%<0ГP5^{!!^;hՈ]遘b"+ {B6}ڬFX ZBjy (unI~?Jr)B܈񲟧D #1?Wd QD'b͞B`4N5v\OBdroZ@Ls m!v3 D,2[#|IF&8ȽrxiQ vZv#eWk. -eA2b12x+4RV]^Ȥ\dru. 7:X٩E0:sG>D@h>9I;) &qZb BIm̖؀])zcb,TH5~Da{#Zq~!Um$oJv Dw߇y{퀭G`8Y U\Іl; f/wV~>&]c{ U+HF|ESK&`uJOqL+ m,/KKu@X1l]蚄])>dB6Kk-Dms.erˈ-#0 Ծ%P",B1u$tr;K!?4ӝˍW?h υX標aBO&8/wXM]"deܢ@;dG~t8 XXvN1#qf'쁦GwxM *n( TTvi7Tћ|KÇ.·粐q?#S D?Am%tEXtdate:create2016-11-05T07:33:30-07:00 dZ%tEXtdate:modify2016-11-05T07:33:30-07:00{tEXtlabelMkfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/77_7.png000066400000000000000000000026101512524704700225270ustar00rootroot00000000000000PNG  IHDRLT ygAMA a cHRMz&u0`:pQ<bKGD̿tIME !()*}IDATXklEXnr ZZkKD $UM RIh$FH%\D Jj\FIK҄ JmOϰgwogvgvLOw9Ξ2~SˮuNpcD?-+\{&f$VClf(v[̆j|'>[++|_[tF,12}=Iۓ!ILlߍ`.Ď[E0MǕsl˂aO9.v&U'\azی@|z-pXM`;[{-J`SӜDX٘9J]vε1cGI fXd3&aٴ 'LI4Q672d!( #ogHY VIt)E1>tIyG`sIT iQL;z/—z6Y뿇T=l1śD;!WB!OIXy<_\M,%P8-nT\ a\IvZ u:Am@|+IBOL[U=HI4͔KD1W XIt'zEqQ:.bD,_Eyd?:zC~h"+Q)60;߮;G.p i wm ^+ /+a(>,P@g^^(Od|>EhJY酽 ъ]qSM&/zaOpF_'ݙEo\Ȗfct^#;iDjI߂=}ȉ5QEH塯h{yqYQ5Lua RxE/YGw=XU22lcIޕax}w=na_yeϔ=Z1|wku4v΁T¾A-O1qڧ.cvaRZh. 1.|Fk^$ũO'`u3/ckFsOZfug˴e 0?xڪՎ`j!`ETk'zzVjtYgbv^y *L'; lm)*ytúaݰnv;p'w%tEXtdate:create2016-11-05T07:33:40-07:006mC%tEXtdate:modify2016-11-05T07:33:40-07:00qktEXtlabelMkfIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_0.png000066400000000000000000000005111512524704700225170ustar00rootroot00000000000000PNG  IHDR 2gAMA a cHRMz&u0`:pQ<bKGD̿tIME  >Ya>IDATc x A!"}zV,xz=ȂLR9w%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_1.png000066400000000000000000000006151512524704700225250ustar00rootroot00000000000000PNG  IHDRCgAMA a cHRMz&u0`:pQ<bKGD̿tIME  )G6IDATc{? `-vag8 `:&M n [N_e`[, d7e" +YCe^&dO5.BRM#4́()( g78%tEXtdate:create2016-11-05T07:32:41-07:00I %tEXtdate:modify2016-11-05T07:32:41-07:008޵utEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_2.png000066400000000000000000000007011512524704700225220ustar00rootroot00000000000000PNG  IHDR -5gAMA a cHRMz&u0`:pQ<bKGD̿tIME  3TIDAT(c0C7DTneC+. J\ ypH0$!'"1QH|Ŕ| HZĔHdsLŀ oYrw0%?P<ƔL֩Ɣa"-8|0%u!g bpHCRA+i1exab%tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_3.png000066400000000000000000000010001512524704700225140ustar00rootroot00000000000000PNG  IHDR+. ^gAMA a cHRMz&u0`:pQ<bKGD̿tIME !HIDAT8c?>;~y_X! 
?3x~/ R).U`TVp`2pV y ?`bqV |`'p_Vw qo+P|KX9..:#~_ʌ_V7} NN/%Q7%tEXtdate:create2016-11-05T07:33:00-07:00|c%tEXtdate:modify2016-11-05T07:33:00-07:00!tEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_4.png000066400000000000000000000010571512524704700225310ustar00rootroot00000000000000PNG  IHDR&5GgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! J$IDATH!HCSQ␅!,h`b2VfXrA6dE0̇ yVPILbAH؉ !dcM1db٦ `C"$ԭ\\[jF"ˆ.ÁLe82 Es7\2ڝ]3_e8Q7Sq.F-\1u.ոp3ng{x)abx^kb)zbC#G14a@U!jS핕=-%tEXtdate:create2016-11-05T07:33:10-07:00Hc'%tEXtdate:modify2016-11-05T07:33:10-07:009ۛtEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_5.png000066400000000000000000000011301512524704700225220ustar00rootroot00000000000000PNG  IHDR-@gAMA a cHRMz&u0`:pQ<bKGD̿tIME !EMIDATH=(ܝRԥ$$a,L7xncrL6&1 EJYbn ]tܹ Ο#` 3GI4Ve-%1Fo`:e,QZ"ea~yѕyznHt֋Y[#3nk;~϶v@yo+U`k+L2i[#^8Qޘ5.j7'mz塴hR>5[wldw!~];C㱇xc4^F#3h|L109h8 a4LiD Zg4(.O%tEXtdate:create2016-11-05T07:33:20-07:00Yd%tEXtdate:modify2016-11-05T07:33:20-07:00xtEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/78_6.png000066400000000000000000000012061512524704700225270ustar00rootroot00000000000000PNG  IHDR5J>CgAMA a cHRMz&u0`:pQ<bKGD̿tIME !u{IDATH1(arQn[XXXdEnP%ePʠL%1PSd"uYXnaDqb O#`"/QJ)%3w;RI((3JFQ(zft<2Jr,$z(d%[FIQҜr kҫ^j51VH ?T+ MSo3tόUFKڲsM%-_iJ2Fix?h 1Z?-]WFK[hig2ZڝVTYuGcsEFs;Fs7[gyo{'gWb]FR}Fr}F@cF}u*345gơcߙqh>ith>lx|Ԩѝ|6PuFx@}FdHQ#ŲQ941Oi,tFc-f4rF㰘8)etT2׵u͌SkB;5j/[/4R:S/6@0F%tEXtdate:create2016-11-05T07:33:40-07:006mC%tEXtdate:modify2016-11-05T07:33:40-07:00qktEXtlabelN bIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/79_0.png000066400000000000000000000005411512524704700225230ustar00rootroot00000000000000PNG  IHDR cCgAMA a cHRMz&u0`:pQ<bKGD̿tIME  >YaVIDATc wW1WY?a@ɇ3LW+p6c3Pm⪛4]jjgQi 2\lL_V[џ%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelO{eJIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/79_1.png000066400000000000000000000007111512524704700225230ustar00rootroot00000000000000PNG  IHDRgAMA a cHRMz&u0`:pQ<bKGD̿tIME  )G6IDATc @Ĺ,-NVCY P*ÌA *!b A̿ ` 0ËĊXľ 3ăl`)3D# !b <0o"  +Ka` b`0 iܱgTC% ߧp9̮.Ïb?\%tEXtdate:create2016-11-05T07:32:41-07:00I %tEXtdate:modify2016-11-05T07:32:41-07:008޵utEXtlabelO{eJIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/79_2.png000066400000000000000000000011121512524704700225200ustar00rootroot00000000000000PNG  IHDR \gAMA a cHRMz&u0`:pQ<bKGD̿tIME  3T?IDAT(c0!{[ fhrg3PGd' @BHD*w)`ar\ /B+p< rXO#9PrX. ȪE*;XNh{TOk10HPl7Pn'F &<4)@@OL hr@ wZP^CHn&P\P$쿟 h6Cs0b?Aeu8jŋL J#H&(>4p $VHF ܍Mºy(ՀY-' "6EQ b L5o@[ NJA|? #x`JRhI98ur)VYuE=aπ$0% E!AտO׫A&#)_G S˜3's"²pF5AW(0T(V\C ,""**&wz͡%tEXtdate:create2016-11-05T07:33:00-07:00|c%tEXtdate:modify2016-11-05T07:33:00-07:00!tEXtlabelO{eJIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/79_4.png000066400000000000000000000015041512524704700225270ustar00rootroot00000000000000PNG  IHDR,56VgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! J9IDATHMHTQ7(*Y~- $H ?2)Dt!nm!n)FGD- j m 4L1Noͼ{q';7osF֛ony>olԦhQPsZHm-k75Hp0O^I+[Za)(,Kw壢]j{t=_A3/n^r֣9ZA*L lt:% 92k~dAG`b.Ks_l'π;8C<׀+PYzy.:xϱ`u\PⰛ{Pb205>ɠ KxPW<U+q7&\$3X ]aG]\e*n]{ ܇Nj_#N\*]xN'BO$^V]O93;Ko̜33 3*gٗyҤy:D#$U@$~-Lpu:bAtk%|}gU ZyUwN} /l86rkakji;Mۙ#'_ht9;ݸ7T^vҙ~0AN| VE qCز`%t;60u1TѳHO9 АTT|ؽ<E6k X|Lї0qC@]@.Z_qx!&Dt.gYj;E0~irCHbZ~!kr j^ 2.Y;#Pn<ب[/b8TSt;'16bc9ECf,o9־D}3V>Ի,@ [\b!if&SfN2f[1Kh^`o5&[p"I0lPx a7`6pؑbvn$Mjpq$Q5}otrY3U>bJ~OGUݔt4\6! 
Pbi?@C ;f9U=\67!]ݱ6* 6?ڨk7T:%-tCtکe/ݛ_ F1#czV@nվ#%tEXtdate:create2016-11-05T07:33:30-07:00 dZ%tEXtdate:modify2016-11-05T07:33:30-07:00{tEXtlabelO{eJIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/79_7.png000066400000000000000000000022171512524704700225340ustar00rootroot00000000000000PNG  IHDRFT[gAMA a cHRMz&u0`:pQ<bKGD̿tIME !()*IDATX[HTAq׻[j$Ib킁]P^Ұ$]̐IA)MB(,nd LRt.so32XzYRX7$N~y|V̤q% Del̕H9Mp*R jE?uW|QdJB{䙩lqE>|@3UP8ipjX&arye'dyEzc$OKČK0-tQz=L1cQiS/bPA> !`‡A)RN.o(Gu2V2pME5:l!j mb΀ZL1$$z3Vf[[dt& wL1L22U%38Qalcqh1#8133{qS'C]sgr7rIfgD)բ7ECk1q& _gMsg3¹9M1f<[sC|p $Ve<1E8ʜ-ns<02pZ1SD-:e}l" !fPn@XMYOOL3ɻjW zcf,]#2MH*IT $f@MbRMv<rC:޸H2 Ya/IDATc 8c 8vۗvhv =T* ӏF <ƌ3%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/80_1.png000066400000000000000000000005351512524704700225170ustar00rootroot00000000000000PNG  IHDR 39gAMA a cHRMz&u0`:pQ<bKGD̿tIME  )G6RIDATc Q% r_10Lw3D P& `6'P9|9 fG*ldSd Ē%tEXtdate:create2016-11-05T07:32:41-07:00I %tEXtdate:modify2016-11-05T07:32:41-07:008޵utEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/80_2.png000066400000000000000000000005761512524704700225250ustar00rootroot00000000000000PNG  IHDR >gAMA a cHRMz&u0`:pQ<bKGD̿tIME  3TsIDAT(c03@I= `[&v*tj) # *0$` o$W@-P-S9KC>6xR漇;<xJk %tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/80_3.png000066400000000000000000000006451512524704700225230ustar00rootroot00000000000000PNG  IHDR+]gAMA a cHRMz&u0`:pQ<bKGD̿tIME !kx~IDAT(c0`JSߣ T, Ο?wd x َ. _! bJ}0%$ `dt/^BxwH"n 9 "H+?|ob摷NtJԮL%"qcJF%P:Zxԍ%tEXtdate:create2016-11-05T07:33:10-07:00Hc'%tEXtdate:modify2016-11-05T07:33:10-07:009ۛtEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/80_5.png000066400000000000000000000007441512524704700225250ustar00rootroot00000000000000PNG  IHDR@#gAMA a cHRMz&u0`:pQ<bKGD̿tIME !EIDAT8c0P"h3 }l4jQu,'ԋOa^Yddl*[}U Jwa3D?vYP?b }U T+Vy!NÅȞdeb^jZtw[a!)!,, mH'Yѫxb_ΔbT'gٺFz=/ʎʒ'cS%tEXtdate:create2016-11-05T07:33:20-07:00Yd%tEXtdate:modify2016-11-05T07:33:20-07:00xtEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/80_6.png000066400000000000000000000010201512524704700225120ustar00rootroot00000000000000PNG  IHDR!Jb*gAMA a cHRMz&u0`:pQ<bKGD̿tIME !uIDATHcO0GE7`O] Л }T0, i+!  
`HAVhDR I7<N•aU9 *`~ơ̐T,XK1.\*TT0[pC)?*DŒgm2Sq9}hjkkRi,{ SUҺYE>y?y_"z]l9`U1b8ZGqOUO8B%tEXtdate:create2016-11-05T07:33:30-07:00 dZ%tEXtdate:modify2016-11-05T07:33:30-07:00{tEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/80_7.png000066400000000000000000000010541512524704700225220ustar00rootroot00000000000000PNG  IHDR&T˼gAMA a cHRMz&u0`:pQ<bKGD̿tIME !()*!IDATHcO`e 0~u"10d!JC_1tQДi.*{3BHʒq(OIebp*e?xoKdSv.OK>eafc%p(|6% ) Kţ0 Wq+;'QZUsّuTerzzL!D~(eDTb3c?"!X;^]p1r ə=R!)*U6l)OU6l+W>Bl` [%tEXtdate:create2016-11-05T07:33:40-07:006mC%tEXtdate:modify2016-11-05T07:33:40-07:00qktEXtlabelPmIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/81_0.png000066400000000000000000000005501512524704700225140ustar00rootroot00000000000000PNG  IHDR cCgAMA a cHRMz&u0`:pQ<bKGD̿tIME  >Ya]IDATc 5*3+1T7?ݬ?@G3LU+kpk'96gu7Hۏ*5V@@&,ZgtSkP>%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelQj)IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/81_1.png000066400000000000000000000007441512524704700225220ustar00rootroot00000000000000PNG  IHDRgAMA a cHRMz&u0`:pQ<bKGD̿tIME  )G6IDATc @tmfV]Q P*ŐxssdB*Į20 /e`X +a`~ &s`G"A $+d18"@by o)Sfa`)Y L@CQsk˔D>@އ7V]-~{?CL &!)/2UCM%tEXtdate:create2016-11-05T07:32:41-07:00I %tEXtdate:modify2016-11-05T07:32:41-07:008޵utEXtlabelQj)IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/81_2.png000066400000000000000000000011461512524704700225200ustar00rootroot00000000000000PNG  IHDR \gAMA a cHRMz&u0`:pQ<bKGD̿tIME  3T[IDAT(c0!ce+Bhr?D`-md !wE $YJ5e%L+w HT.ȑ w=u _X #M$?J2^im  @@=@MnP (Hwɭ8(.@[ \(Pȝ W!b[3 < rbl@,I Ȝd[010| i4] ѴG Iƍ N<ʸ;C# )-ʖR=(i֊MYA;G| yD |5daݘ%tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelQj)IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/81_3.png000066400000000000000000000013641512524704700225230ustar00rootroot00000000000000PNG  IHDR$+5=ugAMA a cHRMz&u0`:pQ<bKGD̿tIME !kx~IDAT8cO`@^4iٵ8-UbX=fb@Ng1b@Q˄IpQ%ADU:xWZe |HzBl?a:5<E#(XD"'j\*:ֳ=ˡ@W̸B0Il+bcPL1XQ#db@EH;)z "'"s.6EgA2`E f6E@2V`E@L-l"A29`Eo{&>$-`GkJ? 62uRF'%kpW% StOUsC`MpUP5`뾁x/f8B o#)ʡ Q/̺_ȊU^CDEQD ɂXh涩y@j]L5 s`TUA:Dn?a%tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelQj)IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/81_4.png000066400000000000000000000016031512524704700225200ustar00rootroot00000000000000PNG  IHDR,56VgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! M`xIDATH_HSQ3E1kARjDCD!=dQ>TA/&aTHAXA$tD *D2Q0lmvܻ==}_Flxvm;-werlnVd`#Fx|68هfQ\rtx?͌jޡĘBks `ۯJ³[Wo۷5^QW!y)Ͳ"&pثQIƴPw<ńGODR#Ԍ7Vz2XЈUcӊa5eUZ}lYڂ@Ukg}TuKhS0YnD&wVU2;,(^9xGGx7R5rjq3~#k@=NpSTJFf8r Acp*ʹqe Ј&*જ + "<]RPM_>BK;]M eRvmy8=JA1%ڃYڅs&Ti1T MBu8=Usif~Q8U;'{8%eViAp$)%Lm`PT-Q97kFY(ϚVBwU)clp.KP\Tnj2U; GU9L\B9 m"Y, C7( ¬slAV`J}rhy&7ZޅsMJ*U$ͻW/5(wHL' F|YžPQjY*WMy\iHv<ʒTg +]?J, `cCQc Ac1Pמx:{OCm%tEXtdate:create2016-11-05T07:33:20-07:00Yd%tEXtdate:modify2016-11-05T07:33:20-07:00xtEXtlabelQj)IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/81_6.png000066400000000000000000000021431512524704700225220ustar00rootroot00000000000000PNG  IHDR=J-.LgAMA a cHRMz&u0`:pQ<bKGD̿tIME !uXIDATXkHSaOn2E$IZHeiVXBRƔ"dB*E0ѕF ] ,UTXy-~h{w 2Gmnb;!.^Akϴ@ Pni Tw"bAcc;qՎtb$Fbgimyg?Җ,nO`A)=T5oL=w:n9[/ڙrL|iͣFaaI+ ,ٙ;^| ]am^itl)8gnطtq\1Psկic F 1E#=APcSmދabũrld-Ls+rF;U0mDљ_4Zn]fq60E YWZ %ln[;3ehӑP1)G~c)G/nMyI9&'RY,jޥ6}k0#%^$5фc/jR~%ߧ>@=Eԩu7TF5m ݮx`"b# " j.!V~iRRt' t95TB_fFm N5d -\,w^kM 7j/J ,ZSJÄȊ!) 
RWY|AJ[FJ<Io&I _: wЎwJSc}EVL==U*baHV EG6YhSjk߄\b']tK41Ntќ(=$o ɝ.9ǚ$1.'3v.)խTև*ɘD 1;iBX6_y(ayw lDKCذߖPBM5={l33"IgdI@>~n&ao̥\c5at@*2banN>ᯪ0H<%Cf&^d^ ­Ps$BdL?SL:Uϼ&fL&ɜg>NSD $3 d@2G,%]L)@dH&3Xd*ЋG=7 _){*W-0);񗅹ArNuOH[eOIJnx>ߔC@{_:$[e D+'IT Ùu,uF:/Q M.,d8}2xh'IJ`{p_@.UrVP/3 43ECWq9 r2cJg0Li#]8JfP2ݟqi3EW,J4k:H@I+e_t5: Fh; wt/T{~ֺN=V=RSq3CP :WgѦW \Z3ګY.y+w@SYnԲ~Ƹv30Κ!_/RHTWd83o%tEXtdate:create2016-11-05T07:33:40-07:006mC%tEXtdate:modify2016-11-05T07:33:40-07:00qktEXtlabelQj)IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_0.png000066400000000000000000000005011512524704700225110ustar00rootroot00000000000000PNG  IHDR ggAMA a cHRMz&u0`:pQ<bKGD̿tIME  >Ya6IDATc x!cCR!RAkCc 9 KD4&%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_1.png000066400000000000000000000006071512524704700225210ustar00rootroot00000000000000PNG  IHDR >IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  )G6|IDATc 3nray7W10a-wa%7b`d&g;/a b 'Y  RB)@\J2 2Q Cm2QZGB^@]ZPجDP/go^$Qg)B?BA5@<~100@_A F`iK0%tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_3.png000066400000000000000000000007741512524704700225300ustar00rootroot00000000000000PNG  IHDR+LgAMA a cHRMz&u0`:pQ<bKGD̿tIME !kx~IDAT8c0?BFAg 63!{ݻ,I<bbbo0ŏض@lLNC(I"8ALX˸k8pnPs~C/#6 CAf uC#C#!)8xbd_ be db|0?q& ⿔R%tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_4.png000066400000000000000000000010601512524704700225160ustar00rootroot00000000000000PNG  IHDR5gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! M`%IDAT8c0p_}.[Te>OA>Y44Y_- ey. c  b_%`.;/] Jms-Ps>w/Һm91A$u3i<Ġe\Ϥ{ 0kPL`bR eYz߱Ça5V#OCE.(sZ*hPFl5KHVcd5pXd?Br7%Ҩ,]dF%tEXtdate:create2016-11-05T07:33:11-07:00h%tEXtdate:modify2016-11-05T07:33:11-07:00/tEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_5.png000066400000000000000000000012211512524704700225160ustar00rootroot00000000000000PNG  IHDR"@ZOgAMA a cHRMz&u0`:pQ<bKGD̿tIME !qBIDATH=HBQ!AJ4DPSTD$574DDCAD-QR ADAX8TЇ;a(]t;{O24$#B%clъLΔDTDND+pJ|9[sǹ+$2e35DL\CVE8_&I+L6<`(+eiL\Ҍ(+h>;eOOx,tdQ'0)Ӫb0 =E윉v3Y'}d^.!s\AB+jcIN-H(X.7rcuنr[yS dYHCBj TO_Xd$qLCBCl&2$<`>y٧_ _4'z%tEXtdate:create2016-11-05T07:33:20-07:00Yd%tEXtdate:modify2016-11-05T07:33:20-07:00xtEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_6.png000066400000000000000000000012721512524704700225250ustar00rootroot00000000000000PNG  IHDR'JmgAMA a cHRMz&u0`:pQ<bKGD̿tIME !uIDATH?HBQg2" phh!hhh lh" $" ltP2l*'*"=Px~.wy[T{GѬےUUL}TOHI51'I+N}(g1'9\ahuRyQ3+ttə.uqvn=y8nC?v.ϱ>; iƐKLק>^ڶYV?XY<^9{{Z]EB G~r&;ATu0kO; ݚr*?tuw ~T79躆v:b8 Cp: rA1t c> t QHKp5ơ 1"G13%(ʽYn%eWvdD~!kI%tEXtdate:create2016-11-05T07:33:30-07:00 dZ%tEXtdate:modify2016-11-05T07:33:30-07:00{tEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/82_7.png000066400000000000000000000013431512524704700225250ustar00rootroot00000000000000PNG  IHDR-T3\OgAMA a cHRMz&u0`:pQ<bKGD̿tIME !()*IDATHI(Q' 2HJRv! 
aaAQl!C )IJ@R!`A6)CcH|$뷸uϹ]A:GҺ͕f 6dh١thѪtѢх^$tCQ^ҟy\4!!k*XS" }VAp߮PХOay9m\A Q?o6\$&E{/%sm|W:_Tu` -|&AGJyA.ֆڸw%z3s&8o/+= ~B%tEXtdate:create2016-11-05T07:33:40-07:006mC%tEXtdate:modify2016-11-05T07:33:40-07:00qktEXtlabelRcIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/83_0.png000066400000000000000000000005201512524704700225130ustar00rootroot00000000000000PNG  IHDR tgAMA a cHRMz&u0`:pQ<bKGD̿tIME  >YaEIDATc 'KaKU?K?c3+ G =+`"D%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelSodIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/83_1.png000066400000000000000000000006521512524704700225220ustar00rootroot00000000000000PNG  IHDR >IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?IDATc ?Ϫ[@o| Rk.53_v10GUe`X0<&)A|N0 <+T@zE 1sϾ8n}`g2|F]Q $%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelSodIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/83_2.png000066400000000000000000000007711512524704700225250ustar00rootroot00000000000000PNG  IHDR :w-gAMA a cHRMz&u0`:pQ<bKGD̿tIME  3TIDAT(cpigR.l_Bv32_9 [sށ@z6Xd (4loӁBSQq(]v@1-(BT@;maGMǝz`A }'̀ba] jxm )  bbh9.T9M"3Ɵd2T@Xq!+ &IAk AbA׃,D18S :A{@1U_z!$|#|f&O-BOPs6?T3Jc>D8 ʟPāz 9h?utt@=gă1- O+SAN {q ,a^A=Z9 d`. bKfS@X DdŐQƢp{S&e <:gN8b3%tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelSodIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/83_4.png000066400000000000000000000012601512524704700225210ustar00rootroot00000000000000PNG  IHDR5gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! M`IDAT8c0%ss ~܂:`3%5Ed Y *`^}+ÀoeB:z! GN06a/.n$9D쟪H>++l)Y3lp6C\),aqd fU34lc^3_swؿdlDCOWE3åEO'_J^Xd>MyX8dC4?."{&km$ٟsR(8D)X)@XR*H1| Hg`58|wH>28DI88(IģDD/s/V Ubд{3I(ƥ<<11gƮ$Dre7BSlJoDN)(%9ٜTO/J-'Ȋ`*R,+_lJ&!Y׏U 0ӕp”?Įga05ٸUP5q+9Uҁ[_nTJkA MGS QUg90 M`%?&ȃ8:wD BR ݏ @,s* 8@C, gm`esMc "e~Jf"' * GVC Q%I\J&-L3`2k=lJpQ%#E 9- #%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelSodIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/83_6.png000066400000000000000000000015501512524704700225250ustar00rootroot00000000000000PNG  IHDR(JIgAMA a cHRMz&u0`:pQ<bKGD̿tIME !u]IDATHMHawK7P$!;.R*ZICD%OCy(*dLrZ??.>=~< hnHe]=Za.H4da8/d8VC,m p:x!>7Aӛx(MnM%3gt\G]{-7LjNY6ݓFx&PCZKp :i'rVr8^cb@3U^?»[d>`OcfuzY\32eݓE=zaA\ !dv݊ B(F*nk!@Cbc9P2½IhX t-6r,\#7W%tEXtdate:create2016-11-05T07:33:30-07:00 dZ%tEXtdate:modify2016-11-05T07:33:30-07:00{tEXtlabelSodIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/83_7.png000066400000000000000000000017031512524704700225260ustar00rootroot00000000000000PNG  IHDR.TkZLgAMA a cHRMz&u0`:pQ<bKGD̿tIME !()*IDATH_HSQ]i_ȱiL)j){T"# 3F62 Z !D^ Y=TaC%hؔQkX;s=C}ϹAOA ȓݍH09~mF,AT;&8M($"WFB K73R(笈V_@x=69nS`:ߌr8 S tXC Pz5H<Wq`KAWrƯp6orZy.M78)? 
o>F>?v=Z){p>"8{%{0wCayޖn\>+uݍAd!)_Cӕulk$՚gd:}I8GGS7tW*qNiϘk9XSJXRɋŲzkʾT~ݏ-mJqħi厦>A& _1RL%tEXtdate:create2016-11-05T07:33:40-07:006mC%tEXtdate:modify2016-11-05T07:33:40-07:00qktEXtlabelSodIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_0.png000066400000000000000000000004631512524704700225220ustar00rootroot00000000000000PNG  IHDR tgAMA a cHRMz&u0`:pQ<bKGD̿tIME  >Ya(IDATc ?St?l}8a@@29?C%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_1.png000066400000000000000000000004701512524704700225210ustar00rootroot00000000000000PNG  IHDR >IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?-IDATc ` a@Rn7yQ;%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_2.png000066400000000000000000000004751512524704700225270ustar00rootroot00000000000000PNG  IHDR :w-gAMA a cHRMz&u0`:pQ<bKGD̿tIME  3T2IDAT(c=g@s69`Bݨ6 Q,D(.x,5߶%tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_3.png000066400000000000000000000004771512524704700225320ustar00rootroot00000000000000PNG  IHDR+LgAMA a cHRMz&u0`:pQ<bKGD̿tIME !kx~4IDAT8c0fSOyg@9t8%h`ޙ/M%tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_4.png000066400000000000000000000004771512524704700225330ustar00rootroot00000000000000PNG  IHDR5wԶgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! M`4IDAT8c0<8gHpfkPIL0*I;I-!*<%tEXtdate:create2016-11-05T07:33:11-07:00h%tEXtdate:modify2016-11-05T07:33:11-07:00/tEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_5.png000066400000000000000000000005131512524704700225230ustar00rootroot00000000000000PNG  IHDR!@'ALgAMA a cHRMz&u0`:pQ<bKGD̿tIME !qB@IDATHcO0PCŧx !ҸClTŨQ*`Ћ[V0bN>`o{%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_6.png000066400000000000000000000005121512524704700225230ustar00rootroot00000000000000PNG  IHDR'JmgAMA a cHRMz&u0`:pQ<bKGD̿tIME !u?IDATHcO`K @u 582cTݨQuFՍpQuꆂ:N O%tEXtdate:create2016-11-05T07:33:30-07:00 dZ%tEXtdate:modify2016-11-05T07:33:30-07:00{tEXtlabelT^IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/84_7.png000066400000000000000000000005311512524704700225250ustar00rootroot00000000000000PNG  IHDR,TܞqgAMA a cHRMz&u0`:pQ<bKGD̿tIME !()*NIDATHcO`$&*g \ULbH ,'ݣG*UYa9IDATc 7!'΅Ջv@ xaaOs%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/85_1.png000066400000000000000000000005611512524704700225230ustar00rootroot00000000000000PNG  IHDR:dgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?fIDATc .30030|x-U~ï j hA{0q0>/ Mۂe9cQOe>`}p,%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/85_2.png000066400000000000000000000006441512524704700225260ustar00rootroot00000000000000PNG  IHDR 3WgAMA a cHRMz&u0`:pQ<bKGD̿tIME  3TIDAT(c00/rߏ c @@!(|(d`_~O0Q^pYPѯ&@NXȘ;@æ@& H\&-LD>he@Do zFI7Om똱#t2h@/%tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/85_3.png000066400000000000000000000007331512524704700225260ustar00rootroot00000000000000PNG  IHDR+N]gAMA a 
cHRMz&u0`:pQ<bKGD̿tIME !kx~IDAT8c0\ ף#Kr)Q*{ bdJ~%(rB@}$dIzu)s)d,H$,b*~E b2e/-ow̭* PW] h@[&r#$?s< 9|Z[58ee?%tEXtdate:create2016-11-05T07:33:11-07:00h%tEXtdate:modify2016-11-05T07:33:11-07:00/tEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/85_5.png000066400000000000000000000010731512524704700225260ustar00rootroot00000000000000PNG  IHDR*@IǺgAMA a cHRMz&u0`:pQ<bKGD̿tIME !qB0IDATHcO4`r0*"pTQJi(J!=JC>+mp*=Ut" IƟ`kaSsBaS 3(bQjL Ds(+Rw&(_ c #T 7ZHC%CirL|\Y ͷow 2+_IyQJX1g[eFP1?RUtS?]Q&G gJ=QN&Yȶ"FQJW Ah'\D%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/85_6.png000066400000000000000000000011651512524704700225310ustar00rootroot00000000000000PNG  IHDR1J7gAMA a cHRMz&u0`:pQ<bKGD̿tIME !EjIDATHcO*`ЏS]  Sn ۣ:Fu1cc;t(e0!:\?tLxQ?ť#DG? Wa5LG- :.tб7;L"v!0 0dtDcLLuGL] hKoqk`Xб!E8oq92wp I,&72POt5@bB@4\6|n>n cL9ꝏFR^t劅#-ȄdG5!N& cwGu1:)kJE%tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/85_7.png000066400000000000000000000012441512524704700225300ustar00rootroot00000000000000PNG  IHDR8T۫gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-ЄIDATXcO&`B~2G5j8qTa/>"i+ yOc&\c8y|^C5EӨPpyxc0,kq\LC᪢aq{P .&&p-npkzG1koV{Mc:Bf$sn`h$/IOe$O(mPBeI5R [@%i@S84 uRgGf>U{U@4/?w5 +EqF%DOg%^zKpbt;.PV+`blQ1:ewVVX*r2j$QjQFN}*W.%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabelUn0IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_0.png000066400000000000000000000005301512524704700225170ustar00rootroot00000000000000PNG  IHDR 2gAMA a cHRMz&u0`:pQ<bKGD̿tIME  >YaMIDATc y!7;d}Gf߽n0kW02[o(U j2SL%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_1.png000066400000000000000000000006621512524704700225260ustar00rootroot00000000000000PNG  IHDR:dgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?IDATӭ1 aX X L>RbgP2(tmfJMr1swٛޯ^=6l^Qd(T{EN^`p`ЗKaP^? = |>vMC9L_V;ļ(f]? [ riX%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_2.png000066400000000000000000000010211512524704700225150ustar00rootroot00000000000000PNG  IHDR >igAMA a cHRMz&u0`:pQ<bKGD̿tIME  3TIDAT(c0 IA;P3BP@?U};00hAẃB  >đ0f00<PGį T<|$~_s$\|=Pd[#B3~ &A#^600?;Cx"A[b5=4!"GC`?S%tEXtdate:create2016-11-05T07:32:51-07:00) W%tEXtdate:modify2016-11-05T07:32:51-07:00ttEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_3.png000066400000000000000000000011451512524704700225250ustar00rootroot00000000000000PNG  IHDR+cgAMA a cHRMz&u0`:pQ<bKGD̿tIME !kx~ZIDAT8c0MdEhq e@s kv/ ( &A$dɿ @`_@6;$M P778=H Pl@?mF;A-z*{I~$!g Rۈ APYw`?D"GށBHh> v),@!%rCc0$b,E=,;H O!yIv]HA* $cb8Rd cJ(,} `)VٝIwPdec?;xQ*?eo0xM<%~ QY$dd%tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_4.png000066400000000000000000000013041512524704700225230ustar00rootroot00000000000000PNG  IHDR#5= gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
M`IDAT8=,aCU1$" ђN6E6b0R6>&Ab"CQ1{"xy/{t#i h1MU19{5S "gLTr?զ'1'rC70HޏYS2VF0_hwdfY WwL&i_'H3?g1d\bH%) K &9ٹ"kɓTdΥxH΁nOaf< K6.1A>ڋz&/_+YYWrmG3FjW&ccN0t!M+c3q6d4 >eH\`>_ k -\4)@䯶cA0A\ b9c:x3Bf7W棆?1C3ܮylJ$cC7^Zt5|%tEXtdate:create2016-11-05T07:33:11-07:00h%tEXtdate:modify2016-11-05T07:33:11-07:00/tEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_5.png000066400000000000000000000014201512524704700225230ustar00rootroot00000000000000PNG  IHDR*@IǺgAMA a cHRMz&u0`:pQ<bKGD̿tIME !qBIDATHK(DQkȫke,)y5EVV,(lbce9嵱bA,l(E+ E FHw>333d}sU }Vc"X8<8iƭ \ I!nj{G.}1u-ϹN#b:Q+ kS \3XhHJ]xn$=>']-7"z9aÌ."VJc薪V$e gI)uZ;pepTa0lQaRj)G!FtGDJ#Uҏr$&]rٓQ֊a.);4!rJ5/Iqtּ0?G.O4:F3BRwe!ɺn p!GpOvpΫOV7n ;-N.e*1k5B:;U-S8^'Z+40Vik0fw3-;ψ1u)z29t" pAxTߗs;n>Mn x-{]%Ak5ppSȨV4f@0lf8+.8}l<2 zt>]xP~eyΉꌝ F䈱j:ΜO$CMs.`46\ 65oe DE@L@ԒLT/S24WML`jex'D,0^BL0`,B>%tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/86_7.png000066400000000000000000000016761512524704700225420ustar00rootroot00000000000000PNG  IHDR8T۫gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-ЄIDATXOHqU\H-"xPtV[ !APuX+$C!:d!=ĶP;Zg}έ9ov%(o= \aV 7pTb8p] ';B3ԝn~D>({D΄/5\۱C+dx >u[tk^xpۖ$x4/$ipSpo^0Na aįXL=ɀ 9>o}kd,nqz`.8mSV? 88ੀ\RlY`ٛ>{MZ0] R&qaý€t jGы|]_ 84 vqɺ?*oF"_E{{bR79#-N岓^D(R'5({/qAwk(.~5m_|q5PxzD8s"\qEB:@/lp[iP Z|-^1s;Tf,p{L >R/8bT6T j 6CNDB| px/doJ7bX|2Cwiǀ˃UO]6$Q,o)ńTUX; zw4[%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabelV?IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/87_0.png000066400000000000000000000005701512524704700225240ustar00rootroot00000000000000PNG  IHDR n3gAMA a cHRMz&u0`:pQ<bKGD̿tIME  >YamIDATc?/ ?a/ g6y@e?s_T,?U0g7;!*oS}? ħC9pFiFxf%tEXtdate:create2016-11-05T07:32:32-07:00rM%tEXtdate:modify2016-11-05T07:32:32-07:00tEXtlabelWh IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/87_1.png000066400000000000000000000010111512524704700225140ustar00rootroot00000000000000PNG  IHDR3gAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?IDATӵϱKBQSP M 0*$hV֠""IjSY{gp(,MQqvw{Tp<'v4=%3~NsnDM²5nixMޥ^D5w.q,5Wx܂gRdÎ|V~ O!>(uD^!w,Q^bɀI"Ļfx0Íxi*m&O&Y`?{%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelWh IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/87_2.png000066400000000000000000000012651512524704700225300ustar00rootroot00000000000000PNG  IHDR V%(gAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0IDAT8Փ?hSQH@&%]BAE0T(BJv(bi5t ,:b v(Ғj|7yj))ӞBFsɝ}K'h"r33F/xz"2S/q՝\Dr:|=zA~͗$ $_.r͐ȧ ?9T%uY gcNҒMM C ' ̇I?ȿ?%tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelWh IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/87_4.png000066400000000000000000000017351512524704700225340ustar00rootroot00000000000000PNG  IHDR45+OgAMA a cHRMz&u0`:pQ<bKGD̿tIME ! 
M`IDATHYHTQLS* cՃEFE+$AH>I")8-DKQ F PMjh DVFQFIER1V44_9sgýo1hR-a`VbCk)zIYX [YzԂ>z?gIT1vĂQ0LS;Ej?"͡#!&c=V3rzrD N:!ao9a_&(IS!Sk4DV{UD:"M b?dLw[Ge'6 Q+ZxvT}8\-"'L"V/x- yS׌^Bqʪ)f#MQB1[A"%TNg-R4 >#yJvi9݋)qY*Xl6kUIJt-)m&w*(kF1Id/({>lYvN򧟶,)CꠑeZi?J{ LJ\k@t5I9`o tzrm=c $+gY,"Yh\F)XNEJh097nkW rqBCK0ЦPLR{BZqT6kQL4\12rYp4c ט[ÝPn0DzN7= =XݘpÈ'n6mA}*]2۠ڏ⌱N2ug4{xH`U֤?4!Tem$Aw&\s5{Sg/0,/bTnn\oZK7c wBq[zC8~S3xzV]b#Rqv%de>gzTRMW 7lALwyoniam R֒Je7DfDnZl#C_Vpa}VCiJl XEd7Yvɮb:DV`/k%ǰ#UDDv2Y0;?x0aZ8\M&ib쑬LhQukN1 |ݍ:A/'/=ڴJ %Bl_RePƏgCOʵM ipnvpx)rH 'sc<զ<*Ff%i1oOmlZt61bg+WZ#;;@="gMiq,[U>B'lV1`_vmž'lV8C0%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelWh IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/87_6.png000066400000000000000000000024111512524704700225260ustar00rootroot00000000000000PNG  IHDRGJEgAMA a cHRMz&u0`:pQ<bKGD̿tIME !EIDATXyMQϼ1co0%B,Hvce$[h5d!{2(0W3{{wc~}ν 8/^8Ui~Gyw}<枊\O]̗u6cjZUsIky2c=\81u~ [kFbtYقҼG|LJ:烸[*sMIP( 0o+0\:NbZGpPӼKC16Uo W|%k"fܾXui' s%;yۥPvmyK]Ip[G͜k`$q^šzjirllFC,?qZR.﭅o"eNO 'dB P(rh9gxUq:go"iKٳgJS,qB͠l3b^@ްshny/Rh!-98W<2hpKGיϝ"`ny+g7ĆU<_6h z*k@*Х:HUuђ YY:j4:1fXz3+D> >cFo20ݞhN0ewoϛԥ60ͨG<.u6U:2fjz=gg#I<Ƒ{ix<\~Czk6c%:Q[+QWntuF$B$:h,R u]Ȏx$z8*xJ[Z "z2%`[B|Zm;,Z ng(,b2*A;ĕ1H ZJߤHTUOŔrLXĽW13p4MNIGy,nzy{'1m?~5RҍKC(Jthy,2⿜(g Y,5@Doo#nZZ5ibo̗(bî PlCe|c:BK:-|q0,>O` +p Ľ(}` X Ui7u:acKS$6ԏ;"` |?ؠ/-2 σ-F`';-C+%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabelWh IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/88_0.png000066400000000000000000000005221512524704700225220ustar00rootroot00000000000000PNG  IHDR 2gAMA a cHRMz&u0`:pQ<bKGD̿tIME  !IiGIDATc eWߠY^a)! {!obz IqHR\%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabelXIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/88_1.png000066400000000000000000000006731512524704700225320ustar00rootroot00000000000000PNG  IHDR:dgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?IDATc ebX bd`>Og<7B 3H~Og|pa(C71F&xH.H< "@k1,8  o 0B ̿0$ ķg0 `(ُ+^%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelXIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/88_2.png000066400000000000000000000010341512524704700225230ustar00rootroot00000000000000PNG  IHDR lTgAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0IDAT(c 0 dO1```b1Fh%%@U } naY10(|Ea6В`r+Y00(a`Eqyf5gc)h~(4lpxA;hPq9Lt3  /:```>;m)Ht;,~@^b`;(Cc H%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabelXIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/88_3.png000066400000000000000000000011761512524704700225330ustar00rootroot00000000000000PNG  IHDR+Ŭ$gAMA a cHRMz&u0`:pQ<bKGD̿tIME !kx~sIDAT8c0 f ~$D'-&K {5 &MC czeH;Ddze [Q6F|C)?(\b es/`kGJw ]5Q\e2% {)H-a]'I<K! Tm 1 ^fASG)(SxP_0?۠& BX>a`¯^eE(TPbyPc9RFbǥ0 "o6#CX?) w@læ"qZ05y7X1L4B ShFc؃p-Zr(kFUZ@>),:1":͠).8 h$!bڿ"j!iP("D$5]S@SK "!pT @P.iT *UHB%5-%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelXIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/88_6.png000066400000000000000000000016451512524704700225370ustar00rootroot00000000000000PNG  IHDR/JuRgAMA a cHRMz&u0`:pQ<bKGD̿tIME !EIDATHOHQYm[5<%$(DA-xc"ȃaDA)R(:%B`%=x% 'ԶP*o潙7:tw~̼f4eeZ^#s 嘂rr~^Yx#B!g,2+&vC?AxuRzķ.pQwy`#߃L0&𩕈WlR߂X/^ !u+:$7dI"n"pyi" .kVSeU}0{Y"íg;w?gwО_ͷ_ЮUHצ97TVrf44fÊlsuH}5Rsgn}yiCA.&}/ʼn+dB9n@i! 
?8O cRmTaoǻJ_؆[&e9ix-_,l8Zl%tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabelXIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/88_7.png000066400000000000000000000017651512524704700225430ustar00rootroot00000000000000PNG  IHDR5T% SgAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-ЄIDATXKHQ׮)$z+)D&"2 hZ$Y-"Yj#-"Ea-au13 -Er_9sqO"j߹3sf a|z yAPMa[nC^NtuYQpg9XUU!y|yxNe;j/Eᢧ_`W.#d49VEgYGlp}"!ydv|WC%c_G2NNXCrTC QcsQjF]<: e,MQ>OT=.jԟXj8++p¨v]P8E :z+ڤkQft3V=58OVeNBu H=Pj(smu8Õ!]U$M kc0##╆G+P,OQVR ~6a^lhžwms(qSL^i;D. 1ߓ>~c$찣ڊ5&# ʥAmFlD6mwI2ꭏ3I`'*j\G<͢n)V!A ^ʌۊډ>RG1ri&ÉdksF=t I56K/R!L[pP85SGNk:1Ԕo1hJ%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabelXIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/89_0.png000066400000000000000000000005011512524704700225200ustar00rootroot00000000000000PNG  IHDR ggAMA a cHRMz&u0`:pQ<bKGD̿tIME  !Ii6IDATc /DR{UՁ@3`8s0Kv( %tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabelY"IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/89_1.png000066400000000000000000000006031512524704700225240ustar00rootroot00000000000000PNG  IHDR:dgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?xIDATc 1^(04l͟@+ ܓL@/c}.C7g*N }srX1"ĮB#H`;ZR$w ( ڍ.U w0cSH B ϭ QA2I ry:8a? )Xv6H{M/r %tEXtdate:create2016-11-05T07:33:01-07:00" h %tEXtdate:modify2016-11-05T07:33:01-07:00SVбtEXtlabelY"IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/89_4.png000066400000000000000000000010711512524704700225270ustar00rootroot00000000000000PNG  IHDR"5f=gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! M`.IDAT8cO0dg=,Y TrNW d0X-4~T%Px"x yQXM("_ v T, 8#Lwb >-$%,jj`Jfн c V5 N`W%0Z}Yb)Ѹ,.`KaƴXM/Œ |k5X 3y`Ǫ~CX cq)y N8 V!`%G*`TWظwF%tEXtdate:create2016-11-05T07:33:11-07:00h%tEXtdate:modify2016-11-05T07:33:11-07:00/tEXtlabelY"IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/89_5.png000066400000000000000000000011541512524704700225320ustar00rootroot00000000000000PNG  IHDR(@M2jgAMA a cHRMz&u0`:pQ<bKGD̿tIME !qBaIDATHcO$`B'00M&XP 6 Xupxb* V] P,t ;Bij@U",)/DE)jpb0 " !H2I&pu?!"=hQpAS^ " _b)D, UrqHq!OX4 7?! ?Vߕ ҩB<+%nF @8BNO9* ǣ\g| `)PuA (| UxPG*UHB`TB:()^4Ӯ%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelY"IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/89_6.png000066400000000000000000000012471512524704700225360ustar00rootroot00000000000000PNG  IHDR.J9gAMA a cHRMz&u0`:pQ<bKGD̿tIME !EIDATHM(afڔh%."q#˅V.vR+\N.K9P 5Al~f>\54J34 p(5P;x{9~bE՛HKnTSv ԠӑQFnk!l]hVS0֘>)vgb xx7}5y8gVcNn~[M&jCdiIR{sRs:tw"'oF_PVV$eô3 jʉ! 
.xl.]ﵒ6O%tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabelY"IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/89_7.png000066400000000000000000000013471512524704700225400ustar00rootroot00000000000000PNG  IHDR5T% SgAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-ЄIDATXM(a)Ju(9:8MR$'r(Q(RPJ$Ȳ?>Όi˼y{wwe ޯVzimh9΃׊y@WkuBPթ2aUE6cWAXT3QQ7fYmP`\}Ma5J5r1[SP]4?H8)_Џ=(ta}rdϥQM^it&Sü&d <+P4B Y*v28Qx((=f9ߕ>7krpKWkBg~OIȶ*k"nv~=*,au垉Nh(T0[ H|T!A-0%dc5ZNCaZzKDt@7نN>jnezEB14P2Zy\2sn6٣%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabelY"IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_0.png000066400000000000000000000005161512524704700225160ustar00rootroot00000000000000PNG  IHDR ggAMA a cHRMz&u0`:pQ<bKGD̿tIME  !IiCIDATc o??vn\V8Xl}"?*PC3X1tBG1D%tEXtdate:create2016-11-05T07:32:33-07:00%tEXtdate:modify2016-11-05T07:32:33-07:00EtEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_1.png000066400000000000000000000006031512524704700225140ustar00rootroot00000000000000PNG  IHDRզgAMA a cHRMz&u0`:pQ<bKGD̿tIME  *?xIDATcg<{s80 "sk2DGp /Z#I J?ӌ ς X!$΃%n &+ ݿTf%tEXtdate:create2016-11-05T07:32:42-07:00xkT%tEXtdate:modify2016-11-05T07:32:42-07:00 6tEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_2.png000066400000000000000000000006641512524704700225240ustar00rootroot00000000000000PNG  IHDR lTgAMA a cHRMz&u0`:pQ<bKGD̿tIME  4$0IDAT(c 0g@o¦Tm(Ha``:n] 700E4u6{)20C[$znЪwVAo EVE@21׃AZUsCUJҦpp,z %.n XD0`%-= (fhi%tEXtdate:create2016-11-05T07:32:52-07:00%tEXtdate:modify2016-11-05T07:32:52-07:00ŜvtEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_3.png000066400000000000000000000007641512524704700225260ustar00rootroot00000000000000PNG  IHDR+Ŭ$gAMA a cHRMz&u0`:pQ<bKGD̿tIME !)IDAT8c0;̀ pi- pG@){XVR2_ʅ- );G(tT V(%\&Hp(H~$5n8bikMPJ;V5I9&[(r\H[lrR"%M4Pwld"K-d)I\nvma%tEXtdate:create2016-11-05T07:33:02-07:00r%tEXtdate:modify2016-11-05T07:33:02-07:00b,tEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_4.png000066400000000000000000000010451512524704700225200ustar00rootroot00000000000000PNG  IHDR!5>gAMA a cHRMz&u0`:pQ<bKGD̿tIME ! )IDAT8-hQdd8 `d03" f,.hAL2L _{=%=^%DE2j}Rb}(E6_{1 \QQ*.?PQH ~ =.Q3PQͩ0 f9;E;~R˃yo3MJل-{B%tEXtdate:create2016-11-05T07:33:12-07:00Ir%tEXtdate:modify2016-11-05T07:33:12-07:00ʲtEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_5.png000066400000000000000000000011231512524704700225160ustar00rootroot00000000000000PNG  IHDR'@91 gAMA a cHRMz&u0`:pQ<bKGD̿tIME !qBHIDATH/HQ9a04j\ jAM!*hX+b"hR42); 억޽`k;q*67cube;ߗ %|ݿW}ѥ#M-jWҥ7E6Xծe YhwH]nڭ}hw;vdZ%hCs[ iw,{}%YIR[!W e$ːn閡&YwI>#. 
Ղ+6vr]wN]y&;;v&9r- &C+%tEXtdate:create2016-11-05T07:33:21-07:00`.op%tEXtdate:modify2016-11-05T07:33:21-07:00stEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_6.png000066400000000000000000000012041512524704700225170ustar00rootroot00000000000000PNG  IHDR-J gAMA a cHRMz&u0`:pQ<bKGD̿tIME !EyIDATH+Ho~X ,Xղ6X,jM|EhZ,N1XbS0aCd |;݇EqqwY>ղK-aћd=&%n|s$ze)ʸi)}s$rU)uJk/θ +Rztue=Eemqt]le] >z8 cqt]jg+zAJ$@1>B#]{#^uA~ A`'@'ޑWIY)%]?"@0&]c%@/JCKH:g}=}0h ^GBEOBkdIUWuUK~Z %tEXtdate:create2016-11-05T07:33:31-07:00o%tEXtdate:modify2016-11-05T07:33:31-07:00RtEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/90_7.png000066400000000000000000000012711512524704700225240ustar00rootroot00000000000000PNG  IHDR5T% SgAMA a cHRMz&u0`:pQ<bKGD̿tIME !)^-ЄIDATXO(qL4RV 2%qp99bJ$vpA;(R9JȟZYq<.&_~}iHe?.\)j^MEm!{fHAU RPqm29_Ԯw^ϨWIZ1#h`13*Ւ VwE̟`u$h`madjMRS0&%C:2>\>;*XմK# ᪛΋+g#XKIpee=!Xm}p-\Ij`70jUVVFF7ZB:P>p5,HԡUQ5V%5 XʨW2udu*43'X;RE*hTQ6%)NEYpPo*?onuǒ%tEXtdate:create2016-11-05T07:33:41-07:00Af%tEXtdate:modify2016-11-05T07:33:41-07:00KtEXtlabelZsIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_0.png000066400000000000000000000004441512524704700225170ustar00rootroot00000000000000PNG  IHDR IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  #IDATc`w?I[ biB+}%tEXtdate:create2016-11-05T07:32:35-07:00 %tEXtdate:modify2016-11-05T07:32:35-07:00TtEXtlabel[aC7IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_1.png000066400000000000000000000004551512524704700225220ustar00rootroot00000000000000PNG  IHDR)1gAMA a cHRMz&u0`:pQ<bKGD̿tIME  -@[%"IDATc :B?0tZ{`^nke%tEXtdate:create2016-11-05T07:32:44-07:00"n%tEXtdate:modify2016-11-05T07:32:44-07:00jtEXtlabel[aC7IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_2.png000066400000000000000000000004611512524704700225200ustar00rootroot00000000000000PNG  IHDR n1gAMA a cHRMz&u0`:pQ<bKGD̿tIME  6>0&IDATc @t7BfΝO!f%tEXtdate:create2016-11-05T07:32:54-07:00"%tEXtdate:modify2016-11-05T07:32:54-07:00LLtEXtlabel[aC7IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_3.png000066400000000000000000000004701512524704700225210ustar00rootroot00000000000000PNG  IHDR +u]gAMA a cHRMz&u0`:pQ<bKGD̿tIME !-IDAT(c @8ޝ"xQL ~ޙo2}5;%tEXtdate:create2016-11-05T07:33:04-07:00p3G%tEXtdate:modify2016-11-05T07:33:04-07:00ntEXtlabel[aC7IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_4.png000066400000000000000000000004741512524704700225260ustar00rootroot00000000000000PNG  IHDR5\]gAMA a cHRMz&u0`:pQ<bKGD̿tIME !'e1IDAT(c  P`M_g{$> ?* ! )lz%tEXtdate:create2016-11-05T07:33:14-07:00G4%tEXtdate:modify2016-11-05T07:33:14-07:00tEXtlabel[aC7IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_5.png000066400000000000000000000005011512524704700225160ustar00rootroot00000000000000PNG  IHDR@2xwgAMA a cHRMz&u0`:pQ<bKGD̿tIME !о6IDAT8c3;u|E;BBB$df(B)c !O%tEXtdate:create2016-11-05T07:33:24-07:002@%tEXtdate:modify2016-11-05T07:33:24-07:00CKktEXtlabel[aC7IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/91_6.png000066400000000000000000000005071512524704700225250ustar00rootroot00000000000000PNG  IHDRJf+VgAMA a cHRMz&u0`:pQ<bKGD̿tIME !" 0IDAT(ϥ! 
AEკ QM($I$$I2A2I$f4I( l6ۛ=~/]wT0UyL`QWOOŪ$*|Tl {g TN>*V!骬`PTT*"|W9PJ?* X)*TBoO%tEXtdate:create2016-11-05T07:32:54-07:00"%tEXtdate:modify2016-11-05T07:32:54-07:00LLtEXtlabel\֔IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/92_3.png000066400000000000000000000007751512524704700225320ustar00rootroot00000000000000PNG  IHDR+LgAMA a cHRMz&u0`:pQ<bKGD̿tIME !IDAT8пGpw]rK9NSD4PS4ihHKCD%ѐ"!R[$jI:=@CS/cJgxgc9eMxLq<Úxl`S%|)h~}_PU(ߧxi܉aE ᾰ,=%7 ńia;Otж߀4hs1[ Gp&Uao /IX둮ڮ X K{gmT8Ox* ~/Iy a/"%tEXtdate:create2016-11-05T07:33:14-07:00G4%tEXtdate:modify2016-11-05T07:33:14-07:00tEXtlabel\֔IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/92_5.png000066400000000000000000000011441512524704700225230ustar00rootroot00000000000000PNG  IHDR!@'ALgAMA a cHRMz&u0`:pQ<bKGD̿tIME !оYIDATH;(aRA drɂ26#6$†b#2. SwQ'<=}{lZo#:eD]+R-Z2JP" DDdcQ{9iD+WV/2CbGa $EAX3V)DbUD$[ KjL{aZ2F1 "^,$IE.@XD7K0%-Qka$o@X ӄ e+$>O@ؐD;[G֤%$% 7AؼD !AX-"Dlzʝ_U*F&%tEXtdate:create2016-11-05T07:33:24-07:002@%tEXtdate:modify2016-11-05T07:33:24-07:00CKktEXtlabel\֔IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/92_6.png000066400000000000000000000012151512524704700225230ustar00rootroot00000000000000PNG  IHDR&J`ySgAMA a cHRMz&u0`:pQ<bKGD̿tIME !" IDATH?,ZƆԨUCR1JVm(ZIQ4FiLRth':c0|/O.py~z;%+6Gewl+mnsY_tZ}bbĜ! bĞ\̖"v)6frGĦ5宀٦2uyv8lL؞,^U։U5vE+9r%`[ {Y\X rybbb 0K%ռ.rbbAef}|)FFcf3eƉ9As`{i5`P.I"6fCby0f@ Rbqbc?dA4ذBrb9(_ck':\?%tEXtdate:create2016-11-05T07:33:34-07:00@I%tEXtdate:modify2016-11-05T07:33:34-07:00tEXtlabel\֔IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/92_7.png000066400000000000000000000013451512524704700225300ustar00rootroot00000000000000PNG  IHDR,TܞqgAMA a cHRMz&u0`:pQ<bKGD̿tIME !,.G$ IDATHODq_ZfM6"4fE44Eԡ53bbu(ku(F2QCRYU~~vzvTs~=}DOD`wg718F vkn~Ǒ[ҵO:N"r@[6$Qp)‹'Z"JnDxD8a%݃\Ml/~Q1kCIC]X3[ s|a`S=½™[0M/G9>@)݀X vG.? 
g#}q8VtsOlJ C vS7887Vt ¿8>`W-}[ ^v|l`+Wq=K[ҝp1e=8j`+R )[t/ތӷ`{t^'+ݎ4(eµ%tEXtdate:create2016-11-05T07:33:43-07:001w%tEXtdate:modify2016-11-05T07:33:43-07:00@btEXtlabel\֔IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_0.png000066400000000000000000000004441512524704700225210ustar00rootroot00000000000000PNG  IHDR IgAMA a cHRMz&u0`:pQ<bKGD̿tIME  #IDATc8?lyM+v%tEXtdate:create2016-11-05T07:32:35-07:00 %tEXtdate:modify2016-11-05T07:32:35-07:00TtEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_1.png000066400000000000000000000004641512524704700225240ustar00rootroot00000000000000PNG  IHDRظgAMA a cHRMz&u0`:pQ<bKGD̿tIME  -@[%)IDATc A΃ ]€f̈́K"%tEXtdate:create2016-11-05T07:32:45-07:00)%tEXtdate:modify2016-11-05T07:32:45-07:00̑ftEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_2.png000066400000000000000000000004641512524704700225250ustar00rootroot00000000000000PNG  IHDR gAMA a cHRMz&u0`:pQ<bKGD̿tIME  6>0)IDATc @`^|*Q6}X@cn%tEXtdate:create2016-11-05T07:32:54-07:00"%tEXtdate:modify2016-11-05T07:32:54-07:00LLtEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_3.png000066400000000000000000000004731512524704700225260ustar00rootroot00000000000000PNG  IHDR +u]gAMA a cHRMz&u0`:pQ<bKGD̿tIME !0IDAT(c v3@|/@D(o>?>F塦<<\Kwab%tEXtdate:create2016-11-05T07:33:04-07:00p3G%tEXtdate:modify2016-11-05T07:33:04-07:00ntEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_4.png000066400000000000000000000004751512524704700225310ustar00rootroot00000000000000PNG  IHDR5;_gAMA a cHRMz&u0`:pQ<bKGD̿tIME !'e2IDAT(c0Era `F""C$H"1"hv(o%tEXtdate:create2016-11-05T07:33:14-07:00G4%tEXtdate:modify2016-11-05T07:33:14-07:00tEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_5.png000066400000000000000000000005011512524704700225200ustar00rootroot00000000000000PNG  IHDR@!IgAMA a cHRMz&u0`:pQ<bKGD̿tIME !о6IDAT8c I3A15ĒU Lb@C2L@9_Z%tEXtdate:create2016-11-05T07:33:24-07:002@%tEXtdate:modify2016-11-05T07:33:24-07:00CKktEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_6.png000066400000000000000000000005041512524704700225240ustar00rootroot00000000000000PNG  IHDRJkgAMA a cHRMz&u0`:pQ<bKGD̿tIME !" 
9IDAT8c0&|ܦf00G٣£££#UA>STRE?V@0Eo%tEXtdate:create2016-11-05T07:33:34-07:00@I%tEXtdate:modify2016-11-05T07:33:34-07:00tEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/93_7.png000066400000000000000000000005121512524704700225240ustar00rootroot00000000000000PNG  IHDRTsxgAMA a cHRMz&u0`:pQ<bKGD̿tIME !,.G$ ?IDATHc0'u{7 8$U΀?*5*5*5*5*5*5/.LARQb]P%tEXtdate:create2016-11-05T07:33:44-07:00yIP%tEXtdate:modify2016-11-05T07:33:44-07:00$tEXtlabel]IENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/94_0.png000066400000000000000000000004631512524704700225230ustar00rootroot00000000000000PNG  IHDR ggAMA a cHRMz&u0`:pQ<bKGD̿tIME  #(IDATcKS<d<ӻP\Q"%tEXtdate:create2016-11-05T07:32:35-07:00 %tEXtdate:modify2016-11-05T07:32:35-07:00TtEXtlabel^շIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/94_1.png000066400000000000000000000005311512524704700225200ustar00rootroot00000000000000PNG  IHDRCgAMA a cHRMz&u0`:pQ<bKGD̿tIME  -@[%NIDATc@u $1de(c0~ ;9@7 cmF/`a`X;bFm%tEXtdate:create2016-11-05T07:32:45-07:00)%tEXtdate:modify2016-11-05T07:32:45-07:00̑ftEXtlabel^շIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/94_2.png000066400000000000000000000006061512524704700225240ustar00rootroot00000000000000PNG  IHDR 3WgAMA a cHRMz&u0`:pQ<bKGD̿tIME  79ܦ{IDAT(c0@1 ]  /cd`h```݅,8%~nAL4C?DP\DUU 6A?lg \n΅o?%tEXtdate:create2016-11-05T07:32:55-07:00qf)D%tEXtdate:modify2016-11-05T07:32:55-07:00;tEXtlabel^շIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/94_3.png000066400000000000000000000006551512524704700225310ustar00rootroot00000000000000PNG  IHDR+cgAMA a cHRMz&u0`:pQ<bKGD̿tIME !IDAT8c0+;>Uv-XQ 3Hz-aeW-貯AsgPeC$,G K__lHy?=ȒaQ$d @#ȑIQQQY2e@vb3$%tEXtdate:create2016-11-05T07:33:04-07:00p3G%tEXtdate:modify2016-11-05T07:33:04-07:00ntEXtlabel^շIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/94_4.png000066400000000000000000000007551512524704700225330ustar00rootroot00000000000000PNG  IHDR#5= gAMA a cHRMz&u0`:pQ<bKGD̿tIME !'eIDAT8cO0fe=qJwBt@h[煨V%v$!@B 6o5 !"7Rh3NCԦ`(Q9 !QQۈ4,4iPCT.D 2(vdѳ=zjy"=2H_V+~,c`o!J!AϺ?VP Vk - JG*U:tTR,e%tEXtdate:create2016-11-05T07:33:24-07:002@%tEXtdate:modify2016-11-05T07:33:24-07:00CKktEXtlabel^շIENDB`ggml-org-ggml-3678254/examples/yolo/data/labels/94_6.png000066400000000000000000000010761512524704700225320ustar00rootroot00000000000000PNG  IHDR1J7gAMA a cHRMz&u0`:pQ<bKGD̿tIME !" 
[examples/yolo/data/labels/*.png: binary PNG glyph bitmaps for the label alphabet — image data omitted]
ggml-org-ggml-3678254/examples/yolo/yolo-image.cpp
[tar header and include lines not recoverable from the source dump]
static void draw_box(yolo_image & a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    if (x1 < 0) x1 = 0; if (x1 >= a.w) x1 = a.w-1;
    if (x2 < 0) x2 = 0; if (x2 >= a.w) x2 = a.w-1;
    if (y1 < 0) y1 = 0; if (y1 >= a.h) y1 = a.h-1;
    if (y2 < 0) y2 = 0; if (y2 >= a.h) y2 = a.h-1;
    for (int i = x1; i <= x2; ++i){
        a.data[i + y1*a.w + 0*a.w*a.h] = r; a.data[i + y2*a.w + 0*a.w*a.h] = r;
        a.data[i + y1*a.w + 1*a.w*a.h] = g; a.data[i + y2*a.w + 1*a.w*a.h] = g;
        a.data[i + y1*a.w + 2*a.w*a.h] = b; a.data[i + y2*a.w + 2*a.w*a.h] = b;
    }
    for (int i = y1; i <= y2; ++i){
        a.data[x1 + i*a.w + 0*a.w*a.h] = r; a.data[x2 + i*a.w + 0*a.w*a.h] = r;
        a.data[x1 + i*a.w + 1*a.w*a.h] = g; a.data[x2 + i*a.w + 1*a.w*a.h] = g;
        a.data[x1 + i*a.w + 2*a.w*a.h] = b; a.data[x2 + i*a.w + 2*a.w*a.h] = b;
    }
}
void draw_box_width(yolo_image & a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    for (int i = 0; i < w; ++i) {
        draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
    }
}
bool save_image(const yolo_image & im, const char *name, int quality)
{
    uint8_t *data = (uint8_t*)calloc(im.w*im.h*im.c, sizeof(uint8_t));
    for (int k = 0; k < im.c; ++k) {
        for (int i = 0; i < im.w*im.h; ++i) {
            data[i*im.c+k] = (uint8_t) (255*im.data[i + k*im.w*im.h]);
        }
    }
    int success = stbi_write_jpg(name, im.w, im.h, im.c, data, quality);
    free(data);
    if (!success) {
        fprintf(stderr, "Failed to write image %s\n", name);
        return false;
    }
    return true;
}
bool load_image(const char *fname, yolo_image & img)
{
    int w, h, c;
    uint8_t * data = stbi_load(fname, &w, &h, &c, 3);
    if (!data) {
        return false;
    }
    c = 3;
    img.w = w;
    img.h = h;
    img.c = c;
    img.data.resize(w*h*c);
    for (int k = 0; k < c; ++k){
        for (int j = 0; j < h; ++j){
            for (int i = 0; i < w; ++i){
                int dst_index = i + w*j + w*h*k;
                int src_index = k + c*i + c*w*j;
                img.data[dst_index] = (float)data[src_index]/255.;
            }
        }
    }
    stbi_image_free(data);
    return true;
}
static yolo_image resize_image(const yolo_image & im, int w, int h)
{
    yolo_image resized(w, h, im.c);
    yolo_image part(w, im.h, im.c);
    float w_scale = (float)(im.w - 1) / (w - 1);
    float h_scale = (float)(im.h - 1) / (h - 1);
    for (int k = 0; k < im.c; ++k){
        for (int r =
0; r < im.h; ++r) { for (int c = 0; c < w; ++c) { float val = 0; if (c == w-1 || im.w == 1){ val = im.get_pixel(im.w-1, r, k); } else { float sx = c*w_scale; int ix = (int) sx; float dx = sx - ix; val = (1 - dx) * im.get_pixel(ix, r, k) + dx * im.get_pixel(ix+1, r, k); } part.set_pixel(c, r, k, val); } } } for (int k = 0; k < im.c; ++k){ for (int r = 0; r < h; ++r){ float sy = r*h_scale; int iy = (int) sy; float dy = sy - iy; for (int c = 0; c < w; ++c){ float val = (1-dy) * part.get_pixel(c, iy, k); resized.set_pixel(c, r, k, val); } if (r == h-1 || im.h == 1) continue; for (int c = 0; c < w; ++c){ float val = dy * part.get_pixel(c, iy+1, k); resized.add_pixel(c, r, k, val); } } } return resized; } static void embed_image(const yolo_image & source, yolo_image & dest, int dx, int dy) { for (int k = 0; k < source.c; ++k) { for (int y = 0; y < source.h; ++y) { for (int x = 0; x < source.w; ++x) { float val = source.get_pixel(x, y, k); dest.set_pixel(dx+x, dy+y, k, val); } } } } yolo_image letterbox_image(const yolo_image & im, int w, int h) { int new_w = im.w; int new_h = im.h; if (((float)w/im.w) < ((float)h/im.h)) { new_w = w; new_h = (im.h * w)/im.w; } else { new_h = h; new_w = (im.w * h)/im.h; } yolo_image resized = resize_image(im, new_w, new_h); yolo_image boxed(w, h, im.c); boxed.fill(0.5); embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2); return boxed; } static yolo_image tile_images(const yolo_image & a, const yolo_image & b, int dx) { if (a.w == 0) { return b; } yolo_image c(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, a.c); c.fill(1.0f); embed_image(a, c, 0, 0); embed_image(b, c, a.w + dx, 0); return c; } static yolo_image border_image(const yolo_image & a, int border) { yolo_image b(a.w + 2*border, a.h + 2*border, a.c); b.fill(1.0f); embed_image(a, b, border, border); return b; } yolo_image get_label(const std::vector & alphabet, const std::string & label, int size) { size = size/10; size = std::min(size, 7); yolo_image result(0,0,0); for (int i = 0; i < (int)label.size(); ++i) { int ch = label[i]; yolo_image img = alphabet[size*128 + ch]; result = tile_images(result, img, -size - 1 + (size+1)/2); } return border_image(result, (int)(result.h*.25)); } void draw_label(yolo_image & im, int row, int col, const yolo_image & label, const float * rgb) { int w = label.w; int h = label.h; if (row - h >= 0) { row = row - h; } for (int j = 0; j < h && j + row < im.h; j++) { for (int i = 0; i < w && i + col < im.w; i++) { for (int k = 0; k < label.c; k++) { float val = label.get_pixel(i, j, k); im.set_pixel(i + col, j + row, k, rgb[k] * val); } } } }ggml-org-ggml-3678254/examples/yolo/yolo-image.h000066400000000000000000000025021512524704700213760ustar00rootroot00000000000000#pragma once #include #include #include struct yolo_image { int w, h, c; std::vector data; yolo_image() : w(0), h(0), c(0) {} yolo_image(int w, int h, int c) : w(w), h(h), c(c), data(w*h*c) {} float get_pixel(int x, int y, int c) const { assert(x >= 0 && x < w && y >= 0 && y < h && c >= 0 && c < this->c); return data[c*w*h + y*w + x]; } void set_pixel(int x, int y, int c, float val) { assert(x >= 0 && x < w && y >= 0 && y < h && c >= 0 && c < this->c); data[c*w*h + y*w + x] = val; } void add_pixel(int x, int y, int c, float val) { assert(x >= 0 && x < w && y >= 0 && y < h && c >= 0 && c < this->c); data[c*w*h + y*w + x] += val; } void fill(float val) { std::fill(data.begin(), data.end(), val); } }; bool load_image(const char *fname, yolo_image & img); void draw_box_width(yolo_image & a, int x1, int y1, int x2, int y2, 
int w, float r, float g, float b); yolo_image letterbox_image(const yolo_image & im, int w, int h); bool save_image(const yolo_image & im, const char *name, int quality); yolo_image get_label(const std::vector & alphabet, const std::string & label, int size); void draw_label(yolo_image & im, int row, int col, const yolo_image & label, const float * rgb); ggml-org-ggml-3678254/examples/yolo/yolov3-tiny.cpp000066400000000000000000000573701512524704700221200ustar00rootroot00000000000000#include "ggml.h" #include "gguf.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include "yolo-image.h" #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif struct conv2d_layer { struct ggml_tensor * weights; struct ggml_tensor * biases; struct ggml_tensor * scales; struct ggml_tensor * rolling_mean; struct ggml_tensor * rolling_variance; int padding = 1; bool batch_normalize = true; bool activate = true; // true for leaky relu, false for linear }; struct yolo_model { int width = 416; int height = 416; std::vector conv2d_layers; ggml_backend_t backend; ggml_backend_buffer_t buffer; struct ggml_context * ctx; }; struct yolo_layer { int classes = 80; std::vector mask; std::vector anchors; std::vector predictions; int w; int h; yolo_layer(int classes, const std::vector & mask, const std::vector & anchors, struct ggml_tensor * prev_layer) : classes(classes), mask(mask), anchors(anchors) { w = prev_layer->ne[0]; h = prev_layer->ne[1]; predictions.resize(ggml_nbytes(prev_layer)/sizeof(float)); ggml_backend_tensor_get(prev_layer, predictions.data(), 0, ggml_nbytes(prev_layer)); } int entry_index(int location, int entry) const { int n = location / (w*h); int loc = location % (w*h); return n*w*h*(4+classes+1) + entry*w*h + loc; } }; struct box { float x, y, w, h; }; struct detection { box bbox; std::vector prob; float objectness; }; static bool load_model(const std::string & fname, yolo_model & model) { struct ggml_context * tmp_ctx = nullptr; struct gguf_init_params gguf_params = { /*.no_alloc =*/ false, /*.ctx =*/ &tmp_ctx, }; gguf_context * gguf_ctx = gguf_init_from_file(fname.c_str(), gguf_params); if (!gguf_ctx) { fprintf(stderr, "%s: gguf_init_from_file() failed\n", __func__); return false; } int num_tensors = gguf_get_n_tensors(gguf_ctx); struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead() * num_tensors, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; model.ctx = ggml_init(params); for (int i = 0; i < num_tensors; i++) { const char * name = gguf_get_tensor_name(gguf_ctx, i); struct ggml_tensor * src = ggml_get_tensor(tmp_ctx, name); struct ggml_tensor * dst = ggml_dup_tensor(model.ctx, src); ggml_set_name(dst, name); } model.buffer = ggml_backend_alloc_ctx_tensors(model.ctx, model.backend); // copy tensors from main memory to backend for (struct ggml_tensor * cur = ggml_get_first_tensor(model.ctx); cur != NULL; cur = ggml_get_next_tensor(model.ctx, cur)) { struct ggml_tensor * src = ggml_get_tensor(tmp_ctx, ggml_get_name(cur)); size_t n_size = ggml_nbytes(src); ggml_backend_tensor_set(cur, ggml_get_data(src), 0, n_size); } gguf_free(gguf_ctx); model.width = 416; model.height = 416; model.conv2d_layers.resize(13); model.conv2d_layers[7].padding = 0; model.conv2d_layers[9].padding = 0; model.conv2d_layers[9].batch_normalize = false; model.conv2d_layers[9].activate = false; model.conv2d_layers[10].padding = 0; model.conv2d_layers[12].padding = 0; 
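    /* note (descriptive, based on the yolov3-tiny.cfg layout this example mirrors): layers 7, 9, 10 and 12
       are the 1x1 convolutions, hence padding = 0; layers 9 and 12 produce the raw predictions for the two
       YOLO detection heads, so they are configured with a linear activation and without batch normalization */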
model.conv2d_layers[12].batch_normalize = false; model.conv2d_layers[12].activate = false; for (int i = 0; i < (int)model.conv2d_layers.size(); i++) { char name[256]; snprintf(name, sizeof(name), "l%d_weights", i); model.conv2d_layers[i].weights = ggml_get_tensor(model.ctx, name); snprintf(name, sizeof(name), "l%d_biases", i); model.conv2d_layers[i].biases = ggml_get_tensor(model.ctx, name); if (model.conv2d_layers[i].batch_normalize) { snprintf(name, sizeof(name), "l%d_scales", i); model.conv2d_layers[i].scales = ggml_get_tensor(model.ctx, name); snprintf(name, sizeof(name), "l%d_rolling_mean", i); model.conv2d_layers[i].rolling_mean = ggml_get_tensor(model.ctx, name); snprintf(name, sizeof(name), "l%d_rolling_variance", i); model.conv2d_layers[i].rolling_variance = ggml_get_tensor(model.ctx, name); } } return true; } static bool load_labels(const char * filename, std::vector & labels) { std::ifstream file_in(filename); if (!file_in) { return false; } std::string line; while (std::getline(file_in, line)) { labels.push_back(line); } GGML_ASSERT(labels.size() == 80); return true; } static bool load_alphabet(std::vector & alphabet) { alphabet.resize(8 * 128); for (int j = 0; j < 8; j++) { for (int i = 32; i < 127; i++) { char fname[256]; snprintf(fname, sizeof(fname), "data/labels/%d_%d.png", i, j); if (!load_image(fname, alphabet[j*128 + i])) { fprintf(stderr, "Cannot load '%s'\n", fname); return false; } } } return true; } static ggml_tensor * apply_conv2d(ggml_context * ctx, ggml_tensor * input, const conv2d_layer & layer) { struct ggml_tensor * result = ggml_conv_2d(ctx, layer.weights, input, 1, 1, layer.padding, layer.padding, 1, 1); if (layer.batch_normalize) { result = ggml_sub(ctx, result, ggml_repeat(ctx, layer.rolling_mean, result)); result = ggml_div(ctx, result, ggml_sqrt(ctx, ggml_repeat(ctx, layer.rolling_variance, result))); result = ggml_mul(ctx, result, ggml_repeat(ctx, layer.scales, result)); } result = ggml_add(ctx, result, ggml_repeat(ctx, layer.biases, result)); if (layer.activate) { result = ggml_leaky_relu(ctx, result, 0.1f, true); } return result; } static void activate_array(float * x, const int n) { // logistic activation for (int i = 0; i < n; i++) { x[i] = 1./(1. 
+ exp(-x[i])); } } static void apply_yolo(yolo_layer & layer) { int w = layer.w; int h = layer.h; int N = layer.mask.size(); float * data = layer.predictions.data(); for (int n = 0; n < N; n++) { int index = layer.entry_index(n*w*h, 0); activate_array(data + index, 2*w*h); index = layer.entry_index(n*w*h, 4); activate_array(data + index, (1+layer.classes)*w*h); } } static box get_yolo_box(const yolo_layer & layer, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride) { const float * predictions = layer.predictions.data(); box b; b.x = (i + predictions[index + 0*stride]) / lw; b.y = (j + predictions[index + 1*stride]) / lh; b.w = exp(predictions[index + 2*stride]) * layer.anchors[2*n] / w; b.h = exp(predictions[index + 3*stride]) * layer.anchors[2*n+1] / h; return b; } static void correct_yolo_box(box & b, int im_w, int im_h, int net_w, int net_h) { int new_w = 0; int new_h = 0; if (((float)net_w/im_w) < ((float)net_h/im_h)) { new_w = net_w; new_h = (im_h * net_w)/im_w; } else { new_h = net_h; new_w = (im_w * net_h)/im_h; } b.x = (b.x - (net_w - new_w)/2./net_w) / ((float)new_w/net_w); b.y = (b.y - (net_h - new_h)/2./net_h) / ((float)new_h/net_h); b.w *= (float)net_w/new_w; b.h *= (float)net_h/new_h; } static void get_yolo_detections(const yolo_layer & layer, std::vector & detections, int im_w, int im_h, int netw, int neth, float thresh) { int w = layer.w; int h = layer.h; int N = layer.mask.size(); const float * predictions = layer.predictions.data(); std::vector result; for (int i = 0; i < w*h; i++) { for (int n = 0; n < N; n++) { int obj_index = layer.entry_index(n*w*h + i, 4); float objectness = predictions[obj_index]; if (objectness <= thresh) { continue; } detection det; int box_index = layer.entry_index(n*w*h + i, 0); int row = i / w; int col = i % w; det.bbox = get_yolo_box(layer, layer.mask[n], box_index, col, row, w, h, netw, neth, w*h); correct_yolo_box(det.bbox, im_w, im_h, netw, neth); det.objectness = objectness; det.prob.resize(layer.classes); for (int j = 0; j < layer.classes; j++) { int class_index = layer.entry_index(n*w*h + i, 4 + 1 + j); float prob = objectness*predictions[class_index]; det.prob[j] = (prob > thresh) ? prob : 0; } detections.push_back(det); } } } static float overlap(float x1, float w1, float x2, float w2) { float l1 = x1 - w1/2; float l2 = x2 - w2/2; float left = l1 > l2 ? l1 : l2; float r1 = x1 + w1/2; float r2 = x2 + w2/2; float right = r1 < r2 ? 
r1 : r2; return right - left; } static float box_intersection(const box & a, const box & b) { float w = overlap(a.x, a.w, b.x, b.w); float h = overlap(a.y, a.h, b.y, b.h); if (w < 0 || h < 0) return 0; float area = w*h; return area; } static float box_union(const box & a, const box & b) { float i = box_intersection(a, b); float u = a.w*a.h + b.w*b.h - i; return u; } static float box_iou(const box & a, const box & b) { return box_intersection(a, b)/box_union(a, b); } static void do_nms_sort(std::vector & dets, int classes, float thresh) { int k = (int)dets.size()-1; for (int i = 0; i <= k; ++i) { if (dets[i].objectness == 0) { std::swap(dets[i], dets[k]); --k; --i; } } int total = k+1; for (int k = 0; k < classes; ++k) { std::sort(dets.begin(), dets.begin()+total, [=](const detection & a, const detection & b) { return a.prob[k] > b.prob[k]; }); for (int i = 0; i < total; ++i) { if (dets[i].prob[k] == 0) { continue; } box a = dets[i].bbox; for (int j = i+1; j < total; ++j){ box b = dets[j].bbox; if (box_iou(a, b) > thresh) { dets[j].prob[k] = 0; } } } } } static float get_color(int c, int x, int max) { float colors[6][3] = { {1,0,1}, {0,0,1}, {0,1,1}, {0,1,0}, {1,1,0}, {1,0,0} }; float ratio = ((float)x/max)*5; int i = floor(ratio); int j = ceil(ratio); ratio -= i; float r = (1-ratio) * colors[i][c] + ratio*colors[j][c]; return r; } static void draw_detections(yolo_image & im, const std::vector & dets, float thresh, const std::vector & labels, const std::vector & alphabet) { int classes = (int)labels.size(); for (int i = 0; i < (int)dets.size(); i++) { std::string labelstr; int cl = -1; for (int j = 0; j < (int)dets[i].prob.size(); j++) { if (dets[i].prob[j] > thresh) { if (cl < 0) { labelstr = labels[j]; cl = j; } else { labelstr += ", "; labelstr += labels[j]; } printf("%s: %.0f%%\n", labels[j].c_str(), dets[i].prob[j]*100); } } if (cl >= 0) { int width = im.h * .006; int offset = cl*123457 % classes; float red = get_color(2,offset,classes); float green = get_color(1,offset,classes); float blue = get_color(0,offset,classes); float rgb[3]; rgb[0] = red; rgb[1] = green; rgb[2] = blue; box b = dets[i].bbox; int left = (b.x-b.w/2.)*im.w; int right = (b.x+b.w/2.)*im.w; int top = (b.y-b.h/2.)*im.h; int bot = (b.y+b.h/2.)*im.h; if (left < 0) left = 0; if (right > im.w-1) right = im.w-1; if (top < 0) top = 0; if (bot > im.h-1) bot = im.h-1; draw_box_width(im, left, top, right, bot, width, red, green, blue); yolo_image label = get_label(alphabet, labelstr, (im.h*.03)); draw_label(im, top + width, left, label, rgb); } } } static void print_shape(int layer, const ggml_tensor * t) { printf("Layer %2d output shape: %3d x %3d x %4d x %3d\n", layer, (int)t->ne[0], (int)t->ne[1], (int)t->ne[2], (int)t->ne[3]); } static struct ggml_cgraph * build_graph(struct ggml_context * ctx_cgraph, const yolo_model & model) { struct ggml_cgraph * gf = ggml_new_graph(ctx_cgraph); struct ggml_tensor * input = ggml_new_tensor_4d(ctx_cgraph, GGML_TYPE_F32, model.width, model.height, 3, 1); ggml_set_name(input, "input"); struct ggml_tensor * result = apply_conv2d(ctx_cgraph, input, model.conv2d_layers[0]); print_shape(0, result); result = ggml_pool_2d(ctx_cgraph, result, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); print_shape(1, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[1]); print_shape(2, result); result = ggml_pool_2d(ctx_cgraph, result, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); print_shape(3, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[2]); print_shape(4, result); result = 
ggml_pool_2d(ctx_cgraph, result, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); print_shape(5, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[3]); print_shape(6, result); result = ggml_pool_2d(ctx_cgraph, result, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); print_shape(7, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[4]); struct ggml_tensor * layer_8 = result; print_shape(8, result); result = ggml_pool_2d(ctx_cgraph, result, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0); print_shape(9, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[5]); print_shape(10, result); result = ggml_pool_2d(ctx_cgraph, result, GGML_OP_POOL_MAX, 2, 2, 1, 1, 0.5, 0.5); print_shape(11, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[6]); print_shape(12, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[7]); struct ggml_tensor * layer_13 = result; print_shape(13, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[8]); print_shape(14, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[9]); struct ggml_tensor * layer_15 = result; ggml_set_output(layer_15); ggml_set_name(layer_15, "layer_15"); print_shape(15, result); result = apply_conv2d(ctx_cgraph, layer_13, model.conv2d_layers[10]); print_shape(18, result); result = ggml_upscale(ctx_cgraph, result, 2, GGML_SCALE_MODE_NEAREST); print_shape(19, result); result = ggml_concat(ctx_cgraph, result, layer_8, 2); print_shape(20, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[11]); print_shape(21, result); result = apply_conv2d(ctx_cgraph, result, model.conv2d_layers[12]); struct ggml_tensor * layer_22 = result; ggml_set_output(layer_22); ggml_set_name(layer_22, "layer_22"); print_shape(22, result); ggml_build_forward_expand(gf, layer_15); ggml_build_forward_expand(gf, layer_22); return gf; } void detect(yolo_image & img, struct ggml_cgraph * gf, const yolo_model & model, float thresh, const std::vector & labels, const std::vector & alphabet) { std::vector detections; yolo_image sized = letterbox_image(img, model.width, model.height); struct ggml_tensor * input = ggml_graph_get_tensor(gf, "input"); ggml_backend_tensor_set(input, sized.data.data(), 0, ggml_nbytes(input)); if (ggml_backend_graph_compute(model.backend, gf) != GGML_STATUS_SUCCESS) { fprintf(stderr, "%s: ggml_backend_graph_compute() failed\n", __func__); return; } struct ggml_tensor * layer_15 = ggml_graph_get_tensor(gf, "layer_15"); yolo_layer yolo16{ 80, {3, 4, 5}, {10, 14, 23, 27, 37,58, 81, 82, 135, 169, 344, 319}, layer_15}; apply_yolo(yolo16); get_yolo_detections(yolo16, detections, img.w, img.h, model.width, model.height, thresh); struct ggml_tensor * layer_22 = ggml_graph_get_tensor(gf, "layer_22"); yolo_layer yolo23{ 80, {0, 1, 2}, {10, 14, 23, 27, 37,58, 81, 82, 135, 169, 344, 319}, layer_22}; apply_yolo(yolo23); get_yolo_detections(yolo23, detections, img.w, img.h, model.width, model.height, thresh); do_nms_sort(detections, yolo23.classes, .45); draw_detections(img, detections, thresh, labels, alphabet); } struct yolo_params { float thresh = 0.5; std::string model = "yolov3-tiny.gguf"; std::string fname_inp = "input.jpg"; std::string fname_out = "predictions.jpg"; int n_threads = std::max(1U, std::thread::hardware_concurrency()/2); std::string device; }; void yolo_print_usage(int argc, char ** argv, const yolo_params & params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, 
--help show this help message and exit\n"); fprintf(stderr, " -d, --device DEV device to use\n"); fprintf(stderr, " -t, --threads N number of threads for the CPU backend (default: %d)\n", params.n_threads); fprintf(stderr, " -th, --thresh T detection threshold (default: %.2f)\n", params.thresh); fprintf(stderr, " -m, --model FNAME model path (default: %s)\n", params.model.c_str()); fprintf(stderr, " -i, --inp FNAME input file (default: %s)\n", params.fname_inp.c_str()); fprintf(stderr, " -o, --out FNAME output file (default: %s)\n", params.fname_out.c_str()); fprintf(stderr, "\n"); } bool yolo_params_parse(int argc, char ** argv, yolo_params & params) { for (int i = 1; i < argc; i++) { std::string arg = argv[i]; if (arg == "-th" || arg == "--thresh") { params.thresh = std::stof(argv[++i]); if (params.thresh < 0 || params.thresh > 1) { fprintf(stderr, "error: invalid threshold: %.2f\n", params.thresh); return false; } } else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; } else if (arg == "-i" || arg == "--inp") { params.fname_inp = argv[++i]; } else if (arg == "-o" || arg == "--out") { params.fname_out = argv[++i]; } else if (arg == "-t" || arg == "--threads") { if (++i >= argc) { return false; } params.n_threads = std::stoi(argv[i]); if (params.n_threads <= 0) { fprintf(stderr, "error: invalid number of threads: %d\n", params.n_threads); return false; } } else if (arg == "-d" || arg == "--device") { if (++i >= argc) { return false; } params.device = argv[i]; if (ggml_backend_dev_by_name(params.device.c_str()) == nullptr) { fprintf(stderr, "error: unknown device: %s\n", params.device.c_str()); fprintf(stderr, "available devices:\n"); for (size_t i = 0; i < ggml_backend_dev_count(); i++) { auto * dev = ggml_backend_dev_get(i); size_t free, total; ggml_backend_dev_memory(dev, &free, &total); printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024); } return false; } } else if (arg == "-h" || arg == "--help") { yolo_print_usage(argc, argv, params); exit(0); } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); yolo_print_usage(argc, argv, params); exit(0); } } return true; } static ggml_backend_t create_backend(const yolo_params & params) { ggml_backend_t backend = nullptr; if (!params.device.empty()) { ggml_backend_dev_t dev = ggml_backend_dev_by_name(params.device.c_str()); if (dev) { backend = ggml_backend_dev_init(dev, nullptr); if (!backend) { fprintf(stderr, "Failed to create backend for device %s\n", params.device.c_str()); return nullptr; } } } // try to initialize a GPU backend first if (!backend) { backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr); } // if there aren't GPU backends fallback to CPU backend if (!backend) { backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); } if (backend) { fprintf(stderr, "%s: using %s backend\n", __func__, ggml_backend_name(backend)); // set the number of threads ggml_backend_dev_t dev = ggml_backend_get_device(backend); ggml_backend_reg_t reg = dev ? 
ggml_backend_dev_backend_reg(dev) : nullptr; if (reg) { auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); if (ggml_backend_set_n_threads_fn) { ggml_backend_set_n_threads_fn(backend, params.n_threads); } } } return backend; } int main(int argc, char *argv[]) { ggml_backend_load_all(); ggml_time_init(); yolo_model model; yolo_params params; if (!yolo_params_parse(argc, argv, params)) { return 1; } model.backend = create_backend(params); if (!model.backend) { fprintf(stderr, "Failed to create backend\n"); return 1; } if (!load_model(params.model, model)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return 1; } yolo_image img(0,0,0); if (!load_image(params.fname_inp.c_str(), img)) { fprintf(stderr, "%s: failed to load image from '%s'\n", __func__, params.fname_inp.c_str()); return 1; } std::vector labels; if (!load_labels("data/coco.names", labels)) { fprintf(stderr, "%s: failed to load labels from 'data/coco.names'\n", __func__); return 1; } std::vector alphabet; if (!load_alphabet(alphabet)) { fprintf(stderr, "%s: failed to load alphabet\n", __func__); return 1; } struct ggml_init_params params0 = { /*.mem_size =*/ ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph() }; struct ggml_context * ctx_cgraph = ggml_init(params0); struct ggml_cgraph * gf = build_graph(ctx_cgraph, model); ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend)); ggml_gallocr_alloc_graph(allocr, gf); const int64_t t_start_ms = ggml_time_ms(); detect(img, gf, model, params.thresh, labels, alphabet); const int64_t t_detect_ms = ggml_time_ms() - t_start_ms; if (!save_image(img, params.fname_out.c_str(), 80)) { fprintf(stderr, "%s: failed to save image to '%s'\n", __func__, params.fname_out.c_str()); return 1; } printf("Detected objects saved in '%s' (time: %f sec.)\n", params.fname_out.c_str(), t_detect_ms / 1000.0f); ggml_free(ctx_cgraph); ggml_gallocr_free(allocr); ggml_free(model.ctx); ggml_backend_buffer_free(model.buffer); ggml_backend_free(model.backend); return 0; } ggml-org-ggml-3678254/ggml.pc.in000066400000000000000000000004331512524704700162430ustar00rootroot00000000000000prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=${prefix} includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ Name: ggml Description: The GGML Tensor Library for Machine Learning Version: @GGML_VERSION@ Cflags: -I${includedir} Libs: -L${libdir} -lggml ggml-org-ggml-3678254/include/000077500000000000000000000000001512524704700160075ustar00rootroot00000000000000ggml-org-ggml-3678254/include/ggml-alloc.h000066400000000000000000000070501512524704700202000ustar00rootroot00000000000000#pragma once #include "ggml.h" #ifdef __cplusplus extern "C" { #endif typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t; typedef struct ggml_backend_buffer * ggml_backend_buffer_t; typedef struct ggml_backend * ggml_backend_t; // Tensor allocator struct ggml_tallocr { ggml_backend_buffer_t buffer; void * base; size_t alignment; size_t offset; }; GGML_API struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer); GGML_API enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor); // Graph allocator /* Example usage: ggml_gallocr_t galloc = 
ggml_gallocr_new(ggml_backend_cpu_buffer_type()); // optional: create a worst-case graph and reserve the buffers to avoid reallocations ggml_gallocr_reserve(galloc, build_graph(max_batch)); // allocate the graph struct ggml_cgraph * graph = build_graph(batch); ggml_gallocr_alloc_graph(galloc, graph); printf("compute buffer size: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0)); // evaluate the graph ggml_backend_graph_compute(backend, graph); */ // special tensor flags for use with the graph allocator: // ggml_set_input(): all input tensors are allocated at the beginning of the graph in non-overlapping addresses // ggml_set_output(): output tensors are never freed and never overwritten typedef struct ggml_gallocr * ggml_gallocr_t; GGML_API ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft); GGML_API ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs); GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc); // pre-allocate buffers from a measure graph - does not allocate or modify the graph // call with a worst-case graph to avoid buffer reallocations // not strictly required for single buffer usage: ggml_gallocr_alloc_graph will reallocate the buffers automatically if needed // returns false if the buffer allocation failed // ggml_gallocr_resrve_n_size writes the buffer sizes per galloc buffer that would be allocated by ggml_gallocr_reserve_n to sizes GGML_API bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph * graph); GGML_API void ggml_gallocr_reserve_n_size( ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, size_t * sizes); GGML_API bool ggml_gallocr_reserve_n( ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids); // automatic reallocation if the topology changes when using a single buffer // returns false if using multiple buffers and a re-allocation is needed (call ggml_gallocr_reserve_n first to set the node buffers) GGML_API bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph); GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id); // Utils // Create a buffer and allocate all the tensors in a ggml_context // ggml_backend_alloc_ctx_tensors_from_buft_size returns the size of the buffer that would be allocated by ggml_backend_alloc_ctx_tensors_from_buft GGML_API size_t ggml_backend_alloc_ctx_tensors_from_buft_size(struct ggml_context * ctx, ggml_backend_buffer_type_t buft); GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft); GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-backend.h000066400000000000000000000510271512524704700205000ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-alloc.h" #ifdef GGML_BACKEND_SHARED # if defined(_WIN32) && !defined(__MINGW32__) # ifdef GGML_BACKEND_BUILD # define GGML_BACKEND_API __declspec(dllexport) extern # else # define GGML_BACKEND_API __declspec(dllimport) extern # endif # else # define GGML_BACKEND_API __attribute__ ((visibility ("default"))) extern # endif #else # define GGML_BACKEND_API extern #endif #ifdef __cplusplus extern "C" { #endif typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t; typedef struct ggml_backend_buffer * 
ggml_backend_buffer_t; typedef struct ggml_backend_event * ggml_backend_event_t; typedef struct ggml_backend * ggml_backend_t; typedef void * ggml_backend_graph_plan_t; typedef struct ggml_backend_reg * ggml_backend_reg_t; typedef struct ggml_backend_device * ggml_backend_dev_t; // // Backend buffer type // GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft); GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size); GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft); GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft); GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); GGML_API ggml_backend_dev_t ggml_backend_buft_get_device (ggml_backend_buffer_type_t buft); // // Backend buffer // enum ggml_backend_buffer_usage { GGML_BACKEND_BUFFER_USAGE_ANY = 0, GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1, GGML_BACKEND_BUFFER_USAGE_COMPUTE = 2, }; GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer); GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); GGML_API enum ggml_status ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor); GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); GGML_API enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage (ggml_backend_buffer_t buffer); GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer); GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer); // tensor copy between different backends GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst); // // Backend (stream) // GGML_API ggml_guid_t ggml_backend_guid(ggml_backend_t backend); GGML_API const char * ggml_backend_name(ggml_backend_t backend); GGML_API void ggml_backend_free(ggml_backend_t backend); GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend); GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size); GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend); GGML_API size_t ggml_backend_get_max_size(ggml_backend_t backend); GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); // "offset" refers to the offset in tensor->data for setting/getting data GGML_API void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); GGML_API void 
ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); GGML_API void ggml_backend_tensor_memset( struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size); GGML_API void ggml_backend_synchronize(ggml_backend_t backend); GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph); GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan); GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan); GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph); GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph); // NOTE: will be removed, use device version instead GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op); GGML_API bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft); GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op); // asynchronous copy // the copy is performed after all the currently queued operations in backend_src // backend_dst will wait for the copy to complete before performing other operations // automatic fallback to sync copy if async is not supported GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst); GGML_API ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend); // // Events // GGML_API ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device); GGML_API void ggml_backend_event_free(ggml_backend_event_t event); GGML_API void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend); GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event); GGML_API void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event); // // Backend device // enum ggml_backend_dev_type { // CPU device using system memory GGML_BACKEND_DEVICE_TYPE_CPU, // GPU device using dedicated memory GGML_BACKEND_DEVICE_TYPE_GPU, // integrated GPU device using host memory GGML_BACKEND_DEVICE_TYPE_IGPU, // accelerator devices intended to be used together with the CPU backend (e.g. BLAS or AMX) GGML_BACKEND_DEVICE_TYPE_ACCEL }; // functionality supported by the device struct ggml_backend_dev_caps { // asynchronous operations bool async; // pinned host buffer bool host_buffer; // creating buffers from host ptr bool buffer_from_host_ptr; // event synchronization bool events; }; // all the device properties struct ggml_backend_dev_props { // device name const char * name; // device description const char * description; // device free memory in bytes size_t memory_free; // device total memory in bytes size_t memory_total; // device type enum ggml_backend_dev_type type; // device id // for PCI devices, this should be the PCI bus id formatted as "domain:bus:device.function" (e.g. 
"0000:01:00.0") // if the id is unknown, this should be NULL const char * device_id; // device capabilities struct ggml_backend_dev_caps caps; }; GGML_API const char * ggml_backend_dev_name(ggml_backend_dev_t device); GGML_API const char * ggml_backend_dev_description(ggml_backend_dev_t device); GGML_API void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total); GGML_API enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device); GGML_API void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props); GGML_API ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device); GGML_API ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params); GGML_API ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device); GGML_API ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device); GGML_API ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size); GGML_API bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op); GGML_API bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft); GGML_API bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op); // // Backend (reg) // GGML_API const char * ggml_backend_reg_name(ggml_backend_reg_t reg); GGML_API size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg); GGML_API ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index); GGML_API void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name); // Common functions that may be obtained using ggml_backend_reg_get_proc_address // Split buffer type for tensor parallelism typedef ggml_backend_buffer_type_t (*ggml_backend_split_buffer_type_t)(int main_device, const float * tensor_split); // Set the number of threads for the backend typedef void (*ggml_backend_set_n_threads_t)(ggml_backend_t backend, int n_threads); // Get additional buffer types provided by the device (returns a NULL-terminated array) typedef ggml_backend_buffer_type_t * (*ggml_backend_dev_get_extra_bufts_t)(ggml_backend_dev_t device); // Set the abort callback for the backend typedef void (*ggml_backend_set_abort_callback_t)(ggml_backend_t backend, ggml_abort_callback abort_callback, void * abort_callback_data); // Get a list of feature flags supported by the backend (returns a NULL-terminated array) struct ggml_backend_feature { const char * name; const char * value; }; typedef struct ggml_backend_feature * (*ggml_backend_get_features_t)(ggml_backend_reg_t reg); // // Backend registry // GGML_API void ggml_backend_register(ggml_backend_reg_t reg); GGML_API void ggml_backend_device_register(ggml_backend_dev_t device); // Backend (reg) enumeration GGML_API size_t ggml_backend_reg_count(void); GGML_API ggml_backend_reg_t ggml_backend_reg_get(size_t index); GGML_API ggml_backend_reg_t ggml_backend_reg_by_name(const char * name); // Device enumeration GGML_API size_t ggml_backend_dev_count(void); GGML_API ggml_backend_dev_t ggml_backend_dev_get(size_t index); GGML_API ggml_backend_dev_t ggml_backend_dev_by_name(const char * name); GGML_API ggml_backend_dev_t ggml_backend_dev_by_type(enum ggml_backend_dev_type type); // Direct backend (stream) initialization // = ggml_backend_dev_init(ggml_backend_dev_by_name(name), params) GGML_API ggml_backend_t 
ggml_backend_init_by_name(const char * name, const char * params); // = ggml_backend_dev_init(ggml_backend_dev_by_type(type), params) GGML_API ggml_backend_t ggml_backend_init_by_type(enum ggml_backend_dev_type type, const char * params); // = ggml_backend_dev_init(ggml_backend_dev_by_type(GPU) OR ggml_backend_dev_by_type(CPU), NULL) GGML_API ggml_backend_t ggml_backend_init_best(void); // Load a backend from a dynamic library and register it GGML_API ggml_backend_reg_t ggml_backend_load(const char * path); // Unload a backend if loaded dynamically and unregister it GGML_API void ggml_backend_unload(ggml_backend_reg_t reg); // Load all known backends from dynamic libraries GGML_API void ggml_backend_load_all(void); GGML_API void ggml_backend_load_all_from_path(const char * dir_path); // // Backend scheduler // // The backend scheduler allows for multiple backend devices to be used together // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends // The backends are selected based on: // - the backend that supports the operation // - the location of the pre-allocated tensors (e.g. the weights) /* Example usage: // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned // preferrably to run on the same backend as the buffer ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false, true); // initialize buffers from a max size graph (optional) reserve_graph = build_graph(sched, max_batch_size); // manually assign nodes to a backend (optional, should not be needed in most cases) struct ggml_tensor * node = ggml_mul_mat(ctx, ...); ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu); ggml_backend_sched_reserve(sched, reserve_graph); // compute graph = build_graph(sched); // the graph and its tensors are single-use in terms of allocation, multi-use in terms of computation for (int i = 0; i < 10; ++i) { ggml_backend_sched_graph_compute(sched, graph); // on the first iteration the graph is allocated automatically } // if there are graph inputs: graph = build_graph(sched); // get a new graph that is not allocated (the metadata for the old graph is freed once ggml_free is called) ggml_backend_sched_reset(sched); // clear the allocation of the previous graph ggml_backend_sched_alloc_graph(sched, graph); // explicitly allocate the new graph but do not execute it ggml_backend_tensor_set(input_tensor, ...); // copy data to the newly allocated graph tensors ggml_backend_sched_graph_compute(sched, graph); // execute the graph // as an alternative to the above it is also possible to assign the inputs to a dedicated context and // allocate them statically via ggml_backend_alloc_ctx_tensors } */ typedef struct ggml_backend_sched * ggml_backend_sched_t; // Evaluation callback for each node in the graph (set with ggml_backend_sched_set_eval_callback) // when ask == true, the scheduler wants to know if the user wants to observe this node // this allows the scheduler to batch nodes together in order to evaluate them in a single call // // when ask == false, the scheduler is passing the node tensor to the user for observation // if the user returns false, the scheduler will cancel the graph compute // typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data); // Initialize a backend scheduler, backends with low index are given 
priority over backends with high index GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload); GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); // Initialize backend buffers from a measure graph GGML_API void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes); GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); // returns success GGML_API int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched); GGML_API ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i); // Get the number of splits of the last graph GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched); GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched); GGML_API ggml_backend_buffer_type_t ggml_backend_sched_get_buffer_type(ggml_backend_sched_t sched, ggml_backend_t backend); GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend); GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node); // Split graph without allocating it GGML_API void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph); // Allocate and compute graph on the backend scheduler GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph); // returns success GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph); GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph); GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched); // Reset all assignments and allocators - must be called before changing the node backends or allocating a new graph. // This in effect deallocates all tensors that were previously allocated and leaves them with dangling pointers. // The correct way to use this API is to discard the deallocated tensors and create new ones. 
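    // A minimal sketch of the reset pattern described above (build_graph(), sched, input and data are
    // placeholders for user code, as in the example usage comment further up):
    //
    //   ggml_backend_sched_reset(sched);                              // drop previous assignments/allocations
    //   struct ggml_cgraph * graph = build_graph(sched);              // build a fresh graph - do not reuse old tensors
    //   ggml_backend_sched_alloc_graph(sched, graph);                 // allocate the new graph
    //   ggml_backend_tensor_set(input, data, 0, ggml_nbytes(input));  // now it is safe to write the inputs
    //   ggml_backend_sched_graph_compute(sched, graph);
    //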
GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched); // Set a callback to be called for each resulting node during graph compute GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data); // // Utils // struct ggml_backend_graph_copy { ggml_backend_buffer_t buffer; struct ggml_context * ctx_allocated; struct ggml_context * ctx_unallocated; struct ggml_cgraph * graph; }; // Copy a graph to a different backend GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph); GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy); typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); // Compare the output of two backends GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node); // Tensor initialization GGML_API enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); GGML_API enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor); // CPU buffer types are always available GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size); GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-blas.h000066400000000000000000000011121512524704700200200ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_blas_init(void); GGML_BACKEND_API bool ggml_backend_is_blas(ggml_backend_t backend); // number of threads used for conversion to float // for openblas and blis, this will also set the number of threads used for blas operations GGML_BACKEND_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_blas_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-cann.h000066400000000000000000000107111512524704700200230ustar00rootroot00000000000000/* * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #pragma once #include "ggml-backend.h" #include "ggml.h" #ifdef __cplusplus extern "C" { #endif /** * @brief Maximum number of CANN devices supported. */ #define GGML_CANN_MAX_DEVICES 16 GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cann_reg(void); /** * @brief Initializes the CANN backend for a specified device. * * This function initializes the CANN backend for the given device. * It verifies the device index, allocates a context, and creates a backend * instance. * * @param device The index of the device to initialize. * @return A pointer to the initialized backend instance, or nullptr on failure. */ GGML_BACKEND_API ggml_backend_t ggml_backend_cann_init(int32_t device); /** * @brief Checks if a given backend is a CANN backend. * * This function verifies if the provided backend is a CANN backend by comparing * its GUID with the CANN backend's GUID. * * @param backend The backend instance to check. * @return True if the backend is a CANN backend, false otherwise. */ GGML_BACKEND_API bool ggml_backend_is_cann(ggml_backend_t backend); /** * @brief Retrieves the CANN buffer type for a specified device. * * This function initializes and returns the buffer type interface associated * with the given device. It ensures thread-safe access using a mutex. * * @param device The device index for which to retrieve the buffer type. * @return A pointer to the buffer type interface for the specified device, or * nullptr if the device index is out of range. */ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cann_buffer_type(int32_t device); /** * @brief Retrieves the number of CANN devices available. * * This function returns the number of CANN devices available based on * information obtained from `ggml_cann_info()`. * * @return The number of CANN devices available. */ GGML_BACKEND_API int32_t ggml_backend_cann_get_device_count(void); /** * @brief pinned host buffer for use with the CPU backend for faster copies between CPU and NPU. * * @return A pointer to the host buffer type interface. */ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type(void); /** * @brief Retrieves the description of a specific CANN device. * * This function sets the specified device, retrieves the SoC name, * and writes it into the provided description buffer. * * @param device The device index to retrieve the description for. * @param description Pointer to a buffer where the description will be written. * @param description_size Size of the description buffer. */ GGML_BACKEND_API void ggml_backend_cann_get_device_description( int32_t device, char* description, size_t description_size); /** * @brief Retrieves the memory information of a specific CANN device. * * This function sets the specified device, retrieves the free and total * memory information of the specified type (ACL_HBM_MEM), and stores them * in the provided pointers. * * @param device The device index to retrieve memory information for. * @param free Pointer to a variable where the free memory size will be stored. * @param total Pointer to a variable where the total memory size will be * stored. 
*/ GGML_BACKEND_API void ggml_backend_cann_get_device_memory(int32_t device, size_t* free, size_t* total); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-cpp.h000066400000000000000000000031641512524704700176720ustar00rootroot00000000000000#pragma once #ifndef __cplusplus #error "This header is for C++ only" #endif #include "ggml.h" #include "ggml-alloc.h" #include "ggml-backend.h" #include "gguf.h" #include <memory> // Smart pointers for ggml types // ggml struct ggml_context_deleter { void operator()(ggml_context * ctx) { ggml_free(ctx); } }; struct gguf_context_deleter { void operator()(gguf_context * ctx) { gguf_free(ctx); } }; typedef std::unique_ptr<ggml_context, ggml_context_deleter> ggml_context_ptr; typedef std::unique_ptr<gguf_context, gguf_context_deleter> gguf_context_ptr; // ggml-alloc struct ggml_gallocr_deleter { void operator()(ggml_gallocr_t galloc) { ggml_gallocr_free(galloc); } }; typedef std::unique_ptr<ggml_gallocr, ggml_gallocr_deleter> ggml_gallocr_ptr; // ggml-backend struct ggml_backend_deleter { void operator()(ggml_backend_t backend) { ggml_backend_free(backend); } }; struct ggml_backend_buffer_deleter { void operator()(ggml_backend_buffer_t buffer) { ggml_backend_buffer_free(buffer); } }; struct ggml_backend_event_deleter { void operator()(ggml_backend_event_t event) { ggml_backend_event_free(event); } }; struct ggml_backend_sched_deleter { void operator()(ggml_backend_sched_t sched) { ggml_backend_sched_free(sched); } }; typedef std::unique_ptr<ggml_backend, ggml_backend_deleter> ggml_backend_ptr; typedef std::unique_ptr<ggml_backend_buffer, ggml_backend_buffer_deleter> ggml_backend_buffer_ptr; typedef std::unique_ptr<ggml_backend_event, ggml_backend_event_deleter> ggml_backend_event_ptr; typedef std::unique_ptr<ggml_backend_sched, ggml_backend_sched_deleter> ggml_backend_sched_ptr; ggml-org-ggml-3678254/include/ggml-cpu.h000066400000000000000000000165501512524704700177020ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif // the compute plan that needs to be prepared for ggml_graph_compute() // since https://github.com/ggml-org/ggml/issues/287 struct ggml_cplan { size_t work_size; // size of work buffer, calculated by `ggml_graph_plan()` uint8_t * work_data; // work buffer, to be allocated by the caller before calling `ggml_graph_compute()` int n_threads; struct ggml_threadpool * threadpool; // abort ggml_graph_compute when true ggml_abort_callback abort_callback; void * abort_callback_data; }; // numa strategies enum ggml_numa_strategy { GGML_NUMA_STRATEGY_DISABLED = 0, GGML_NUMA_STRATEGY_DISTRIBUTE = 1, GGML_NUMA_STRATEGY_ISOLATE = 2, GGML_NUMA_STRATEGY_NUMACTL = 3, GGML_NUMA_STRATEGY_MIRROR = 4, GGML_NUMA_STRATEGY_COUNT }; GGML_BACKEND_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems GGML_BACKEND_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node GGML_BACKEND_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value); GGML_BACKEND_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); GGML_BACKEND_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); GGML_BACKEND_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); GGML_BACKEND_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i); GGML_BACKEND_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value); GGML_BACKEND_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3); GGML_BACKEND_API void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value); GGML_BACKEND_API float ggml_get_f32_1d(const struct
ggml_tensor * tensor, int i); GGML_BACKEND_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value); GGML_BACKEND_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3); GGML_BACKEND_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value); GGML_BACKEND_API struct ggml_threadpool * ggml_threadpool_new (struct ggml_threadpool_params * params); GGML_BACKEND_API void ggml_threadpool_free (struct ggml_threadpool * threadpool); GGML_BACKEND_API int ggml_threadpool_get_n_threads (struct ggml_threadpool * threadpool); GGML_BACKEND_API void ggml_threadpool_pause (struct ggml_threadpool * threadpool); GGML_BACKEND_API void ggml_threadpool_resume (struct ggml_threadpool * threadpool); // ggml_graph_plan() has to be called before ggml_graph_compute() // when plan.work_size > 0, caller must allocate memory for plan.work_data GGML_BACKEND_API struct ggml_cplan ggml_graph_plan( const struct ggml_cgraph * cgraph, int n_threads, /* = GGML_DEFAULT_N_THREADS */ struct ggml_threadpool * threadpool /* = NULL */ ); GGML_BACKEND_API enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan); // same as ggml_graph_compute() but the work data is allocated as a part of the context // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data GGML_BACKEND_API enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads); // // system info // // x86 GGML_BACKEND_API int ggml_cpu_has_sse3 (void); GGML_BACKEND_API int ggml_cpu_has_ssse3 (void); GGML_BACKEND_API int ggml_cpu_has_avx (void); GGML_BACKEND_API int ggml_cpu_has_avx_vnni (void); GGML_BACKEND_API int ggml_cpu_has_avx2 (void); GGML_BACKEND_API int ggml_cpu_has_bmi2 (void); GGML_BACKEND_API int ggml_cpu_has_f16c (void); GGML_BACKEND_API int ggml_cpu_has_fma (void); GGML_BACKEND_API int ggml_cpu_has_avx512 (void); GGML_BACKEND_API int ggml_cpu_has_avx512_vbmi(void); GGML_BACKEND_API int ggml_cpu_has_avx512_vnni(void); GGML_BACKEND_API int ggml_cpu_has_avx512_bf16(void); GGML_BACKEND_API int ggml_cpu_has_amx_int8 (void); // ARM GGML_BACKEND_API int ggml_cpu_has_neon (void); GGML_BACKEND_API int ggml_cpu_has_arm_fma (void); GGML_BACKEND_API int ggml_cpu_has_fp16_va (void); GGML_BACKEND_API int ggml_cpu_has_dotprod (void); GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void); GGML_BACKEND_API int ggml_cpu_has_sve (void); GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes GGML_BACKEND_API int ggml_cpu_has_sme (void); // other GGML_BACKEND_API int ggml_cpu_has_riscv_v (void); GGML_BACKEND_API int ggml_cpu_get_rvv_vlen (void); // risc-v vector length in bytes GGML_BACKEND_API int ggml_cpu_has_vsx (void); GGML_BACKEND_API int ggml_cpu_has_vxe (void); GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void); GGML_BACKEND_API int ggml_cpu_has_llamafile (void); // Internal types and functions exposed for tests and benchmarks typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx, const void * GGML_RESTRICT y, size_t by, int nrc); struct ggml_type_traits_cpu { ggml_from_float_t from_float; ggml_vec_dot_t vec_dot; enum ggml_type vec_dot_type; int64_t nrows; // number of rows to process simultaneously }; GGML_BACKEND_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type); GGML_BACKEND_API void ggml_cpu_init(void); // 
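// Example (a minimal sketch, not part of the declarations above): running a graph on the CPU with
// ggml_graph_plan() + ggml_graph_compute(). `gf` is assumed to be a ggml_cgraph built by user code.
//
//   {
//       struct ggml_cplan plan = ggml_graph_plan(gf, /*n_threads =*/ 4, /*threadpool =*/ NULL);
//
//       uint8_t * work = NULL;
//       if (plan.work_size > 0) {
//           work = malloc(plan.work_size); // the work buffer must be provided by the caller
//           plan.work_data = work;
//       }
//
//       enum ggml_status status = ggml_graph_compute(gf, &plan);
//       GGML_ASSERT(status == GGML_STATUS_SUCCESS);
//
//       free(work);
//   }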
// CPU backend // GGML_BACKEND_API ggml_backend_t ggml_backend_cpu_init(void); GGML_BACKEND_API bool ggml_backend_is_cpu (ggml_backend_t backend); GGML_BACKEND_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads); GGML_BACKEND_API void ggml_backend_cpu_set_threadpool (ggml_backend_t backend_cpu, ggml_threadpool_t threadpool); GGML_BACKEND_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void); GGML_BACKEND_API void ggml_cpu_fp32_to_fp32(const float *, float *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_i32 (const float *, int32_t *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_fp16(const float *, ggml_fp16_t *, int64_t); GGML_BACKEND_API void ggml_cpu_fp16_to_fp32(const ggml_fp16_t *, float *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_bf16(const float *, ggml_bf16_t *, int64_t); GGML_BACKEND_API void ggml_cpu_bf16_to_fp32(const ggml_bf16_t *, float *, int64_t); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-cuda.h000066400000000000000000000030611512524704700200200ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif #ifdef GGML_USE_HIP #define GGML_CUDA_NAME "ROCm" #define GGML_CUBLAS_NAME "hipBLAS" #elif defined(GGML_USE_MUSA) #define GGML_CUDA_NAME "MUSA" #define GGML_CUBLAS_NAME "muBLAS" #else #define GGML_CUDA_NAME "CUDA" #define GGML_CUBLAS_NAME "cuBLAS" #endif #define GGML_CUDA_MAX_DEVICES 16 // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_cuda_init(int device); GGML_BACKEND_API bool ggml_backend_is_cuda(ggml_backend_t backend); // device buffer GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device); // split tensor buffer that splits matrices by rows across multiple devices GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split); // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void); GGML_BACKEND_API int ggml_backend_cuda_get_device_count(void); GGML_BACKEND_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size); GGML_BACKEND_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total); GGML_BACKEND_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size); GGML_BACKEND_API void ggml_backend_cuda_unregister_host_buffer(void * buffer); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cuda_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-hexagon.h000066400000000000000000000005371512524704700205420ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_hexagon_init(void); GGML_BACKEND_API bool ggml_backend_is_hexagon(ggml_backend_t backend); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_hexagon_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-metal.h000066400000000000000000000041351512524704700202110ustar00rootroot00000000000000// Note: this description is outdated // // An interface allowing to compute ggml_cgraph with Metal // // This is a fully functional interface that extends ggml with GPU support for 
Apple devices. // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.) // // How does it work? // // As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this // interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you // use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.) // // You only need to make sure that all memory buffers that you used during the graph creation // are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is // used during the graph evaluation to determine the arguments of the compute kernels. // // Synchronization between device and host memory (for example for input and output tensors) // is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions. // #pragma once #include "ggml.h" #include "ggml-backend.h" #include <stddef.h> #include <stdbool.h> struct ggml_tensor; struct ggml_cgraph; #ifdef __cplusplus extern "C" { #endif // // backend API // user-code should use only these functions // // TODO: remove in the future GGML_BACKEND_API ggml_backend_t ggml_backend_metal_init(void); GGML_BACKEND_API bool ggml_backend_is_metal(ggml_backend_t backend); GGML_BACKEND_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data); // helper to check if the device supports a specific family // ideally, the user code should be doing these checks // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf GGML_BACKEND_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family); // capture all command buffers committed the next time `ggml_backend_graph_compute` is called GGML_BACKEND_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_metal_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-opencl.h000066400000000000000000000011051512524704700203610ustar00rootroot00000000000000#ifndef GGML_OPENCL_H #define GGML_OPENCL_H #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif // // backend API // GGML_BACKEND_API ggml_backend_t ggml_backend_opencl_init(void); GGML_BACKEND_API bool ggml_backend_is_opencl(ggml_backend_t backend); GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void); GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_opencl_reg(void); #ifdef __cplusplus } #endif #endif // GGML_OPENCL_H ggml-org-ggml-3678254/include/ggml-opt.h000066400000000000000000000333271512524704700177160ustar00rootroot00000000000000// This file contains functionality for training models using GGML. // It is not strictly needed vs. just vanilla GGML but it provides a more high-level interface for common needs such as datasets. // At the bottom of this file especially there are relatively high-level functions that are suitable for use or adaptation in user code. // // Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de) #pragma once #include "ggml.h" #include "ggml-backend.h" #include <stdint.h> #ifdef __cplusplus extern "C" { #endif struct ggml_opt_dataset; struct ggml_opt_context; struct ggml_opt_result; typedef struct ggml_opt_dataset * ggml_opt_dataset_t; typedef struct ggml_opt_context * ggml_opt_context_t; typedef struct ggml_opt_result * ggml_opt_result_t; // ====== Loss ====== // built-in loss types, i.e.
the built-in quantities minimized by the optimizer // custom loss types can be defined via mean or sum which simply reduce the outputs for all datapoints to a single value enum ggml_opt_loss_type { GGML_OPT_LOSS_TYPE_MEAN, GGML_OPT_LOSS_TYPE_SUM, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY, GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR, }; // ====== Dataset ====== GGML_API ggml_opt_dataset_t ggml_opt_dataset_init( enum ggml_type type_data, // the type for the internal data tensor enum ggml_type type_label, // the type for the internal labels tensor int64_t ne_datapoint, // number of elements per datapoint int64_t ne_label, // number of elements per label int64_t ndata, // total number of datapoints/labels int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied) GGML_API void ggml_opt_dataset_free(ggml_opt_dataset_t dataset); // get underlying tensors that store the data GGML_API int64_t ggml_opt_dataset_ndata (ggml_opt_dataset_t dataset); GGML_API struct ggml_tensor * ggml_opt_dataset_data (ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata] GGML_API struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset); // shape = [nd_label, ndata] // shuffle idata first datapoints from dataset with RNG from opt_ctx, shuffle all datapoints if idata is negative GGML_API void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata); // get batch at position ibatch from dataset and copy the data to data_batch and labels_batch GGML_API void ggml_opt_dataset_get_batch( ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, // shape = [ne_datapoint, ndata_batch] struct ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch] int64_t ibatch); GGML_API void ggml_opt_dataset_get_batch_host( ggml_opt_dataset_t dataset, void * data_batch, size_t nb_data_batch, void * labels_batch, int64_t ibatch); // ====== Model / Context ====== enum ggml_opt_build_type { GGML_OPT_BUILD_TYPE_FORWARD = 10, GGML_OPT_BUILD_TYPE_GRAD = 20, GGML_OPT_BUILD_TYPE_OPT = 30, }; enum ggml_opt_optimizer_type { GGML_OPT_OPTIMIZER_TYPE_ADAMW, GGML_OPT_OPTIMIZER_TYPE_SGD, GGML_OPT_OPTIMIZER_TYPE_COUNT }; // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss struct ggml_opt_optimizer_params { struct { float alpha; // learning rate float beta1; // first AdamW momentum float beta2; // second AdamW momentum float eps; // epsilon for numerical stability float wd; // weight decay - 0.0f to disable } adamw; struct { float alpha; // learning rate float wd; // weight decay } sgd; }; // callback to calculate optimizer parameters prior to a backward pass // userdata can be used to pass arbitrary data typedef struct ggml_opt_optimizer_params (*ggml_opt_get_optimizer_params)(void * userdata); // returns the default optimizer params (constant, hard-coded values) // userdata is not used GGML_API struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata); // casts userdata to ggml_opt_optimizer_params and returns it GGML_API struct ggml_opt_optimizer_params ggml_opt_get_constant_optimizer_params(void * userdata); // parameters for initializing a new optimization context struct ggml_opt_params { ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs // by default the forward graph needs to be reconstructed for each eval // if ctx_compute, inputs, and outputs are set the graphs are instead allocated statically struct 
ggml_context * ctx_compute; struct ggml_tensor * inputs; struct ggml_tensor * outputs; enum ggml_opt_loss_type loss_type; enum ggml_opt_build_type build_type; int32_t opt_period; // after how many gradient accumulation steps an optimizer step should be done ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters void * get_opt_pars_ud; // userdata for calculating optimizer parameters // only GGML_OPT_OPTIMIZER_TYPE_ADAMW needs m, v momenta per parameter tensor enum ggml_opt_optimizer_type optimizer; }; // get parameters for an optimization context with defaults set where possible // parameters for which no sensible defaults exist are supplied as arguments to this function GGML_API struct ggml_opt_params ggml_opt_default_params( ggml_backend_sched_t backend_sched, enum ggml_opt_loss_type loss_type); GGML_API ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params); GGML_API void ggml_opt_free(ggml_opt_context_t opt_ctx); // set gradients to zero, initialize loss, and optionally reset the optimizer GGML_API void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer); GGML_API bool ggml_opt_static_graphs(ggml_opt_context_t opt_ctx); // whether the graphs are allocated statically // get underlying tensors that store data // if not using static graphs these pointers become invalid with the next call to ggml_opt_alloc GGML_API struct ggml_tensor * ggml_opt_inputs( ggml_opt_context_t opt_ctx); // forward graph input tensor GGML_API struct ggml_tensor * ggml_opt_outputs( ggml_opt_context_t opt_ctx); // forward graph output tensor GGML_API struct ggml_tensor * ggml_opt_labels( ggml_opt_context_t opt_ctx); // labels to compare outputs against GGML_API struct ggml_tensor * ggml_opt_loss( ggml_opt_context_t opt_ctx); // scalar tensor that contains the loss GGML_API struct ggml_tensor * ggml_opt_pred( ggml_opt_context_t opt_ctx); // predictions made by outputs GGML_API struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels // get the gradient accumulator for a node from the forward graph GGML_API struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node); GGML_API enum ggml_opt_optimizer_type ggml_opt_context_optimizer_type(ggml_opt_context_t); //TODO consistent naming scheme GGML_API const char * ggml_opt_optimizer_name(enum ggml_opt_optimizer_type); // ====== Optimization Result ====== GGML_API ggml_opt_result_t ggml_opt_result_init(void); GGML_API void ggml_opt_result_free(ggml_opt_result_t result); GGML_API void ggml_opt_result_reset(ggml_opt_result_t result); // get data from result, uncertainties are optional and can be ignored by passing NULL GGML_API void ggml_opt_result_ndata( ggml_opt_result_t result, int64_t * ndata); // writes 1 value, number of datapoints GGML_API void ggml_opt_result_loss( ggml_opt_result_t result, double * loss, double * unc); // writes 1 value GGML_API void ggml_opt_result_pred( ggml_opt_result_t result, int32_t * pred); // writes ndata values GGML_API void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc); // writes 1 value // ====== Computation ====== // if not using static graphs, this function must be called prior to ggml_opt_alloc GGML_API void ggml_opt_prepare_alloc( ggml_opt_context_t opt_ctx, struct ggml_context * ctx_compute, struct ggml_cgraph * gf, struct ggml_tensor * inputs, struct ggml_tensor * outputs); // allocate the next graph for evaluation, either forward or
forward + backward // must be called exactly once prior to calling ggml_opt_eval GGML_API void ggml_opt_alloc(ggml_opt_context_t opt_ctx, bool backward); // do forward pass, increment result if not NULL, do backward pass if allocated GGML_API void ggml_opt_eval(ggml_opt_context_t opt_ctx, ggml_opt_result_t result); // ############################################################################ // ## The high-level functions start here. They do not depend on any private ## // ## functions or structs and can be copied to and adapted for user code. ## // ############################################################################ // ====== Intended Usage ====== // // 1. Select the appropriate loss for your problem. // 2. Create a dataset and set the data for the "data" tensor. Also set the "labels" tensor if your loss needs them. // Setting the shard size to 1 will be fine, it's the granularity with which data is shuffled/loaded (bigger values are faster). // 3. Create a GGML graph for your model with no_alloc == true. Use two separate contexts for the tensors. // The first context should contain the model parameters and inputs and be allocated statically in user code. // The second context should contain all other tensors and will be (re)allocated automatically. // Due to this automated allocation the data of the second context is not defined when accessed in user code. // Note that the second dimension of the inputs/outputs are interpreted as the number of datapoints in those tensors. // 4. Call ggml_opt_fit. If you need more control you can use ggml_opt_epoch instead. // signature for a callback while evaluating opt_ctx on dataset, called after an evaluation typedef void (*ggml_opt_epoch_callback)( bool train, // true after training evaluation, false after validation evaluation ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, ggml_opt_result_t result, // result associated with the dataset subsection int64_t ibatch, // number of batches that have been evaluated so far int64_t ibatch_max, // total number of batches in this dataset subsection int64_t t_start_us); // time at which the evaluation on the dataset subsection was started // do training on front of dataset, do evaluation only on back of dataset GGML_API void ggml_opt_epoch( ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, ggml_opt_result_t result_train, // result to increment during training, ignored if NULL ggml_opt_result_t result_eval, // result to increment during evaluation, ignored if NULL int64_t idata_split, // data index at which to split training and evaluation ggml_opt_epoch_callback callback_train, ggml_opt_epoch_callback callback_eval); // callback that prints a progress bar on stderr GGML_API void ggml_opt_epoch_callback_progress_bar( bool train, ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, ggml_opt_result_t result, int64_t ibatch, int64_t ibatch_max, int64_t t_start_us); // fit model defined by inputs and outputs to dataset GGML_API void ggml_opt_fit( ggml_backend_sched_t backend_sched, // backend scheduler for constructing the compute graphs struct ggml_context * ctx_compute, // context with temporarily allocated tensors to calculate the outputs struct ggml_tensor * inputs, // input tensor with shape [ne_datapoint, ndata_batch] struct ggml_tensor * outputs, // output tensor, must have shape [ne_label, ndata_batch] if labels are used ggml_opt_dataset_t dataset, // dataset with data and optionally also labels enum ggml_opt_loss_type loss_type, // loss to minimize enum ggml_opt_optimizer_type 
optimizer, // sgd or adamw ggml_opt_get_optimizer_params get_opt_pars, // callback to get optimizer params, userdata is pointer to epoch (of type int64_t) int64_t nepoch, // how many times the dataset should be iterated over int64_t nbatch_logical, // datapoints per optimizer step, must be a multiple of ndata_batch in inputs/outputs float val_split, // fraction of the dataset to use for validation, must be in [0.0f, 1.0f) bool silent); // whether or not info prints to stderr should be suppressed #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-rpc.h000066400000000000000000000020401512524704700176640ustar00rootroot00000000000000#pragma once #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif #define RPC_PROTO_MAJOR_VERSION 3 #define RPC_PROTO_MINOR_VERSION 6 #define RPC_PROTO_PATCH_VERSION 0 #define GGML_RPC_MAX_SERVERS 16 // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint, uint32_t device); GGML_BACKEND_API bool ggml_backend_is_rpc(ggml_backend_t backend); GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint, uint32_t device); GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, uint32_t device, size_t * free, size_t * total); GGML_BACKEND_API void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir, size_t n_threads, size_t n_devices, ggml_backend_dev_t * devices); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_add_server(const char * endpoint); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-sycl.h000066400000000000000000000033431512524704700200610ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // #pragma once #include "ggml.h" #include "ggml-backend.h" #define GGML_SYCL_NAME "SYCL" #define GGML_SYCL_MAX_DEVICES 48 #ifdef __cplusplus extern "C" { #endif // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_sycl_init(int device); GGML_BACKEND_API bool ggml_backend_is_sycl(ggml_backend_t backend); // device buffer GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device); // split tensor buffer that splits matrices by rows across multiple devices GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split); // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void); GGML_BACKEND_API void ggml_backend_sycl_print_sycl_devices(void); GGML_BACKEND_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len); GGML_BACKEND_API void ggml_backend_sycl_get_device_description(int device, char *description, size_t description_size); GGML_BACKEND_API int ggml_backend_sycl_get_device_count(); GGML_BACKEND_API void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total); // SYCL doesn't support registering host memory, keep here for reference // GGML_BACKEND_API bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size); // GGML_BACKEND_API void ggml_backend_sycl_unregister_host_buffer(void * buffer); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_sycl_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-vulkan.h000066400000000000000000000016701512524704700204100ustar00rootroot00000000000000#pragma once #include "ggml.h" #include
"ggml-backend.h" #ifdef __cplusplus extern "C" { #endif #define GGML_VK_NAME "Vulkan" #define GGML_VK_MAX_DEVICES 16 // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num); GGML_BACKEND_API bool ggml_backend_is_vk(ggml_backend_t backend); GGML_BACKEND_API int ggml_backend_vk_get_device_count(void); GGML_BACKEND_API void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size); GGML_BACKEND_API void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total); GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num); // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_vk_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-webgpu.h000066400000000000000000000005101512524704700203710ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif #define GGML_WEBGPU_NAME "WebGPU" // Needed for examples in ggml GGML_BACKEND_API ggml_backend_t ggml_backend_webgpu_init(void); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_webgpu_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-zdnn.h000066400000000000000000000004441512524704700200570ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif // device buffer GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_zdnn_buffer_type(void); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_zdnn_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml-zendnn.h000066400000000000000000000007621512524704700204050ustar00rootroot00000000000000#pragma once #include "ggml-backend.h" #include "ggml.h" #ifdef __cplusplus extern "C" { #endif // backend API GGML_BACKEND_API ggml_backend_t ggml_backend_zendnn_init(void); GGML_BACKEND_API bool ggml_backend_is_zendnn(ggml_backend_t backend); // number of threads used for zendnn operations GGML_BACKEND_API void ggml_backend_zendnn_set_n_threads(ggml_backend_t backend_zendnn, int n_threads); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_zendnn_reg(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/ggml.h000066400000000000000000003067071512524704700171230ustar00rootroot00000000000000#pragma once // // GGML Tensor Library // // This documentation is still a work in progress. // If you wish some specific topics to be covered, feel free to drop a comment: // // https://github.com/ggerganov/whisper.cpp/issues/40 // // ## Overview // // This library implements: // // - a set of tensor operations // - automatic differentiation // - basic optimization algorithms // // The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes, // but is not limited to, the following: // // - linear regression // - support vector machines // - neural networks // // The library allows the user to define a certain function using the available tensor operations. This function // definition is represented internally via a computation graph. Each tensor operation in the function definition // corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the // function's value and/or its gradient with respect to the input variables. 
Optionally, the function can be optimized // using one of the available optimization algorithms. // // For example, here we define the function: f(x) = a*x^2 + b // // { // struct ggml_init_params params = { // .mem_size = 16*1024*1024, // .mem_buffer = NULL, // }; // // // memory allocation happens here // struct ggml_context * ctx = ggml_init(params); // // struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); // // ggml_set_param(ctx, x); // x is an input variable // // struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); // struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); // struct ggml_tensor * x2 = ggml_mul(ctx, x, x); // struct ggml_tensor * f = ggml_add(ctx, ggml_mul(ctx, a, x2), b); // // ... // } // // Notice that the function definition above does not involve any actual computation. The computation is performed only // when the user explicitly requests it. For example, to compute the function's value at x = 2.0: // // { // ... // // struct ggml_cgraph * gf = ggml_new_graph(ctx); // ggml_build_forward_expand(gf, f); // // // set the input variable and parameter values // ggml_set_f32(x, 2.0f); // ggml_set_f32(a, 3.0f); // ggml_set_f32(b, 4.0f); // // ggml_graph_compute_with_ctx(ctx, &gf, n_threads); // // printf("f = %f\n", ggml_get_f32_1d(f, 0)); // // ... // } // // The actual computation is performed in the ggml_graph_compute() function. // // The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the // ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know // in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory // and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was // actually needed. // // The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic // differentiation and optimization algorithms. // // The described approach allows to define the function graph once and then compute its forward or backward graphs // multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way // the user can avoid the memory allocation overhead at runtime. // // The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class // citizens, but in theory the library can be extended to support FP8 and integer data types. // // Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary // and binary operations. Most of the available operations fall into one of these two categories. With time, it became // clear that the library needs to support more complex operations. The way to support these operations is not clear // yet, but a few examples are demonstrated in the following operations: // // - ggml_permute() // - ggml_conv_1d_1s() // - ggml_conv_1d_2s() // // For each tensor operator, the library implements a forward and backward computation function. The forward function // computes the output tensor value given the input tensor values. The backward function computes the adjoint of the // input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a // calculus class, or watch the following video: // // What is Automatic Differentiation? 
// https://www.youtube.com/watch?v=wG_nF1awSSY // // // ## Tensor data (struct ggml_tensor) // // The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of // the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains // pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example: // // { // struct ggml_tensor * c = ggml_add(ctx, a, b); // // assert(c->src[0] == a); // assert(c->src[1] == b); // } // // The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the // number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows // to store tensors that are not contiguous in memory, which is useful for operations such as transposition and // permutation. All tensor operations have to take the stride into account and not assume that the tensor is // contiguous in memory. // // The data of the tensor is accessed via the "data" pointer. For example: // // { // const int nx = 2; // const int ny = 3; // // struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny); // // for (int y = 0; y < ny; y++) { // for (int x = 0; x < nx; x++) { // *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]) = x + y; // } // } // // ... // } // // Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used. // // ## The matrix multiplication operator (ggml_mul_mat) // // TODO // // // ## Multi-threading // // TODO // // // ## Overview of ggml.c // // TODO // // // ## SIMD optimizations // // TODO // // // ## Debugging ggml // // TODO // // #ifdef GGML_SHARED # if defined(_WIN32) && !defined(__MINGW32__) # ifdef GGML_BUILD # define GGML_API __declspec(dllexport) extern # else # define GGML_API __declspec(dllimport) extern # endif # else # define GGML_API __attribute__ ((visibility ("default"))) extern # endif #else # define GGML_API extern #endif // TODO: support for clang #ifdef __GNUC__ # define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint))) #elif defined(_MSC_VER) # define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func #else # define GGML_DEPRECATED(func, hint) func #endif #ifndef __GNUC__ # define GGML_ATTRIBUTE_FORMAT(...) #elif defined(__MINGW32__) && !defined(__clang__) # define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) #else # define GGML_ATTRIBUTE_FORMAT(...) 
__attribute__((format(printf, __VA_ARGS__))) #endif #if defined(_WIN32) && !defined(_WIN32_WINNT) # define _WIN32_WINNT 0x0A00 #endif #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include <stdio.h> #define GGML_FILE_MAGIC 0x67676d6c // "ggml" #define GGML_FILE_VERSION 2 #define GGML_QNT_VERSION 2 // bump this on quantization format changes #define GGML_QNT_VERSION_FACTOR 1000 // do not change this #define GGML_MAX_DIMS 4 #define GGML_MAX_PARAMS 2048 #define GGML_MAX_SRC 10 #define GGML_MAX_N_THREADS 512 #define GGML_MAX_OP_PARAMS 64 #ifndef GGML_MAX_NAME # define GGML_MAX_NAME 64 #endif #define GGML_DEFAULT_N_THREADS 4 #define GGML_DEFAULT_GRAPH_SIZE 2048 #if UINTPTR_MAX == 0xFFFFFFFF #define GGML_MEM_ALIGN 4 #else #define GGML_MEM_ALIGN 16 #endif #define GGML_EXIT_SUCCESS 0 #define GGML_EXIT_ABORTED 1 // TODO: convert to enum https://github.com/ggml-org/llama.cpp/pull/16187#discussion_r2388538726 #define GGML_ROPE_TYPE_NORMAL 0 #define GGML_ROPE_TYPE_NEOX 2 #define GGML_ROPE_TYPE_MROPE 8 #define GGML_ROPE_TYPE_VISION 24 #define GGML_ROPE_TYPE_IMROPE 40 // binary: 101000 #define GGML_MROPE_SECTIONS 4 #define GGML_UNUSED(x) (void)(x) #ifdef __CUDACC__ template <typename... Args> __host__ __device__ constexpr inline void ggml_unused_vars_impl(Args&&...) noexcept {} #define GGML_UNUSED_VARS(...) ggml_unused_vars_impl(__VA_ARGS__) #else #define GGML_UNUSED_VARS(...) do { (void)sizeof((__VA_ARGS__, 0)); } while(0) #endif // __CUDACC__ #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1)) #ifndef NDEBUG # define GGML_UNREACHABLE() do { fprintf(stderr, "statement should be unreachable\n"); abort(); } while(0) #elif defined(__GNUC__) # define GGML_UNREACHABLE() __builtin_unreachable() #elif defined(_MSC_VER) # define GGML_UNREACHABLE() __assume(0) #else # define GGML_UNREACHABLE() ((void) 0) #endif #ifdef __cplusplus # define GGML_NORETURN [[noreturn]] #elif defined(_MSC_VER) # define GGML_NORETURN __declspec(noreturn) #else # define GGML_NORETURN _Noreturn #endif #define GGML_ABORT(...) ggml_abort(__FILE__, __LINE__, __VA_ARGS__) #define GGML_ASSERT(x) if (!(x)) GGML_ABORT("GGML_ASSERT(%s) failed", #x) // used to copy the number of elements and stride in bytes of tensors into local variables. // main purpose is to reduce code duplication and improve readability. // // example: // // GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); // GGML_TENSOR_LOCALS(size_t, nb1, src1, nb); // #define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \ const type prefix##0 = (pointer) ? (pointer)->array[0] : 0; \ GGML_UNUSED(prefix##0); #define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \ GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \ const type prefix##1 = (pointer) ? (pointer)->array[1] : 0; \ GGML_UNUSED(prefix##1); #define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \ GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \ const type prefix##2 = (pointer) ? (pointer)->array[2] : 0; \ GGML_UNUSED(prefix##2); #define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \ GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \ const type prefix##3 = (pointer) ?
(pointer)->array[3] : 0; \ GGML_UNUSED(prefix##3); #define GGML_TENSOR_UNARY_OP_LOCALS \ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ GGML_TENSOR_LOCALS(size_t, nb, dst, nb) #define GGML_TENSOR_BINARY_OP_LOCALS \ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \ GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ GGML_TENSOR_LOCALS(size_t, nb, dst, nb) #define GGML_TENSOR_TERNARY_OP_LOCALS \ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \ GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \ GGML_TENSOR_LOCALS(int64_t, ne2, src2, ne) \ GGML_TENSOR_LOCALS(size_t, nb2, src2, nb) \ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ GGML_TENSOR_LOCALS(size_t, nb, dst, nb) #define GGML_TENSOR_BINARY_OP_LOCALS01 \ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \ GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) #ifdef __cplusplus extern "C" { #endif // Function type used in fatal error callbacks typedef void (*ggml_abort_callback_t)(const char * error_message); // Set the abort callback (passing null will restore original abort functionality: printing a message to stdout) // Returns the old callback for chaining GGML_API ggml_abort_callback_t ggml_set_abort_callback(ggml_abort_callback_t callback); GGML_NORETURN GGML_ATTRIBUTE_FORMAT(3, 4) GGML_API void ggml_abort(const char * file, int line, const char * fmt, ...); enum ggml_status { GGML_STATUS_ALLOC_FAILED = -2, GGML_STATUS_FAILED = -1, GGML_STATUS_SUCCESS = 0, GGML_STATUS_ABORTED = 1, }; // get ggml_status name string GGML_API const char * ggml_status_to_string(enum ggml_status status); // ieee 754-2008 half-precision float16 // todo: make this not an integral type typedef uint16_t ggml_fp16_t; GGML_API float ggml_fp16_to_fp32(ggml_fp16_t); GGML_API ggml_fp16_t ggml_fp32_to_fp16(float); GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t *, float *, int64_t); GGML_API void ggml_fp32_to_fp16_row(const float *, ggml_fp16_t *, int64_t); // google brain half-precision bfloat16 typedef struct { uint16_t bits; } ggml_bf16_t; GGML_API ggml_bf16_t ggml_fp32_to_bf16(float); GGML_API float ggml_bf16_to_fp32(ggml_bf16_t); // consider just doing << 16 GGML_API void ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t); GGML_API void ggml_fp32_to_bf16_row_ref(const float *, ggml_bf16_t *, int64_t); GGML_API void ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t); struct ggml_object; struct ggml_context; struct ggml_cgraph; // NOTE: always add types at the end of the enum to keep backward compatibility enum ggml_type { GGML_TYPE_F32 = 0, GGML_TYPE_F16 = 1, GGML_TYPE_Q4_0 = 2, GGML_TYPE_Q4_1 = 3, // GGML_TYPE_Q4_2 = 4, support has been removed // GGML_TYPE_Q4_3 = 5, support has been removed GGML_TYPE_Q5_0 = 6, GGML_TYPE_Q5_1 = 7, GGML_TYPE_Q8_0 = 8, GGML_TYPE_Q8_1 = 9, GGML_TYPE_Q2_K = 10, GGML_TYPE_Q3_K = 11, GGML_TYPE_Q4_K = 12, GGML_TYPE_Q5_K = 13, GGML_TYPE_Q6_K = 14, GGML_TYPE_Q8_K = 15, GGML_TYPE_IQ2_XXS = 16, GGML_TYPE_IQ2_XS = 17, GGML_TYPE_IQ3_XXS = 18, GGML_TYPE_IQ1_S = 19, GGML_TYPE_IQ4_NL = 20, GGML_TYPE_IQ3_S = 21, GGML_TYPE_IQ2_S = 22, GGML_TYPE_IQ4_XS = 23, GGML_TYPE_I8 = 24, GGML_TYPE_I16 = 25, GGML_TYPE_I32 = 26, GGML_TYPE_I64 = 27, GGML_TYPE_F64 = 28, GGML_TYPE_IQ1_M = 29, 
GGML_TYPE_BF16 = 30, // GGML_TYPE_Q4_0_4_4 = 31, support has been removed from gguf files // GGML_TYPE_Q4_0_4_8 = 32, // GGML_TYPE_Q4_0_8_8 = 33, GGML_TYPE_TQ1_0 = 34, GGML_TYPE_TQ2_0 = 35, // GGML_TYPE_IQ4_NL_4_4 = 36, // GGML_TYPE_IQ4_NL_4_8 = 37, // GGML_TYPE_IQ4_NL_8_8 = 38, GGML_TYPE_MXFP4 = 39, // MXFP4 (1 block) GGML_TYPE_COUNT = 40, }; // precision enum ggml_prec { GGML_PREC_DEFAULT = 0, // stored as ggml_tensor.op_params, 0 by default GGML_PREC_F32 = 10, }; // model file types enum ggml_ftype { GGML_FTYPE_UNKNOWN = -1, GGML_FTYPE_ALL_F32 = 0, GGML_FTYPE_MOSTLY_F16 = 1, // except 1d tensors GGML_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors GGML_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 GGML_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors GGML_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors GGML_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors GGML_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors GGML_FTYPE_MOSTLY_Q3_K = 11, // except 1d tensors GGML_FTYPE_MOSTLY_Q4_K = 12, // except 1d tensors GGML_FTYPE_MOSTLY_Q5_K = 13, // except 1d tensors GGML_FTYPE_MOSTLY_Q6_K = 14, // except 1d tensors GGML_FTYPE_MOSTLY_IQ2_XXS = 15, // except 1d tensors GGML_FTYPE_MOSTLY_IQ2_XS = 16, // except 1d tensors GGML_FTYPE_MOSTLY_IQ3_XXS = 17, // except 1d tensors GGML_FTYPE_MOSTLY_IQ1_S = 18, // except 1d tensors GGML_FTYPE_MOSTLY_IQ4_NL = 19, // except 1d tensors GGML_FTYPE_MOSTLY_IQ3_S = 20, // except 1d tensors GGML_FTYPE_MOSTLY_IQ2_S = 21, // except 1d tensors GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors GGML_FTYPE_MOSTLY_MXFP4 = 25, // except 1d tensors }; // available tensor operations: enum ggml_op { GGML_OP_NONE = 0, GGML_OP_DUP, GGML_OP_ADD, GGML_OP_ADD_ID, GGML_OP_ADD1, GGML_OP_ACC, GGML_OP_SUB, GGML_OP_MUL, GGML_OP_DIV, GGML_OP_SQR, GGML_OP_SQRT, GGML_OP_LOG, GGML_OP_SIN, GGML_OP_COS, GGML_OP_SUM, GGML_OP_SUM_ROWS, GGML_OP_CUMSUM, GGML_OP_MEAN, GGML_OP_ARGMAX, GGML_OP_COUNT_EQUAL, GGML_OP_REPEAT, GGML_OP_REPEAT_BACK, GGML_OP_CONCAT, GGML_OP_SILU_BACK, GGML_OP_NORM, // normalize GGML_OP_RMS_NORM, GGML_OP_RMS_NORM_BACK, GGML_OP_GROUP_NORM, GGML_OP_L2_NORM, GGML_OP_MUL_MAT, GGML_OP_MUL_MAT_ID, GGML_OP_OUT_PROD, GGML_OP_SCALE, GGML_OP_SET, GGML_OP_CPY, GGML_OP_CONT, GGML_OP_RESHAPE, GGML_OP_VIEW, GGML_OP_PERMUTE, GGML_OP_TRANSPOSE, GGML_OP_GET_ROWS, GGML_OP_GET_ROWS_BACK, GGML_OP_SET_ROWS, GGML_OP_DIAG, GGML_OP_DIAG_MASK_INF, GGML_OP_DIAG_MASK_ZERO, GGML_OP_SOFT_MAX, GGML_OP_SOFT_MAX_BACK, GGML_OP_ROPE, GGML_OP_ROPE_BACK, GGML_OP_CLAMP, GGML_OP_CONV_TRANSPOSE_1D, GGML_OP_IM2COL, GGML_OP_IM2COL_BACK, GGML_OP_IM2COL_3D, GGML_OP_CONV_2D, GGML_OP_CONV_3D, GGML_OP_CONV_2D_DW, GGML_OP_CONV_TRANSPOSE_2D, GGML_OP_POOL_1D, GGML_OP_POOL_2D, GGML_OP_POOL_2D_BACK, GGML_OP_UPSCALE, GGML_OP_PAD, GGML_OP_PAD_REFLECT_1D, GGML_OP_ROLL, GGML_OP_ARANGE, GGML_OP_TIMESTEP_EMBEDDING, GGML_OP_ARGSORT, GGML_OP_TOP_K, GGML_OP_LEAKY_RELU, GGML_OP_TRI, GGML_OP_FILL, GGML_OP_FLASH_ATTN_EXT, GGML_OP_FLASH_ATTN_BACK, GGML_OP_SSM_CONV, GGML_OP_SSM_SCAN, GGML_OP_WIN_PART, GGML_OP_WIN_UNPART, GGML_OP_GET_REL_POS, GGML_OP_ADD_REL_POS, GGML_OP_RWKV_WKV6, GGML_OP_GATED_LINEAR_ATTN, GGML_OP_RWKV_WKV7, GGML_OP_SOLVE_TRI, GGML_OP_UNARY, GGML_OP_MAP_CUSTOM1, GGML_OP_MAP_CUSTOM2, GGML_OP_MAP_CUSTOM3, GGML_OP_CUSTOM, GGML_OP_CROSS_ENTROPY_LOSS, GGML_OP_CROSS_ENTROPY_LOSS_BACK, GGML_OP_OPT_STEP_ADAMW, GGML_OP_OPT_STEP_SGD, GGML_OP_GLU, GGML_OP_COUNT, }; 
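// Example (a minimal sketch): each operation above is recorded in the `op` field of the tensor it
// produces, with its operands stored in `src`. `ctx`, `a` and `b` are assumed to exist in user code.
//
//   {
//       struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);
//
//       assert(c->op     == GGML_OP_MUL_MAT);
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }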
enum ggml_unary_op { GGML_UNARY_OP_ABS, GGML_UNARY_OP_SGN, GGML_UNARY_OP_NEG, GGML_UNARY_OP_STEP, GGML_UNARY_OP_TANH, GGML_UNARY_OP_ELU, GGML_UNARY_OP_RELU, GGML_UNARY_OP_SIGMOID, GGML_UNARY_OP_GELU, GGML_UNARY_OP_GELU_QUICK, GGML_UNARY_OP_SILU, GGML_UNARY_OP_HARDSWISH, GGML_UNARY_OP_HARDSIGMOID, GGML_UNARY_OP_EXP, GGML_UNARY_OP_EXPM1, GGML_UNARY_OP_SOFTPLUS, GGML_UNARY_OP_GELU_ERF, GGML_UNARY_OP_XIELU, GGML_UNARY_OP_FLOOR, GGML_UNARY_OP_CEIL, GGML_UNARY_OP_ROUND, GGML_UNARY_OP_TRUNC, GGML_UNARY_OP_COUNT, }; enum ggml_glu_op { GGML_GLU_OP_REGLU, GGML_GLU_OP_GEGLU, GGML_GLU_OP_SWIGLU, GGML_GLU_OP_SWIGLU_OAI, GGML_GLU_OP_GEGLU_ERF, GGML_GLU_OP_GEGLU_QUICK, GGML_GLU_OP_COUNT, }; enum ggml_object_type { GGML_OBJECT_TYPE_TENSOR, GGML_OBJECT_TYPE_GRAPH, GGML_OBJECT_TYPE_WORK_BUFFER }; enum ggml_log_level { GGML_LOG_LEVEL_NONE = 0, GGML_LOG_LEVEL_DEBUG = 1, GGML_LOG_LEVEL_INFO = 2, GGML_LOG_LEVEL_WARN = 3, GGML_LOG_LEVEL_ERROR = 4, GGML_LOG_LEVEL_CONT = 5, // continue previous log }; // this tensor... enum ggml_tensor_flag { GGML_TENSOR_FLAG_INPUT = 1, // ...is an input for the GGML compute graph GGML_TENSOR_FLAG_OUTPUT = 2, // ...is an output for the GGML compute graph GGML_TENSOR_FLAG_PARAM = 4, // ...contains trainable parameters GGML_TENSOR_FLAG_LOSS = 8, // ...defines loss for numerical optimization (multiple loss tensors add up) }; enum ggml_tri_type { GGML_TRI_TYPE_UPPER_DIAG = 0, GGML_TRI_TYPE_UPPER = 1, GGML_TRI_TYPE_LOWER_DIAG = 2, GGML_TRI_TYPE_LOWER = 3 }; struct ggml_init_params { // memory pool size_t mem_size; // bytes void * mem_buffer; // if NULL, memory will be allocated internally bool no_alloc; // don't allocate memory for the tensor data }; // n-dimensional tensor struct ggml_tensor { enum ggml_type type; struct ggml_backend_buffer * buffer; int64_t ne[GGML_MAX_DIMS]; // number of elements size_t nb[GGML_MAX_DIMS]; // stride in bytes: // nb[0] = ggml_type_size(type) // nb[1] = nb[0] * (ne[0] / ggml_blck_size(type)) + padding // nb[i] = nb[i-1] * ne[i-1] // compute data enum ggml_op op; // op params - allocated as int32_t for alignment int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)]; int32_t flags; struct ggml_tensor * src[GGML_MAX_SRC]; // source tensor and offset for views struct ggml_tensor * view_src; size_t view_offs; void * data; char name[GGML_MAX_NAME]; void * extra; // extra things e.g. 
for ggml-cuda.cu char padding[8]; }; static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor); // Abort callback // If not NULL, called before ggml computation // If it returns true, the computation is aborted typedef bool (*ggml_abort_callback)(void * data); // // GUID // // GUID types typedef uint8_t ggml_guid[16]; typedef ggml_guid * ggml_guid_t; GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b); // misc GGML_API const char * ggml_version(void); GGML_API const char * ggml_commit(void); GGML_API void ggml_time_init(void); // call this once at the beginning of the program GGML_API int64_t ggml_time_ms(void); GGML_API int64_t ggml_time_us(void); GGML_API int64_t ggml_cycles(void); GGML_API int64_t ggml_cycles_per_ms(void); // accepts a UTF-8 path, even on Windows GGML_API FILE * ggml_fopen(const char * fname, const char * mode); GGML_API void ggml_print_object (const struct ggml_object * obj); GGML_API void ggml_print_objects(const struct ggml_context * ctx); GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor); GGML_API int64_t ggml_nrows (const struct ggml_tensor * tensor); GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor); GGML_API size_t ggml_nbytes_pad(const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN GGML_API int64_t ggml_blck_size(enum ggml_type type); GGML_API size_t ggml_type_size(enum ggml_type type); // size in bytes for all elements in a block GGML_API size_t ggml_row_size (enum ggml_type type, int64_t ne); // size in bytes for all elements in a row GGML_DEPRECATED( GGML_API double ggml_type_sizef(enum ggml_type type), // ggml_type_size()/ggml_blck_size() as float "use ggml_row_size() instead"); GGML_API const char * ggml_type_name(enum ggml_type type); GGML_API const char * ggml_op_name (enum ggml_op op); GGML_API const char * ggml_op_symbol(enum ggml_op op); GGML_API const char * ggml_unary_op_name(enum ggml_unary_op op); GGML_API const char * ggml_glu_op_name(enum ggml_glu_op op); GGML_API const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor); GGML_API bool ggml_is_quantized(enum ggml_type type); // TODO: temporary until model loading of ggml examples is refactored GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype); GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor); GGML_API bool ggml_is_permuted (const struct ggml_tensor * tensor); GGML_API bool ggml_is_empty (const struct ggml_tensor * tensor); GGML_API bool ggml_is_scalar (const struct ggml_tensor * tensor); GGML_API bool ggml_is_vector (const struct ggml_tensor * tensor); GGML_API bool ggml_is_matrix (const struct ggml_tensor * tensor); GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor); GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); // returns 1 for scalars // returns whether the tensor elements can be iterated over with a flattened index (no gaps, no permutation) GGML_API bool ggml_is_contiguous (const struct ggml_tensor * tensor); GGML_API bool ggml_is_contiguous_0(const struct ggml_tensor * tensor); // same as ggml_is_contiguous() GGML_API bool ggml_is_contiguous_1(const struct ggml_tensor * tensor); // contiguous for dims >= 1 GGML_API bool ggml_is_contiguous_2(const struct ggml_tensor * tensor); // contiguous for dims >= 2 // returns whether the tensor elements are allocated as one contiguous block of memory (no gaps, but permutation ok) GGML_API bool 
ggml_is_contiguously_allocated(const struct ggml_tensor * tensor); // true for tensor that is stored in memory as CxWxHxN and has been permuted to WxHxCxN GGML_API bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor); // true if the elements in dimension 0 are contiguous, or there is just 1 block of elements GGML_API bool ggml_is_contiguous_rows(const struct ggml_tensor * tensor); GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1); GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1); GGML_API bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1); // use this to compute the memory overhead of a tensor GGML_API size_t ggml_tensor_overhead(void); GGML_API bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes); // main GGML_API struct ggml_context * ggml_init (struct ggml_init_params params); GGML_API void ggml_reset(struct ggml_context * ctx); GGML_API void ggml_free (struct ggml_context * ctx); GGML_API size_t ggml_used_mem(const struct ggml_context * ctx); GGML_API bool ggml_get_no_alloc(struct ggml_context * ctx); GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc); GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx); GGML_API size_t ggml_get_mem_size (const struct ggml_context * ctx); GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx); GGML_API struct ggml_tensor * ggml_new_tensor( struct ggml_context * ctx, enum ggml_type type, int n_dims, const int64_t *ne); GGML_API struct ggml_tensor * ggml_new_tensor_1d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0); GGML_API struct ggml_tensor * ggml_new_tensor_2d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1); GGML_API struct ggml_tensor * ggml_new_tensor_3d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2); GGML_API struct ggml_tensor * ggml_new_tensor_4d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); GGML_API void * ggml_new_buffer(struct ggml_context * ctx, size_t nbytes); GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src); // Context tensor enumeration and lookup GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx); GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor); GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name); // Converts a flat index into coordinates GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3); GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor); GGML_API enum ggml_glu_op ggml_get_glu_op(const struct ggml_tensor * tensor); GGML_API void * ggml_get_data (const struct ggml_tensor * tensor); GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor); GGML_API const char * ggml_get_name (const struct ggml_tensor * tensor); GGML_API struct ggml_tensor * ggml_set_name ( struct ggml_tensor * tensor, const char * name); GGML_ATTRIBUTE_FORMAT(2, 3) GGML_API struct ggml_tensor * ggml_format_name( struct ggml_tensor * tensor, const char * fmt, ...); // Tensor flags GGML_API 
void ggml_set_input(struct ggml_tensor * tensor); GGML_API void ggml_set_output(struct ggml_tensor * tensor); GGML_API void ggml_set_param(struct ggml_tensor * tensor); GGML_API void ggml_set_loss(struct ggml_tensor * tensor); // // operations on tensors with backpropagation // GGML_API struct ggml_tensor * ggml_dup( struct ggml_context * ctx, struct ggml_tensor * a); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_dup_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_add( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_add_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_add_cast( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, enum ggml_type type); // dst[i0, i1, i2] = a[i0, i1, i2] + b[i0, ids[i1, i2]] GGML_API struct ggml_tensor * ggml_add_id( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * ids); GGML_API struct ggml_tensor * ggml_add1( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_add1_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // dst = a // view(dst, nb1, nb2, nb3, offset) += b // return dst GGML_API struct ggml_tensor * ggml_acc( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); GGML_API struct ggml_tensor * ggml_acc_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); GGML_API struct ggml_tensor * ggml_sub( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_sub_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_mul( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_mul_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_div( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_div_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_sqr( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sqr_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sqrt( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sqrt_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_log( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_log_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_expm1( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_expm1_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_softplus( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_softplus_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sin( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * 
ggml_sin_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_cos( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_cos_inplace( struct ggml_context * ctx, struct ggml_tensor * a); // return scalar GGML_API struct ggml_tensor * ggml_sum( struct ggml_context * ctx, struct ggml_tensor * a); // sums along rows, with input shape [a,b,c,d] return shape [1,b,c,d] GGML_API struct ggml_tensor * ggml_sum_rows( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_cumsum( struct ggml_context * ctx, struct ggml_tensor * a); // mean along rows GGML_API struct ggml_tensor * ggml_mean( struct ggml_context * ctx, struct ggml_tensor * a); // argmax along rows GGML_API struct ggml_tensor * ggml_argmax( struct ggml_context * ctx, struct ggml_tensor * a); // count number of equal elements in a and b GGML_API struct ggml_tensor * ggml_count_equal( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // if a is the same shape as b, and a is not parameter, return a // otherwise, return a new tensor: repeat(a) to fit in b GGML_API struct ggml_tensor * ggml_repeat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // repeat a to the specified shape GGML_API struct ggml_tensor * ggml_repeat_4d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); // sums repetitions in a into shape of b GGML_API struct ggml_tensor * ggml_repeat_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // sum up values that are adjacent in dims > 0 instead of repeated with same stride // concat a and b along dim // used in stable-diffusion GGML_API struct ggml_tensor * ggml_concat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int dim); GGML_API struct ggml_tensor * ggml_abs( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_abs_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sgn( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sgn_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_neg( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_neg_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_step( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_step_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_tanh( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_tanh_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_elu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_elu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_relu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_leaky_relu( struct ggml_context * ctx, struct ggml_tensor * a, float negative_slope, bool inplace); GGML_API struct ggml_tensor * ggml_relu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sigmoid( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_sigmoid_inplace( struct ggml_context * ctx, 
struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_gelu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_gelu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); // GELU using erf (error function) when possible // some backends may fallback to approximation based on Abramowitz and Stegun formula GGML_API struct ggml_tensor * ggml_gelu_erf( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_gelu_erf_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_gelu_quick( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_gelu_quick_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_silu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_silu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); // a - x // b - dy GGML_API struct ggml_tensor * ggml_silu_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // hardswish(x) = x * relu6(x + 3) / 6 GGML_API struct ggml_tensor * ggml_hardswish( struct ggml_context * ctx, struct ggml_tensor * a); // hardsigmoid(x) = relu6(x + 3) / 6 GGML_API struct ggml_tensor * ggml_hardsigmoid( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_exp( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_exp_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_floor( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_floor_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_ceil( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_ceil_inplace( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_round( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_round_inplace( struct ggml_context * ctx, struct ggml_tensor * a); /** * Truncates the fractional part of each element in the tensor (towards zero). * For example: trunc(3.7) = 3.0, trunc(-2.9) = -2.0 * Similar to std::trunc in C/C++. 
*/ GGML_API struct ggml_tensor * ggml_trunc( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_trunc_inplace( struct ggml_context * ctx, struct ggml_tensor * a); // xIELU activation function // x = x * (c_a(alpha_n) + c_b(alpha_p, beta) * sigmoid(beta * x)) + eps * (x > 0) // where c_a = softplus and c_b(a, b) = softplus(a) + b are constraining functions // that constrain the positive and negative source alpha values respectively GGML_API struct ggml_tensor * ggml_xielu( struct ggml_context * ctx, struct ggml_tensor * a, float alpha_n, float alpha_p, float beta, float eps); // gated linear unit ops // A: n columns, r rows, // result is n / 2 columns, r rows, // expects gate in second half of row, unless swapped is true GGML_API struct ggml_tensor * ggml_glu( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_glu_op op, bool swapped); GGML_API struct ggml_tensor * ggml_reglu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_reglu_swapped( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_geglu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_geglu_swapped( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_swiglu( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_swiglu_swapped( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_geglu_erf( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_geglu_erf_swapped( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_geglu_quick( struct ggml_context * ctx, struct ggml_tensor * a); GGML_API struct ggml_tensor * ggml_geglu_quick_swapped( struct ggml_context * ctx, struct ggml_tensor * a); // A: n columns, r rows, // B: n columns, r rows, GGML_API struct ggml_tensor * ggml_glu_split( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, enum ggml_glu_op op); GGML_API struct ggml_tensor * ggml_reglu_split( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_geglu_split( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_swiglu_split( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_geglu_erf_split( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_geglu_quick_split( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); GGML_API struct ggml_tensor * ggml_swiglu_oai( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, float alpha, float limit); // normalize along rows GGML_API struct ggml_tensor * ggml_norm( struct ggml_context * ctx, struct ggml_tensor * a, float eps); GGML_API struct ggml_tensor * ggml_norm_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float eps); GGML_API struct ggml_tensor * ggml_rms_norm( struct ggml_context * ctx, struct ggml_tensor * a, float eps); GGML_API struct ggml_tensor * ggml_rms_norm_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float eps); // group normalize along ne0*ne1*n_groups // used in stable-diffusion GGML_API struct ggml_tensor * ggml_group_norm( struct ggml_context * ctx, struct ggml_tensor * a, int n_groups, float eps); GGML_API struct ggml_tensor 
* ggml_group_norm_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_groups, float eps); // l2 normalize along rows // used in rwkv v7 GGML_API struct ggml_tensor * ggml_l2_norm( struct ggml_context * ctx, struct ggml_tensor * a, float eps); GGML_API struct ggml_tensor * ggml_l2_norm_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float eps); // a - x // b - dy GGML_API struct ggml_tensor * ggml_rms_norm_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, float eps); // A: k columns, n rows => [ne03, ne02, n, k] // B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k] // result is n columns, m rows => [ne03 * x, ne02 * y, m, n] GGML_API struct ggml_tensor * ggml_mul_mat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // change the precision of a matrix multiplication // set to GGML_PREC_F32 for higher precision (useful for phi-2) GGML_API void ggml_mul_mat_set_prec( struct ggml_tensor * a, enum ggml_prec prec); // indirect matrix multiplication GGML_API struct ggml_tensor * ggml_mul_mat_id( struct ggml_context * ctx, struct ggml_tensor * as, struct ggml_tensor * b, struct ggml_tensor * ids); // A: m columns, n rows, // B: p columns, n rows, // result is m columns, p rows GGML_API struct ggml_tensor * ggml_out_prod( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // // operations on tensors without backpropagation // GGML_API struct ggml_tensor * ggml_scale( struct ggml_context * ctx, struct ggml_tensor * a, float s); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_scale_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float s); // x = s * a + b GGML_API struct ggml_tensor * ggml_scale_bias( struct ggml_context * ctx, struct ggml_tensor * a, float s, float b); GGML_API struct ggml_tensor * ggml_scale_bias_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float s, float b); // b -> view(a,offset,nb1,nb2,3), return modified a GGML_API struct ggml_tensor * ggml_set( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); // in bytes // b -> view(a,offset,nb1,nb2,3), return view(a) GGML_API struct ggml_tensor * ggml_set_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); // in bytes GGML_API struct ggml_tensor * ggml_set_1d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t offset); // in bytes GGML_API struct ggml_tensor * ggml_set_1d_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t offset); // in bytes // b -> view(a,offset,nb1,nb2,3), return modified a GGML_API struct ggml_tensor * ggml_set_2d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t offset); // in bytes // b -> view(a,offset,nb1,nb2,3), return view(a) GGML_API struct ggml_tensor * ggml_set_2d_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t offset); // in bytes // a -> b, return view(b) GGML_API struct ggml_tensor * ggml_cpy( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // note: casting from f32 to i32 will discard the fractional part GGML_API struct ggml_tensor * ggml_cast( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_type type); // make contiguous GGML_API struct ggml_tensor * 
ggml_cont( struct ggml_context * ctx, struct ggml_tensor * a); // make contiguous, with new shape GGML_API struct ggml_tensor * ggml_cont_1d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0); GGML_API struct ggml_tensor * ggml_cont_2d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1); GGML_API struct ggml_tensor * ggml_cont_3d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2); GGML_API struct ggml_tensor * ggml_cont_4d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); // return view(a), b specifies the new shape // TODO: when we start computing gradient, make a copy instead of view GGML_API struct ggml_tensor * ggml_reshape( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // return view(a) // TODO: when we start computing gradient, make a copy instead of view GGML_API struct ggml_tensor * ggml_reshape_1d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0); GGML_API struct ggml_tensor * ggml_reshape_2d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1); // return view(a) // TODO: when we start computing gradient, make a copy instead of view GGML_API struct ggml_tensor * ggml_reshape_3d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2); GGML_API struct ggml_tensor * ggml_reshape_4d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); // offset in bytes GGML_API struct ggml_tensor * ggml_view_1d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, size_t offset); GGML_API struct ggml_tensor * ggml_view_2d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, size_t nb1, // row stride in bytes size_t offset); GGML_API struct ggml_tensor * ggml_view_3d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, size_t nb1, // row stride in bytes size_t nb2, // slice stride in bytes size_t offset); GGML_API struct ggml_tensor * ggml_view_4d( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, size_t nb1, // row stride in bytes size_t nb2, // slice stride in bytes size_t nb3, size_t offset); GGML_API struct ggml_tensor * ggml_permute( struct ggml_context * ctx, struct ggml_tensor * a, int axis0, int axis1, int axis2, int axis3); // alias for ggml_permute(ctx, a, 1, 0, 2, 3) GGML_API struct ggml_tensor * ggml_transpose( struct ggml_context * ctx, struct ggml_tensor * a); // supports 4D a: // a [n_embd, ne1, ne2, ne3] // b I32 [n_rows, ne2, ne3, 1] // // return [n_embd, n_rows, ne2, ne3] GGML_API struct ggml_tensor * ggml_get_rows( struct ggml_context * ctx, struct ggml_tensor * a, // data struct ggml_tensor * b); // row indices GGML_API struct ggml_tensor * ggml_get_rows_back( struct ggml_context * ctx, struct ggml_tensor * a, // gradients of ggml_get_rows result struct ggml_tensor * b, // row indices struct ggml_tensor * c); // data for ggml_get_rows, only used for its shape // a TD [n_embd, ne1, ne2, ne3] // b TS [n_embd, n_rows, ne02, ne03] | ne02 == ne2, ne03 == ne3 // c I64 [n_rows, ne11, ne12, 1] | c[i] in [0, ne1) // // undefined behavior if destination rows overlap // // broadcast: // ne2 % ne11 == 0 // ne3 % ne12 == 0 // // return view(a) GGML_API struct ggml_tensor * ggml_set_rows( struct ggml_context * ctx, struct ggml_tensor * a, // destination struct ggml_tensor * b, // 
source struct ggml_tensor * c); // row indices GGML_API struct ggml_tensor * ggml_diag( struct ggml_context * ctx, struct ggml_tensor * a); // set elements above the diagonal to -INF GGML_API struct ggml_tensor * ggml_diag_mask_inf( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); // set elements above the diagonal to 0 GGML_API struct ggml_tensor * ggml_diag_mask_zero( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace( struct ggml_context * ctx, struct ggml_tensor * a, int n_past); GGML_API struct ggml_tensor * ggml_soft_max( struct ggml_context * ctx, struct ggml_tensor * a); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_soft_max_inplace( struct ggml_context * ctx, struct ggml_tensor * a); // a [ne0, ne01, ne02, ne03] // mask [ne0, ne11, ne12, ne13] | ne11 >= ne01, F16 or F32, optional // // broadcast: // ne02 % ne12 == 0 // ne03 % ne13 == 0 // // fused soft_max(a*scale + mask*(ALiBi slope)) // max_bias = 0.0f for no ALiBi GGML_API struct ggml_tensor * ggml_soft_max_ext( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * mask, float scale, float max_bias); GGML_API struct ggml_tensor * ggml_soft_max_ext_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * mask, float scale, float max_bias); GGML_API void ggml_soft_max_add_sinks( struct ggml_tensor * a, struct ggml_tensor * sinks); GGML_API struct ggml_tensor * ggml_soft_max_ext_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, float scale, float max_bias); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_soft_max_ext_back_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, float scale, float max_bias); // rotary position embedding // if (mode & 1) - skip n_past elements (NOT SUPPORTED) // if (mode & GGML_ROPE_TYPE_NEOX) - GPT-NeoX style // // b is an int32 vector with size a->ne[2], it contains the positions GGML_API struct ggml_tensor * ggml_rope( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode); // custom RoPE // c is freq factors (e.g. 
phi3-128k), (optional) GGML_API struct ggml_tensor * ggml_rope_ext( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow); GGML_API struct ggml_tensor * ggml_rope_multi( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int sections[GGML_MROPE_SECTIONS], int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_ext_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow); GGML_API struct ggml_tensor * ggml_rope_multi_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int sections[GGML_MROPE_SECTIONS], int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow); GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_rope_custom( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow), "use ggml_rope_ext instead"); GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_rope_custom_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow), "use ggml_rope_ext_inplace instead"); // compute correction dims for YaRN RoPE scaling GGML_API void ggml_rope_yarn_corr_dims( int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]); // rotary position embedding backward, i.e compute dx from dy // a - dy GGML_API struct ggml_tensor * ggml_rope_ext_back( struct ggml_context * ctx, struct ggml_tensor * a, // gradients of ggml_rope result struct ggml_tensor * b, // positions struct ggml_tensor * c, // freq factors int n_dims, int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow); GGML_API struct ggml_tensor * ggml_rope_multi_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, int n_dims, int sections[4], int mode, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow); // clamp // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_clamp( struct ggml_context * ctx, struct ggml_tensor * a, float min, float max); // im2col // converts data into a format that effectively results in a convolution when combined with matrix multiplication GGML_API struct ggml_tensor * ggml_im2col( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride dimension 0 int s1, // stride dimension 1 int p0, // padding dimension 0 int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1, // dilation dimension 1 bool is_2D, enum ggml_type dst_type); GGML_API struct ggml_tensor * 
ggml_im2col_back( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // gradient of im2col output int64_t * ne, // shape of im2col input int s0, // stride dimension 0 int s1, // stride dimension 1 int p0, // padding dimension 0 int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1, // dilation dimension 1 bool is_2D); GGML_API struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride int p0, // padding int d0); // dilation // conv_1d with padding = half // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d) GGML_API struct ggml_tensor* ggml_conv_1d_ph( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s, // stride int d); // dilation // depthwise // TODO: this is very likely wrong for some cases! - needs more testing GGML_API struct ggml_tensor * ggml_conv_1d_dw( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride int p0, // padding int d0); // dilation GGML_API struct ggml_tensor * ggml_conv_1d_dw_ph( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride int d0); // dilation GGML_API struct ggml_tensor * ggml_conv_transpose_1d( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride int p0, // padding int d0); // dilation GGML_API struct ggml_tensor * ggml_conv_2d( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride dimension 0 int s1, // stride dimension 1 int p0, // padding dimension 0 int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1); // dilation dimension 1 GGML_API struct ggml_tensor * ggml_im2col_3d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int64_t IC, int s0, // stride width int s1, // stride height int s2, // stride depth int p0, // padding width int p1, // padding height int p2, // padding depth int d0, // dilation width int d1, // dilation height int d2, // dilation depth enum ggml_type dst_type); // a: [OC*IC, KD, KH, KW] // b: [N*IC, ID, IH, IW] // result: [N*OC, OD, OH, OW] GGML_API struct ggml_tensor * ggml_conv_3d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int64_t IC, int s0, // stride width int s1, // stride height int s2, // stride depth int p0, // padding width int p1, // padding height int p2, // padding depth int d0, // dilation width int d1, // dilation height int d2 // dilation depth ); // kernel size is a->ne[0] x a->ne[1] // stride is equal to kernel size // padding is zero // example: // a: 16 16 3 768 // b: 1024 1024 3 1 // res: 64 64 768 1 // used in sam GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // kernel size is a->ne[0] x a->ne[1] // stride is 1 // padding is half // example: // a: 3 3 256 256 // b: 64 64 256 1 // res: 64 64 256 1 // used in sam GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b); // depthwise (via im2col and mul_mat) GGML_API struct ggml_tensor * ggml_conv_2d_dw( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel struct ggml_tensor * b, // data int s0, // stride dimension 0 int s1, // stride 
dimension 1 int p0, // padding dimension 0 int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1); // dilation dimension 1 // Depthwise 2D convolution // may be faster than ggml_conv_2d_dw, but not available in all backends // a: KW KH 1 C convolution kernel // b: W H C N input data // res: W_out H_out C N GGML_API struct ggml_tensor * ggml_conv_2d_dw_direct( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int stride0, int stride1, int pad0, int pad1, int dilation0, int dilation1); GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int stride); GGML_API struct ggml_tensor * ggml_conv_2d_direct( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] struct ggml_tensor * b, // input data [W, H, C, N] int s0, // stride dimension 0 int s1, // stride dimension 1 int p0, // padding dimension 0 int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1); // dilation dimension 1 GGML_API struct ggml_tensor * ggml_conv_3d_direct( struct ggml_context * ctx, struct ggml_tensor * a, // kernel [KW, KH, KD, IC * OC] struct ggml_tensor * b, // input [W, H, D, C * N] int s0, // stride int s1, int s2, int p0, // padding int p1, int p2, int d0, // dilation int d1, int d2, int n_channels, int n_batch, int n_channels_out); enum ggml_op_pool { GGML_OP_POOL_MAX, GGML_OP_POOL_AVG, GGML_OP_POOL_COUNT, }; GGML_API struct ggml_tensor * ggml_pool_1d( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_op_pool op, int k0, // kernel size int s0, // stride int p0); // padding // the result will have 2*p0 padding for the first dimension // and 2*p1 padding for the second dimension GGML_API struct ggml_tensor * ggml_pool_2d( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_op_pool op, int k0, int k1, int s0, int s1, float p0, float p1); GGML_API struct ggml_tensor * ggml_pool_2d_back( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * af, // "a"/input used in forward pass enum ggml_op_pool op, int k0, int k1, int s0, int s1, float p0, float p1); enum ggml_scale_mode { GGML_SCALE_MODE_NEAREST = 0, GGML_SCALE_MODE_BILINEAR = 1, GGML_SCALE_MODE_BICUBIC = 2, GGML_SCALE_MODE_COUNT }; enum ggml_scale_flag { GGML_SCALE_FLAG_ALIGN_CORNERS = (1 << 8), GGML_SCALE_FLAG_ANTIALIAS = (1 << 9), }; // interpolate // multiplies ne0 and ne1 by scale factor GGML_API struct ggml_tensor * ggml_upscale( struct ggml_context * ctx, struct ggml_tensor * a, int scale_factor, enum ggml_scale_mode mode); // interpolate // interpolate scale to specified dimensions GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_upscale_ext( struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1, int ne2, int ne3, enum ggml_scale_mode mode), "use ggml_interpolate instead"); // Up- or downsamples the input to the specified size. // 2D scale modes (eg. bilinear) are applied to the first two dimensions. GGML_API struct ggml_tensor * ggml_interpolate( struct ggml_context * ctx, struct ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, uint32_t mode); // ggml_scale_mode [ | ggml_scale_flag...] 
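// example: the `mode` argument of ggml_interpolate() packs a ggml_scale_mode value,
// optionally OR-ed with ggml_scale_flag bits (see the "ggml_scale_mode [ | ggml_scale_flag...]" note above).
// minimal usage sketch, assuming `ctx` is a ggml_context obtained from ggml_init() and the
// tensor sizes below are arbitrary values chosen for illustration only:
//
//   // upscale a [64, 64, 3, 1] F32 tensor to 128x128 using bilinear filtering with
//   // align-corners behavior; the mode and flag are combined with a bitwise OR
//   struct ggml_tensor * img = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 64, 64, 3, 1);
//   struct ggml_tensor * up  = ggml_interpolate(ctx, img, 128, 128, 3, 1,
//           GGML_SCALE_MODE_BILINEAR | GGML_SCALE_FLAG_ALIGN_CORNERS);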
// pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0] GGML_API struct ggml_tensor * ggml_pad( struct ggml_context * ctx, struct ggml_tensor * a, int p0, int p1, int p2, int p3); // pad each dimension with values on the other side of the torus (looping around) GGML_API struct ggml_tensor * ggml_pad_circular( struct ggml_context * ctx, struct ggml_tensor * a, int p0, int p1, int p2, int p3); GGML_API struct ggml_tensor * ggml_pad_ext( struct ggml_context * ctx, struct ggml_tensor * a, int lp0, int rp0, int lp1, int rp1, int lp2, int rp2, int lp3, int rp3 ); // pad each dimension with values on the other side of the torus (looping around) GGML_API struct ggml_tensor * ggml_pad_ext_circular( struct ggml_context * ctx, struct ggml_tensor * a, int lp0, int rp0, int lp1, int rp1, int lp2, int rp2, int lp3, int rp3); // pad each dimension with reflection: [a, b, c, d] -> [b, a, b, c, d, c] GGML_API struct ggml_tensor * ggml_pad_reflect_1d( struct ggml_context * ctx, struct ggml_tensor * a, int p0, int p1); // Move tensor elements by an offset given for each dimension. Elements that // are shifted beyond the last position are wrapped around to the beginning. GGML_API struct ggml_tensor * ggml_roll( struct ggml_context * ctx, struct ggml_tensor * a, int shift0, int shift1, int shift2, int shift3); // Convert matrix into a triangular one (upper, strict upper, lower or strict lower) by writing // zeroes everywhere outside the masked area GGML_API struct ggml_tensor * ggml_tri( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_tri_type type); // Fill tensor a with constant c GGML_API struct ggml_tensor * ggml_fill( struct ggml_context * ctx, struct ggml_tensor * a, float c); GGML_API struct ggml_tensor * ggml_fill_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float c); // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151 // timesteps: [N,] // return: [N, dim] GGML_API struct ggml_tensor * ggml_timestep_embedding( struct ggml_context * ctx, struct ggml_tensor * timesteps, int dim, int max_period); // sort rows enum ggml_sort_order { GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC, }; GGML_API struct ggml_tensor * ggml_argsort( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_sort_order order); // similar to ggml_top_k but implemented as `argsort` + `view` GGML_API struct ggml_tensor * ggml_argsort_top_k( struct ggml_context * ctx, struct ggml_tensor * a, int k); // top k elements per row // note: the resulting top k indices are in no particular order GGML_API struct ggml_tensor * ggml_top_k( struct ggml_context * ctx, struct ggml_tensor * a, int k); GGML_API struct ggml_tensor * ggml_arange( struct ggml_context * ctx, float start, float stop, float step); // q: [n_embd_k, n_batch, n_head, ne3 ] // k: [n_embd_k, n_kv, n_head_kv, ne3 ] // v: [n_embd_v, n_kv, n_head_kv, ne3 ] !! not transposed !! // mask: [n_kv, n_batch, ne32, ne33] // res: [n_embd_v, n_head, n_batch, ne3 ] !! permuted !! 
// // broadcast: // n_head % n_head_kv == 0 // n_head % ne32 == 0 // ne3 % ne33 == 0 // GGML_API struct ggml_tensor * ggml_flash_attn_ext( struct ggml_context * ctx, struct ggml_tensor * q, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * mask, float scale, float max_bias, float logit_softcap); GGML_API void ggml_flash_attn_ext_set_prec( struct ggml_tensor * a, enum ggml_prec prec); GGML_API enum ggml_prec ggml_flash_attn_ext_get_prec( const struct ggml_tensor * a); GGML_API void ggml_flash_attn_ext_add_sinks( struct ggml_tensor * a, struct ggml_tensor * sinks); // TODO: needs to be adapted to ggml_flash_attn_ext GGML_API struct ggml_tensor * ggml_flash_attn_back( struct ggml_context * ctx, struct ggml_tensor * q, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * d, bool masked); GGML_API struct ggml_tensor * ggml_ssm_conv( struct ggml_context * ctx, struct ggml_tensor * sx, struct ggml_tensor * c); GGML_API struct ggml_tensor * ggml_ssm_scan( struct ggml_context * ctx, struct ggml_tensor * s, struct ggml_tensor * x, struct ggml_tensor * dt, struct ggml_tensor * A, struct ggml_tensor * B, struct ggml_tensor * C, struct ggml_tensor * ids); // partition into non-overlapping windows with padding if needed // example: // a: 768 64 64 1 // w: 14 // res: 768 14 14 25 // used in sam GGML_API struct ggml_tensor * ggml_win_part( struct ggml_context * ctx, struct ggml_tensor * a, int w); // reverse of ggml_win_part // used in sam GGML_API struct ggml_tensor * ggml_win_unpart( struct ggml_context * ctx, struct ggml_tensor * a, int w0, int h0, int w); GGML_API struct ggml_tensor * ggml_unary( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_unary_op op); GGML_API struct ggml_tensor * ggml_unary_inplace( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_unary_op op); // used in sam GGML_API struct ggml_tensor * ggml_get_rel_pos( struct ggml_context * ctx, struct ggml_tensor * a, int qh, int kh); // used in sam GGML_API struct ggml_tensor * ggml_add_rel_pos( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * pw, struct ggml_tensor * ph); GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * pw, struct ggml_tensor * ph); GGML_API struct ggml_tensor * ggml_rwkv_wkv6( struct ggml_context * ctx, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * r, struct ggml_tensor * tf, struct ggml_tensor * td, struct ggml_tensor * state); GGML_API struct ggml_tensor * ggml_gated_linear_attn( struct ggml_context * ctx, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * q, struct ggml_tensor * g, struct ggml_tensor * state, float scale); GGML_API struct ggml_tensor * ggml_rwkv_wkv7( struct ggml_context * ctx, struct ggml_tensor * r, struct ggml_tensor * w, struct ggml_tensor * k, struct ggml_tensor * v, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * state); /* Solves a specific equation of the form Ax=B, where A is a triangular matrix * without zeroes on the diagonal (i.e. invertible). * B can have any number of columns, but must have the same number of rows as A * If A is [n, n] and B is [n, m], then the result will be [n, m] as well * Has O(n^3) complexity (unlike most matrix ops out there), so use on cases * where n > 100 sparingly, pre-chunk if necessary. 
* * If left = false, solves xA=B instead * If lower = false, assumes upper triangular instead * If uni = true, assumes diagonal of A to be all ones (will override actual values) * * TODO: currently only lower, right, non-unitriangular variant is implemented */ GGML_API struct ggml_tensor * ggml_solve_tri( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, bool left, bool lower, bool uni); // custom operators typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, int ith, int nth, void * userdata); typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata); typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata); #define GGML_N_TASKS_MAX (-1) // n_tasks == GGML_N_TASKS_MAX means to use max number of tasks GGML_API struct ggml_tensor * ggml_map_custom1( struct ggml_context * ctx, struct ggml_tensor * a, ggml_custom1_op_t fun, int n_tasks, void * userdata); GGML_API struct ggml_tensor * ggml_map_custom1_inplace( struct ggml_context * ctx, struct ggml_tensor * a, ggml_custom1_op_t fun, int n_tasks, void * userdata); GGML_API struct ggml_tensor * ggml_map_custom2( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_custom2_op_t fun, int n_tasks, void * userdata); GGML_API struct ggml_tensor * ggml_map_custom2_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, ggml_custom2_op_t fun, int n_tasks, void * userdata); GGML_API struct ggml_tensor * ggml_map_custom3( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, ggml_custom3_op_t fun, int n_tasks, void * userdata); GGML_API struct ggml_tensor * ggml_map_custom3_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_tensor * c, ggml_custom3_op_t fun, int n_tasks, void * userdata); typedef void (*ggml_custom_op_t)(struct ggml_tensor * dst , int ith, int nth, void * userdata); GGML_API struct ggml_tensor * ggml_custom_4d( struct ggml_context * ctx, enum ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, struct ggml_tensor ** args, int n_args, ggml_custom_op_t fun, int n_tasks, void * userdata); GGML_API struct ggml_tensor * ggml_custom_inplace( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor ** args, int n_args, ggml_custom_op_t fun, int n_tasks, void * userdata); // loss function GGML_API struct ggml_tensor * ggml_cross_entropy_loss( struct ggml_context * ctx, struct ggml_tensor * a, // logits struct ggml_tensor * b); // labels GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back( struct ggml_context * ctx, struct ggml_tensor * a, // logits struct ggml_tensor * b, // labels struct ggml_tensor * c); // gradients of cross_entropy_loss result // AdamW optimizer step // Paper: https://arxiv.org/pdf/1711.05101v3.pdf // PyTorch: https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html GGML_API struct ggml_tensor * ggml_opt_step_adamw( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * grad, struct ggml_tensor * m, struct ggml_tensor * v, struct ggml_tensor * adamw_params); // parameters such as the learning rate // stochastic gradient descent step (with weight decay) GGML_API struct ggml_tensor * ggml_opt_step_sgd( struct ggml_context * ctx, struct 
ggml_tensor * a, struct ggml_tensor * grad, struct ggml_tensor * sgd_params); // alpha, weight decay // // automatic differentiation // GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); GGML_API void ggml_build_backward_expand( struct ggml_context * ctx, // context for gradient computation struct ggml_cgraph * cgraph, struct ggml_tensor ** grad_accs); // graph allocation in a context GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false GGML_API struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads); GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, struct ggml_cgraph * cgraph, bool force_grads); GGML_API void ggml_graph_cpy (struct ggml_cgraph * src, struct ggml_cgraph * dst); GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); // set regular grads + optimizer momenta to 0, set loss grad to 1 GGML_API void ggml_graph_clear (struct ggml_cgraph * cgraph); GGML_API int ggml_graph_size (struct ggml_cgraph * cgraph); GGML_API struct ggml_tensor * ggml_graph_node (struct ggml_cgraph * cgraph, int i); // if i < 0, returns nodes[n_nodes + i] GGML_API struct ggml_tensor ** ggml_graph_nodes (struct ggml_cgraph * cgraph); GGML_API int ggml_graph_n_nodes(struct ggml_cgraph * cgraph); GGML_API void ggml_graph_add_node(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); GGML_API size_t ggml_graph_overhead(void); GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads); GGML_API struct ggml_tensor * ggml_graph_get_tensor (const struct ggml_cgraph * cgraph, const char * name); GGML_API struct ggml_tensor * ggml_graph_get_grad (const struct ggml_cgraph * cgraph, const struct ggml_tensor * node); GGML_API struct ggml_tensor * ggml_graph_get_grad_acc(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node); // print info and performance information for the graph GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph); // dump the graph into a file using the dot format GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename); // TODO these functions were sandwiched in the old optimization interface, is there a better place for them? typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data); // Set callback for all future logging events. // If this is not called, or NULL is supplied, everything is output on stderr. GGML_API void ggml_log_get(ggml_log_callback * log_callback, void ** user_data); GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data); GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); // // quantization // // - ggml_quantize_init can be called multiple times with the same type // it will only initialize the quantization tables for the first call or after ggml_quantize_free // automatically called by ggml_quantize_chunk for convenience // // - ggml_quantize_free will free any memory allocated by ggml_quantize_init // call this at the end of the program to avoid memory leaks // // note: these are thread-safe // GGML_API void ggml_quantize_init(enum ggml_type type); GGML_API void ggml_quantize_free(void); // some quantization type cannot be used without an importance matrix GGML_API bool ggml_quantize_requires_imatrix(enum ggml_type type); // calls ggml_quantize_init internally (i.e. 
can allocate memory) GGML_API size_t ggml_quantize_chunk( enum ggml_type type, const float * src, void * dst, int64_t start, int64_t nrows, int64_t n_per_row, const float * imatrix); #ifdef __cplusplus // restrict not standard in C++ # if defined(__GNUC__) # define GGML_RESTRICT __restrict__ # elif defined(__clang__) # define GGML_RESTRICT __restrict # elif defined(_MSC_VER) # define GGML_RESTRICT __restrict # else # define GGML_RESTRICT # endif #else # if defined (_MSC_VER) && (__STDC_VERSION__ < 201112L) # define GGML_RESTRICT __restrict # else # define GGML_RESTRICT restrict # endif #endif typedef void (*ggml_to_float_t) (const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); struct ggml_type_traits { const char * type_name; int64_t blck_size; int64_t blck_size_interleave; // interleave elements in blocks size_t type_size; bool is_quantized; ggml_to_float_t to_float; ggml_from_float_t from_float_ref; }; GGML_API const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type); // ggml threadpool // TODO: currently, only a few functions are in the base ggml API, while the rest are in the CPU backend // the goal should be to create an API that other backends can use move everything to the ggml base // scheduling priorities enum ggml_sched_priority { GGML_SCHED_PRIO_LOW = -1, GGML_SCHED_PRIO_NORMAL, GGML_SCHED_PRIO_MEDIUM, GGML_SCHED_PRIO_HIGH, GGML_SCHED_PRIO_REALTIME }; // threadpool params // Use ggml_threadpool_params_default() or ggml_threadpool_params_init() to populate the defaults struct ggml_threadpool_params { bool cpumask[GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings) int n_threads; // number of threads enum ggml_sched_priority prio; // thread priority uint32_t poll; // polling level (0 - no polling, 100 - aggressive polling) bool strict_cpu; // strict cpu placement bool paused; // start in paused state }; struct ggml_threadpool; // forward declaration, see ggml.c typedef struct ggml_threadpool * ggml_threadpool_t; GGML_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads); GGML_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads); GGML_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/include/gguf.h000066400000000000000000000237601512524704700171200ustar00rootroot00000000000000// This file contains functionality related to "GGUF" files, the binary file format used by ggml. // GGUF files have the following structure: // // 1. File magic "GGUF" (4 bytes). // 2. File version (uint32_t). // 3. Number of ggml tensors in file (int64_t). // 4. Number of key-value-pairs in file (int64_t). // 5. For each KV pair: // 1. The key (string). // 2. The value type (gguf_type). // 3a. If the value type is GGUF_TYPE_ARRAY: // 1. The type of the array (gguf_type). // 2. The number of elements in the array (uint64_t). // 3. The binary representation of each element in the array. // 3b. Otherwise: // 1. The binary representation of the value. // 6. For each ggml tensor: // 1. The tensor name (string). // 2. The number of dimensions of the tensor (uint32_t). // 3. For each dimension: // 1. The size of the tensor in the dimension (int64_t). // 4. The tensor data type (ggml_type). // 5. The tensor data offset in the tensor data binary blob (uint64_t). // 7. 
The tensor data binary blob (optional, aligned). // // Strings are serialized as the string length (uint64_t) followed by the C string without the null terminator. // All enums are stored as int32_t. // All bool values are stored as int8_t. // If the special key "general.alignment" (uint32_t) is defined it is used for alignment, // otherwise GGUF_DEFAULT_ALIGNMENT is used. // // Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de) #pragma once #include "ggml.h" #include #include #define GGUF_MAGIC "GGUF" #define GGUF_VERSION 3 #define GGUF_KEY_GENERAL_ALIGNMENT "general.alignment" #define GGUF_DEFAULT_ALIGNMENT 32 #ifdef __cplusplus extern "C" { #endif // types that can be stored as GGUF KV data enum gguf_type { GGUF_TYPE_UINT8 = 0, GGUF_TYPE_INT8 = 1, GGUF_TYPE_UINT16 = 2, GGUF_TYPE_INT16 = 3, GGUF_TYPE_UINT32 = 4, GGUF_TYPE_INT32 = 5, GGUF_TYPE_FLOAT32 = 6, GGUF_TYPE_BOOL = 7, GGUF_TYPE_STRING = 8, GGUF_TYPE_ARRAY = 9, GGUF_TYPE_UINT64 = 10, GGUF_TYPE_INT64 = 11, GGUF_TYPE_FLOAT64 = 12, GGUF_TYPE_COUNT, // marks the end of the enum }; struct gguf_context; struct gguf_init_params { bool no_alloc; // if not NULL, create a ggml_context and allocate the tensor data in it struct ggml_context ** ctx; }; GGML_API struct gguf_context * gguf_init_empty(void); GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params); //GGML_API struct gguf_context * gguf_init_from_buffer(..); GGML_API void gguf_free(struct gguf_context * ctx); GGML_API const char * gguf_type_name(enum gguf_type type); GGML_API uint32_t gguf_get_version (const struct gguf_context * ctx); GGML_API size_t gguf_get_alignment (const struct gguf_context * ctx); GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx); GGML_API int64_t gguf_get_n_kv(const struct gguf_context * ctx); GGML_API int64_t gguf_find_key(const struct gguf_context * ctx, const char * key); // returns -1 if key is not found GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int64_t key_id); GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int64_t key_id); GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id); // will abort if the wrong type is used for the key GGML_API uint8_t gguf_get_val_u8 (const struct gguf_context * ctx, int64_t key_id); GGML_API int8_t gguf_get_val_i8 (const struct gguf_context * ctx, int64_t key_id); GGML_API uint16_t gguf_get_val_u16 (const struct gguf_context * ctx, int64_t key_id); GGML_API int16_t gguf_get_val_i16 (const struct gguf_context * ctx, int64_t key_id); GGML_API uint32_t gguf_get_val_u32 (const struct gguf_context * ctx, int64_t key_id); GGML_API int32_t gguf_get_val_i32 (const struct gguf_context * ctx, int64_t key_id); GGML_API float gguf_get_val_f32 (const struct gguf_context * ctx, int64_t key_id); GGML_API uint64_t gguf_get_val_u64 (const struct gguf_context * ctx, int64_t key_id); GGML_API int64_t gguf_get_val_i64 (const struct gguf_context * ctx, int64_t key_id); GGML_API double gguf_get_val_f64 (const struct gguf_context * ctx, int64_t key_id); GGML_API bool gguf_get_val_bool(const struct gguf_context * ctx, int64_t key_id); GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int64_t key_id); GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id); GGML_API size_t gguf_get_arr_n (const struct gguf_context * ctx, int64_t key_id); // get raw pointer to the first element of the array with the given key_id // 
for bool arrays, note that they are always stored as int8 on all platforms (usually this makes no difference) GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id); // get ith C string from array with given key_id GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i); GGML_API int64_t gguf_get_n_tensors (const struct gguf_context * ctx); GGML_API int64_t gguf_find_tensor (const struct gguf_context * ctx, const char * name); // returns -1 if the tensor is not found GGML_API size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int64_t tensor_id); GGML_API const char * gguf_get_tensor_name (const struct gguf_context * ctx, int64_t tensor_id); GGML_API enum ggml_type gguf_get_tensor_type (const struct gguf_context * ctx, int64_t tensor_id); GGML_API size_t gguf_get_tensor_size (const struct gguf_context * ctx, int64_t tensor_id); // removes key if it exists, returns id that the key had prior to removal (-1 if it didn't exist) GGML_API int64_t gguf_remove_key(struct gguf_context * ctx, const char * key); // overrides an existing KV pair or adds a new one, the new KV pair is always at the back GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val); GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val); GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val); GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val); GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val); GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val); GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val); GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t val); GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double val); GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val); GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val); // creates a new array with n elements of the given type and copies the corresponding number of bytes from data GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, size_t n); // creates a new array with n strings and copies the corresponding strings from data GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, size_t n); // set or add KV pairs from another context GGML_API void gguf_set_kv(struct gguf_context * ctx, const struct gguf_context * src); // add tensor to GGUF context, tensor name must be unique GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); // after changing a tensor's type, the offsets of all tensors with higher indices are immediately recalculated // in such a way that the tensor data remains as one contiguous block (except for padding) GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type); // assumes that at least gguf_get_tensor_size bytes can be read from data GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data); // writing gguf files can be done in 3 ways: // // - write the entire 
gguf_context to a binary file in a single pass: // // gguf_write_to_file(ctx, fname, /*only_meta =*/ false); // // - write only the meta data to a file, then re-open the file and append the tensor data: // // gguf_write_to_file(ctx, fname, /*only_meta =*/ true); // FILE * f = fopen(fname, "ab"); // fwrite(f, ...); // write tensor data // fclose(f); // // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data: // // FILE * f = fopen(fname, "wb"); // const size_t size_meta = gguf_get_meta_size(ctx); // fseek(f, size_meta, SEEK_SET); // fwrite(f, ...); // write tensor data // void * data = malloc(size_meta); // gguf_get_meta_data(ctx, data); // rewind(f); // fwrite(data, 1, data, f); // free(data); // fclose(f); // // write the entire context to a binary file GGML_API bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta); // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx); // writes the meta data to pointer "data" GGML_API void gguf_get_meta_data(const struct gguf_context * ctx, void * data); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/requirements.txt000066400000000000000000000003271512524704700176520ustar00rootroot00000000000000accelerate==0.19.0 numpy>=2.0.2 sentencepiece~=0.1.98 torchvision>=0.15.2 transformers>=4.35.2,<5.0.0 gguf>=0.1.0 keras==3.5.0 tensorflow==2.18.0 --extra-index-url https://download.pytorch.org/whl/cpu torch~=2.5.1 ggml-org-ggml-3678254/scripts/000077500000000000000000000000001512524704700160535ustar00rootroot00000000000000ggml-org-ggml-3678254/scripts/gen-authors.sh000077500000000000000000000005311512524704700206450ustar00rootroot00000000000000#!/usr/bin/env bash printf "# date: $(date)\n" > AUTHORS printf "# this file is auto-generated by scripts/gen-authors.sh\n\n" >> AUTHORS git log --format='%an <%ae>' --reverse --date=short master | awk '!seen[$0]++' | sort >> AUTHORS # if necessary, update your name here. for example: jdoe -> John Doe sed -i '' 's/^jdoe/John Doe/g' AUTHORS ggml-org-ggml-3678254/scripts/release.sh000077500000000000000000000223161512524704700200360ustar00rootroot00000000000000#!/bin/bash # # Automated release script for ggml. # # Note: Sync from llama.cpp should be done separately via PR process # prior to running this script. # # Usage: # ./scripts/release.sh prepare [major|minor|patch] [--dry-run] # ./scripts/release.sh finalize [--dry-run] # # Two-stage release process: # # Stage 1 - Prepare: # $ ./scripts/release.sh prepare minor # This creates a release candidate branch with version bump and removes -dev suffix. # The branch should then be manually pushed and a PR created, reviewed, and merged. # # Stage 2 - Finalize: # $ ./scripts/release.sh finalize # After the RC PR is merged, this reads the current version from CMakeLists.txt, # creates the release tag, and prepares the next development cycle. # # Prepare stage: # 1. Creates release candidate branch # 2. Updates version and removes -dev suffix # 3. Commits the version bump # # Finalize stage: # 1. Reads current release version from CMakeLists.txt # 2. Creates signed git tag on master # 3. Adds -dev suffix back for next development cycle # 4. Creates branch and commit for development version # set -e if [ ! -f "CMakeLists.txt" ] || [ ! 
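Tying together the write modes described above, here is a hedged sketch of the single-pass variant (gguf_write_to_file with only_meta == false). The ggml context is used only to describe the tensor's type and shape (no_alloc is set), its bytes are attached with gguf_set_tensor_data, and all names, sizes, and the output path are placeholders; ggml_init, ggml_new_tensor_1d, ggml_set_name, ggml_tensor_overhead and ggml_free are assumed from ggml.h.

    static bool gguf_write_example(void) {
        float data[16] = {0}; // must stay readable until gguf_write_to_file returns

        struct ggml_init_params ggml_params = {
            /*.mem_size   =*/ 8*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true, // metadata only, no tensor data in this context
        };
        struct ggml_context * ctx_ggml = ggml_init(ggml_params);
        struct ggml_tensor  * t        = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, 16);
        ggml_set_name(t, "example.weight");

        struct gguf_context * ctx_gguf = gguf_init_empty();
        gguf_set_val_str (ctx_gguf, "general.architecture", "example");
        gguf_set_val_u32 (ctx_gguf, GGUF_KEY_GENERAL_ALIGNMENT, GGUF_DEFAULT_ALIGNMENT);
        gguf_add_tensor  (ctx_gguf, t);
        gguf_set_tensor_data(ctx_gguf, "example.weight", data);

        const bool ok = gguf_write_to_file(ctx_gguf, "out.gguf", /*only_meta =*/ false);

        gguf_free(ctx_gguf);
        ggml_free(ctx_ggml);
        return ok;
    }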
-d "scripts" ]; then echo "Error: Must be run from ggml root directory" exit 1 fi # Parse command line arguments COMMAND="" VERSION_TYPE="" DRY_RUN=false # First argument should be the command if [ $# -eq 0 ]; then echo "Error: Missing command" echo "Usage: $0 prepare [major|minor|patch] [--dry-run]" echo " $0 finalize [--dry-run]" exit 1 fi COMMAND="$1" shift # Parse remaining arguments for arg in "$@"; do case $arg in --dry-run) DRY_RUN=true ;; major|minor|patch) if [ "$COMMAND" = "prepare" ]; then VERSION_TYPE="$arg" else echo "Error: Version type only valid for 'prepare' command" exit 1 fi ;; *) echo "Error: Unknown argument '$arg'" echo "Usage: $0 prepare [major|minor|patch] [--dry-run]" echo " $0 finalize [--dry-run]" exit 1 ;; esac done # Validate command if [[ ! "$COMMAND" =~ ^(prepare|finalize)$ ]]; then echo "Error: Command must be 'prepare' or 'finalize'" echo "Usage: $0 prepare [major|minor|patch] [--dry-run]" echo " $0 finalize [--dry-run]" exit 1 fi # For prepare command, default to patch if no version type specified if [ "$COMMAND" = "prepare" ]; then VERSION_TYPE="${VERSION_TYPE:-patch}" if [[ ! "$VERSION_TYPE" =~ ^(major|minor|patch)$ ]]; then echo "Error: Version type must be 'major', 'minor', or 'patch'" echo "Usage: $0 prepare [major|minor|patch] [--dry-run]" exit 1 fi fi # Common validation functions check_git_status() { # Check for uncommitted changes (skip in dry-run) if [ "$DRY_RUN" = false ] && ! git diff-index --quiet HEAD --; then echo "Error: You have uncommitted changes. Please commit or stash them first." exit 1 fi } check_master_branch() { # Ensure we're on master branch CURRENT_BRANCH=$(git branch --show-current) if [ "$CURRENT_BRANCH" != "master" ]; then if [ "$DRY_RUN" = true ]; then echo "[dry run] Warning: Not on master branch (currently on: $CURRENT_BRANCH). Continuing with dry-run..." echo "" else echo "Error: Must be on master branch. Currently on: $CURRENT_BRANCH" exit 1 fi fi } check_master_up_to_date() { # Check if we have the latest from master (skip in dry-run) if [ "$DRY_RUN" = false ]; then echo "Checking if local master is up-to-date with remote..." git fetch origin master LOCAL=$(git rev-parse HEAD) REMOTE=$(git rev-parse origin/master) if [ "$LOCAL" != "$REMOTE" ]; then echo "Error: Your local master branch is not up-to-date with origin/master." echo "Please run 'git pull origin master' first." exit 1 fi echo "✓ Local master is up-to-date with remote" echo "" elif [ "$(git branch --show-current)" = "master" ]; then echo "[dry run] Warning: Dry-run mode - not checking if master is up-to-date with remote" echo "" fi } prepare_release() { if [ "$DRY_RUN" = true ]; then echo "[dry-run] Preparing release (no changes will be made)" else echo "Starting release preparation..." fi echo "" check_git_status check_master_branch check_master_up_to_date # Extract current version from CMakeLists.txt echo "Step 1: Reading current version..." 
MAJOR=$(grep "set(GGML_VERSION_MAJOR" CMakeLists.txt | sed 's/.*MAJOR \([0-9]*\).*/\1/') MINOR=$(grep "set(GGML_VERSION_MINOR" CMakeLists.txt | sed 's/.*MINOR \([0-9]*\).*/\1/') PATCH=$(grep "set(GGML_VERSION_PATCH" CMakeLists.txt | sed 's/.*PATCH \([0-9]*\).*/\1/') echo "Current version: $MAJOR.$MINOR.$PATCH" # Calculate new version case $VERSION_TYPE in major) NEW_MAJOR=$((MAJOR + 1)) NEW_MINOR=0 NEW_PATCH=0 ;; minor) NEW_MAJOR=$MAJOR NEW_MINOR=$((MINOR + 1)) NEW_PATCH=0 ;; patch) NEW_MAJOR=$MAJOR NEW_MINOR=$MINOR NEW_PATCH=$((PATCH + 1)) ;; esac NEW_VERSION="$NEW_MAJOR.$NEW_MINOR.$NEW_PATCH" RC_BRANCH="ggml-rc-v$NEW_VERSION" echo "New release version: $NEW_VERSION" echo "Release candidate branch: $RC_BRANCH" echo "" # Create release candidate branch echo "Step 2: Creating release candidate branch..." if [ "$DRY_RUN" = true ]; then echo " [dry-run] Would create branch: $RC_BRANCH" else git checkout -b "$RC_BRANCH" echo "✓ Created and switched to branch: $RC_BRANCH" fi echo "" # Update CMakeLists.txt for release echo "Step 3: Updating version in CMakeLists.txt..." if [ "$DRY_RUN" = true ]; then echo " [dry-run] Would update GGML_VERSION_MAJOR to $NEW_MAJOR" echo " [dry-run] Would update GGML_VERSION_MINOR to $NEW_MINOR" echo " [dry-run] Would update GGML_VERSION_PATCH to $NEW_PATCH" else sed -i'' -e "s/set(GGML_VERSION_MAJOR [0-9]*)/set(GGML_VERSION_MAJOR $NEW_MAJOR)/" CMakeLists.txt sed -i'' -e "s/set(GGML_VERSION_MINOR [0-9]*)/set(GGML_VERSION_MINOR $NEW_MINOR)/" CMakeLists.txt sed -i'' -e "s/set(GGML_VERSION_PATCH [0-9]*)/set(GGML_VERSION_PATCH $NEW_PATCH)/" CMakeLists.txt fi echo "" # Commit version bump echo "Step 4: Committing version bump..." if [ "$DRY_RUN" = true ]; then echo " [dry-run] Would commit: 'ggml : bump version to $NEW_VERSION'" else git add CMakeLists.txt git commit -m "ggml : bump version to $NEW_VERSION" fi echo "" echo "" if [ "$DRY_RUN" = true ]; then echo "[dry-run] Summary (no changes were made):" echo " • Would have created branch: $RC_BRANCH" echo " • Would have updated version to: $NEW_VERSION" else echo "Release preparation completed!" echo "Summary:" echo " • Created branch: $RC_BRANCH" echo " • Updated version to: $NEW_VERSION" echo "" echo "Next steps:" echo " • Push branch to remote: git push origin $RC_BRANCH" echo " • Create a Pull Request from $RC_BRANCH to master" echo " • After PR is merged, run: ./scripts/release.sh finalize" fi } finalize_release() { if [ "$DRY_RUN" = true ]; then echo "[dry-run] Finalizing release (no changes will be made)" else echo "Starting release finalization..." fi echo "" check_git_status check_master_branch check_master_up_to_date # Read current version from CMakeLists.txt echo "Step 1: Reading current release version..." MAJOR=$(grep "set(GGML_VERSION_MAJOR" CMakeLists.txt | sed 's/.*MAJOR \([0-9]*\).*/\1/') MINOR=$(grep "set(GGML_VERSION_MINOR" CMakeLists.txt | sed 's/.*MINOR \([0-9]*\).*/\1/') PATCH=$(grep "set(GGML_VERSION_PATCH" CMakeLists.txt | sed 's/.*PATCH \([0-9]*\).*/\1/') RELEASE_VERSION="$MAJOR.$MINOR.$PATCH" echo "Release version: $RELEASE_VERSION" echo "" # Create git tag echo "Step 2: Creating signed git tag..." 
if [ "$DRY_RUN" = true ]; then echo " [dry-run] Would create signed tag: v$RELEASE_VERSION with message 'Release version $RELEASE_VERSION'" else git tag -s "v$RELEASE_VERSION" -m "Release version $RELEASE_VERSION" echo "✓ Created signed tag: v$RELEASE_VERSION" fi echo "" echo "" if [ "$DRY_RUN" = true ]; then echo "[dry-run] Summary (no changes were made):" echo " • Would have created tag: v$RELEASE_VERSION" else echo "Release finalization completed!" echo "Summary:" echo " • Created signed tag: v$RELEASE_VERSION" echo "" echo "Next steps:" echo " • Push tag to remote: git push origin v$RELEASE_VERSION" echo " • The release is now complete!" fi } # Execute the appropriate command case $COMMAND in prepare) prepare_release ;; finalize) finalize_release ;; esac ggml-org-ggml-3678254/scripts/sync-llama-am.sh000077500000000000000000000122501512524704700210450ustar00rootroot00000000000000#!/bin/bash # # Synchronize llama.cpp changes to ggml # # Usage: # # $ cd /path/to/ggml # $ ./scripts/sync-llama-am.sh -skip hash0,hash1,hash2... -C 3 # set -e sd=$(dirname $0) cd $sd/../ SRC_GGML=$(pwd) SRC_LLAMA=$(cd ../llama.cpp; pwd) if [ ! -d $SRC_LLAMA ]; then echo "llama.cpp not found at $SRC_LLAMA" exit 1 fi lc=$(cat $SRC_GGML/scripts/sync-llama.last) echo "Syncing llama.cpp changes since commit $lc" to_skip="" # context for git patches in number of lines ctx="8" while [ "$1" != "" ]; do case $1 in -skip ) shift to_skip=$1 ;; -C ) shift ctx=$1 ;; esac shift done cd $SRC_LLAMA git log --oneline $lc..HEAD git log --oneline $lc..HEAD --reverse | grep -v "(ggml/[0-9]*)" | grep -v "(whisper/[0-9]*)" | cut -d' ' -f1 > $SRC_GGML/llama-commits if [ ! -s $SRC_GGML/llama-commits ]; then rm -v $SRC_GGML/llama-commits echo "No new commits" exit 0 fi if [ -f $SRC_GGML/llama-src.patch ]; then rm -v $SRC_GGML/llama-src.patch fi while read c; do if [ -n "$to_skip" ]; then if [[ $to_skip == *"$c"* ]]; then echo "Skipping $c" continue fi fi git format-patch -U${ctx} -k $c~1..$c --stdout -- \ ggml/CMakeLists.txt \ ggml/src/CMakeLists.txt \ ggml/cmake/BuildTypes.cmake \ ggml/cmake/GitVars.cmake \ ggml/cmake/common.cmake \ ggml/cmake/ggml-config.cmake.in \ ggml/src/ggml-cpu/cmake/FindSIMD.cmake \ ggml/src/ggml* \ ggml/include/ggml*.h \ ggml/include/gguf*.h \ tests/test-opt.cpp \ tests/test-quantize-fns.cpp \ tests/test-quantize-perf.cpp \ tests/test-backend-ops.cpp \ LICENSE \ scripts/gen-authors.sh \ >> $SRC_GGML/llama-src.patch done < $SRC_GGML/llama-commits rm -v $SRC_GGML/llama-commits # delete files if empty if [ ! 
-s $SRC_GGML/llama-src.patch ]; then rm -v $SRC_GGML/llama-src.patch fi cd $SRC_GGML if [ -f $SRC_GGML/llama-src.patch ]; then # replace PR numbers # # Subject: some text (#1234) # Subject: some text (llama/1234) cat llama-src.patch | sed -e 's/^Subject: \(.*\) (#\([0-9]*\))/Subject: \1 (llama\/\2)/' > llama-src.patch.tmp mv llama-src.patch.tmp llama-src.patch cat llama-src.patch | sed -e 's/^\(.*\) (#\([0-9]*\))$/\1 (llama\/\2)/' > llama-src.patch.tmp mv llama-src.patch.tmp llama-src.patch # replace filenames: # # ggml/CMakelists.txt -> CMakeLists.txt # ggml/src/CMakelists.txt -> src/CMakeLists.txt # # ggml/cmake/BuildTypes.cmake -> cmake/BuildTypes.cmake # ggml/cmake/GitVars.cmake -> cmake/GitVars.cmake # ggml/cmake/common.cmake -> cmake/common.cmake # ggml/cmake/ggml-config.cmake.in -> cmake/ggml-config.cmake.in # ggml/src/ggml-cpu/cmake/FindSIMD.cmake -> src/ggml-cpu/cmake/FindSIMD.cmake # # ggml/src/ggml* -> src/ggml* # # ggml/include/ggml*.h -> include/ggml*.h # ggml/include/gguf*.h -> include/gguf*.h # # tests/test-opt.cpp -> tests/test-opt.cpp # tests/test-quantize-fns.cpp -> tests/test-quantize-fns.cpp # tests/test-quantize-perf.cpp -> tests/test-quantize-perf.cpp # tests/test-backend-ops.cpp -> tests/test-backend-ops.cpp # # LICENSE -> LICENSE # scripts/gen-authors.sh -> scripts/gen-authors.sh cat llama-src.patch | sed -E \ -e 's/(^[[:space:]]| [ab]\/)ggml\/CMakeLists\.txt/\1CMakeLists.txt/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/src\/CMakeLists\.txt/\1src\/CMakeLists.txt/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/cmake\/BuildTypes\.cmake/\1cmake\/BuildTypes\.cmake/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/cmake\/GitVars\.cmake/\1cmake\/GitVars\.cmake/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/cmake\/common\.cmake/\1cmake\/common\.cmake/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/cmake\/ggml-config\.cmake\.in/\1cmake\/ggml-config\.cmake\.in/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/src\/ggml-cpu\/cmake\/FindSIMD\.cmake/\1src\/ggml-cpu\/cmake\/FindSIMD\.cmake/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/src\/ggml(.*)/\1src\/ggml\2/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/include\/ggml(.*)\.h/\1include\/ggml\2.h/g' \ -e 's/(^[[:space:]]| [ab]\/)ggml\/include\/gguf(.*)\.h/\1include\/gguf\2.h/g' \ -e 's/(^[[:space:]]| [ab]\/)tests\/test-opt\.cpp/\1tests\/test-opt.cpp/g' \ -e 's/(^[[:space:]]| [ab]\/)tests\/test-quantize-fns\.cpp/\1tests\/test-quantize-fns.cpp/g' \ -e 's/(^[[:space:]]| [ab]\/)tests\/test-quantize-perf\.cpp/\1tests\/test-quantize-perf.cpp/g' \ -e 's/(^[[:space:]]| [ab]\/)tests\/test-backend-ops\.cpp/\1tests\/test-backend-ops.cpp/g' \ -e 's/(^[[:space:]]| [ab]\/)LICENSE/\1LICENSE/g' \ -e 's/(^[[:space:]]| [ab]\/)scripts\/gen-authors\.sh/\1scripts\/gen-authors.sh/g' \ > llama-src.patch.tmp mv llama-src.patch.tmp llama-src.patch git am -C${ctx} llama-src.patch rm -v $SRC_GGML/llama-src.patch fi # update last commit cd $SRC_LLAMA git log -1 --format=%H > $SRC_GGML/scripts/sync-llama.last echo "Done" exit 0 ggml-org-ggml-3678254/scripts/sync-llama.last000066400000000000000000000000511512524704700207740ustar00rootroot000000000000009a6369bb603457f277b597f0ccee1c19cd25c4b2 ggml-org-ggml-3678254/scripts/sync-llama.sh000077500000000000000000000015041512524704700204520ustar00rootroot00000000000000#!/bin/bash cp -rpv ../llama.cpp/ggml/CMakeLists.txt CMakeLists.txt cp -rpv ../llama.cpp/ggml/src/CMakeLists.txt src/CMakeLists.txt cp -rpv ../llama.cpp/ggml/cmake/* cmake/ cp -rpv ../llama.cpp/ggml/src/ggml-cpu/cmake/* src/ggml-cpu/cmake/ cp -rpv ../llama.cpp/ggml/src/ggml* src/ cp -rpv 
../llama.cpp/ggml/include/ggml*.h include/ cp -rpv ../llama.cpp/ggml/include/gguf*.h include/ cp -rpv ../llama.cpp/tests/test-opt.cpp tests/test-opt.cpp cp -rpv ../llama.cpp/tests/test-quantize-fns.cpp tests/test-quantize-fns.cpp cp -rpv ../llama.cpp/tests/test-quantize-perf.cpp tests/test-quantize-perf.cpp cp -rpv ../llama.cpp/tests/test-backend-ops.cpp tests/test-backend-ops.cpp cp -rpv ../llama.cpp/LICENSE ./LICENSE cp -rpv ../llama.cpp/scripts/gen-authors.sh ./scripts/gen-authors.sh ggml-org-ggml-3678254/scripts/sync-whisper-am.sh000077500000000000000000000074161512524704700214500ustar00rootroot00000000000000#!/bin/bash # # Synchronize whisper.cpp changes to ggml # # Usage: # # $ cd /path/to/ggml # $ ./scripts/sync-whisper-am.sh -skip hash0,hash1,hash2... # set -e sd=$(dirname $0) cd $sd/../ SRC_GGML=$(pwd) SRC_WHISPER=$(cd ../whisper.cpp; pwd) if [ ! -d $SRC_WHISPER ]; then echo "whisper.cpp not found at $SRC_WHISPER" exit 1 fi lc=$(cat $SRC_GGML/scripts/sync-whisper.last) echo "Syncing whisper.cpp changes since commit $lc" to_skip="" if [ "$1" == "-skip" ]; then to_skip=$2 fi cd $SRC_WHISPER git log --oneline $lc..HEAD git log --oneline $lc..HEAD --reverse | grep -v "(ggml/[0-9]*)" | grep -v "(llama/[0-9]*)" | cut -d' ' -f1 > $SRC_GGML/whisper-commits if [ ! -s $SRC_GGML/whisper-commits ]; then rm -v $SRC_GGML/whisper-commits echo "No new commits" exit 0 fi if [ -f $SRC_GGML/whisper-src.patch ]; then rm -v $SRC_GGML/whisper-src.patch fi while read c; do if [ -n "$to_skip" ]; then if [[ $to_skip == *"$c"* ]]; then echo "Skipping $c" continue fi fi git format-patch -k $c~1..$c --stdout -- \ ggml/CMakeLists.txt \ ggml/src/CMakeLists.txt \ ggml/cmake/FindSIMD.cmake \ ggml/src/ggml* \ ggml/include/ggml*.h \ ggml/include/gguf*.h \ examples/common-ggml.h \ examples/common-ggml.cpp \ LICENSE \ scripts/gen-authors.sh \ >> $SRC_GGML/whisper-src.patch done < $SRC_GGML/whisper-commits rm -v $SRC_GGML/whisper-commits # delete files if empty if [ ! 
-s $SRC_GGML/whisper-src.patch ]; then rm -v $SRC_GGML/whisper-src.patch fi cd $SRC_GGML if [ -f $SRC_GGML/whisper-src.patch ]; then # replace PR numbers # # Subject: some text (#1234) # Subject: some text (whisper/1234) cat whisper-src.patch | sed -e 's/^Subject: \(.*\) (#\([0-9]*\))/Subject: \1 (whisper\/\2)/' > whisper-src.patch.tmp mv whisper-src.patch.tmp whisper-src.patch cat whisper-src.patch | sed -e 's/^\(.*\) (#\([0-9]*\))$/\1 (whisper\/\2)/' > whisper-src.patch.tmp mv whisper-src.patch.tmp whisper-src.patch # replace filenames: # # ggml/CMakelists.txt -> CMakeLists.txt # ggml/src/CMakelists.txt -> src/CMakeLists.txt # ggml/cmake/FindSIMD.cmake -> cmake/FindSIMD.cmake # # ggml/src/ggml* -> src/ggml* # # ggml/include/ggml*.h -> include/ggml*.h # ggml/include/gguf*.h -> include/gguf*.h # # examples/common.h -> examples/common.h # examples/common.cpp -> examples/common.cpp # examples/common-ggml.h -> examples/common-ggml.h # examples/common-ggml.cpp -> examples/common-ggml.cpp # # LICENSE -> LICENSE # scripts/gen-authors.sh -> scripts/gen-authors.sh cat whisper-src.patch | sed -E \ -e 's/\/ggml\/CMakeLists\.txt/\/CMakeLists.txt/g' \ -e 's/\/ggml\/src\/CMakeLists\.txt/\/src\/CMakeLists.txt/g' \ -e 's/\/ggml\/cmake\/FindSIMD\.cmake/\/cmake\/FindSIMD.cmake/g' \ -e 's/\/ggml\/src\/ggml(.*)/\/src\/ggml\1/g' \ -e 's/\/ggml\/include\/ggml(.*)\.h/\/include\/ggml\1.h/g' \ -e 's/\/ggml\/include\/gguf(.*)\.h/\/include\/gguf\1.h/g' \ -e 's/\/examples\/common\.h/\/examples\/common.h/g' \ -e 's/\/examples\/common\.cpp/\/examples\/common.cpp/g' \ -e 's/\/examples\/common-ggml\.h/\/examples\/common-ggml.h/g' \ -e 's/\/examples\/common-ggml\.cpp/\/examples\/common-ggml.cpp/g' \ -e 's/\/LICENSE/\/LICENSE/g' \ -e 's/\/scripts\/gen-authors\.sh/\/scripts\/gen-authors.sh/g' \ > whisper-src.patch.tmp mv whisper-src.patch.tmp whisper-src.patch git am whisper-src.patch rm -v $SRC_GGML/whisper-src.patch fi # update last commit cd $SRC_WHISPER git log -1 --format=%H > $SRC_GGML/scripts/sync-whisper.last echo "Done" exit 0 ggml-org-ggml-3678254/scripts/sync-whisper.last000066400000000000000000000000511512524704700213670ustar00rootroot000000000000007359ac94d54967a39b6c24f170174a9fe09303da ggml-org-ggml-3678254/scripts/sync-whisper.sh000077500000000000000000000011771512524704700210530ustar00rootroot00000000000000#!/bin/bash cp -rpv ../whisper.cpp/ggml/CMakeLists.txt CMakeLists.txt cp -rpv ../whisper.cpp/ggml/src/CMakeLists.txt src/CMakeLists.txt cp -rpv ../whisper.cpp/ggml/cmake/FindSIMD.cmake cmake/FindSIMD.cmake cp -rpv ../whisper.cpp/ggml/src/ggml* src/ cp -rpv ../whisper.cpp/ggml/include/ggml*.h include/ cp -rpv ../whisper.cpp/ggml/include/gguf*.h include/ cp -rpv ../whisper.cpp/examples/common-ggml.h examples/common-ggml.h cp -rpv ../whisper.cpp/examples/common-ggml.cpp examples/common-ggml.cpp cp -rpv ../whisper.cpp/LICENSE ./LICENSE cp -rpv ../whisper.cpp/scripts/gen-authors.sh ./scripts/gen-authors.sh ggml-org-ggml-3678254/src/000077500000000000000000000000001512524704700151535ustar00rootroot00000000000000ggml-org-ggml-3678254/src/CMakeLists.txt000066400000000000000000000450061512524704700177200ustar00rootroot00000000000000include(CheckCXXCompilerFlag) include("../cmake/common.cmake") add_compile_definitions(GGML_SCHED_MAX_COPIES=${GGML_SCHED_MAX_COPIES}) # enable libstdc++ assertions for debug builds if (CMAKE_SYSTEM_NAME MATCHES "Linux") add_compile_definitions($<$:_GLIBCXX_ASSERTIONS>) endif() if (NOT MSVC) if (GGML_SANITIZE_THREAD) add_compile_options(-fsanitize=thread) link_libraries 
(-fsanitize=thread) endif() if (GGML_SANITIZE_ADDRESS) add_compile_options(-fsanitize=address -fno-omit-frame-pointer) link_libraries (-fsanitize=address) endif() if (GGML_SANITIZE_UNDEFINED) add_compile_options(-fsanitize=undefined) link_libraries (-fsanitize=undefined) endif() endif() if (GGML_FATAL_WARNINGS) if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") list(APPEND C_FLAGS -Werror) list(APPEND CXX_FLAGS -Werror) elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") add_compile_options(/WX) endif() endif() if (GGML_ALL_WARNINGS) if (NOT MSVC) list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function) list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration) list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn) list(APPEND C_FLAGS ${WARNING_FLAGS}) list(APPEND CXX_FLAGS ${WARNING_FLAGS}) ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}) add_compile_options("$<$:${C_FLAGS};${GF_C_FLAGS}>" "$<$:${CXX_FLAGS};${GF_CXX_FLAGS}>") else() # todo : msvc set(C_FLAGS "") set(CXX_FLAGS "") endif() endif() if (GGML_LTO) include(CheckIPOSupported) check_ipo_supported(RESULT result OUTPUT output) if (result) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) else() message(WARNING "IPO is not supported: ${output}") endif() endif() if (GGML_CCACHE AND NOT CMAKE_C_COMPILER_LAUNCHER AND NOT CMAKE_CXX_COMPILER_LAUNCHER) find_program(GGML_CCACHE_FOUND ccache) find_program(GGML_SCCACHE_FOUND sccache) if (GGML_CCACHE_FOUND OR GGML_SCCACHE_FOUND) if(GGML_CCACHE_FOUND) set(GGML_CCACHE_VARIANT ccache) else() set(GGML_CCACHE_VARIANT sccache) endif() # TODO: should not be set globally if (GGML_SYCL AND GGML_CCACHE_FOUND AND WIN32) set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "ccache compiler_type=icl") else () set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${GGML_CCACHE_VARIANT}") endif () set(ENV{CCACHE_SLOPPINESS} time_macros) message(STATUS "${GGML_CCACHE_VARIANT} found, compilation results will be cached. 
Disable with GGML_CCACHE=OFF.") else() message(STATUS "Warning: ccache not found - consider installing it for faster compilation or disable this warning with GGML_CCACHE=OFF") endif () endif() # this version of Apple ld64 is buggy execute_process( COMMAND ${CMAKE_C_COMPILER} ${CMAKE_EXE_LINKER_FLAGS} -Wl,-v ERROR_VARIABLE output OUTPUT_QUIET ) if (output MATCHES "dyld-1015\.7") add_compile_definitions(HAVE_BUGGY_APPLE_LINKER) endif() # architecture specific # TODO: probably these flags need to be tweaked on some architectures # feel free to update the Makefile for your architecture and send a pull request or issue message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}") if (MSVC) string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR) message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}") else () set(CMAKE_GENERATOR_PLATFORM_LWR "") endif () ggml_get_system_arch() message(STATUS "GGML_SYSTEM_ARCH: ${GGML_SYSTEM_ARCH}") if (NOT MSVC) if (GGML_STATIC) if (UNIX AND NOT APPLE) set(CMAKE_FIND_LIBRARY_SUFFIXES ".a;.so") endif() add_link_options(-static) if (MINGW) add_link_options(-static-libgcc -static-libstdc++) endif() endif() if (GGML_GPROF) add_compile_options(-pg) endif() endif() # # POSIX conformance # # clock_gettime came in POSIX.1b (1993) # CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional # posix_memalign came in POSIX.1-2001 / SUSv3 # M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985) # Somehow in OpenBSD whenever POSIX conformance is specified # some string functions rely on locale_t availability, # which was introduced in POSIX.1-2008, forcing us to go higher if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") add_compile_definitions(_XOPEN_SOURCE=700) elseif (CMAKE_SYSTEM_NAME MATCHES "AIX") # Don't define _XOPEN_SOURCE. We need _ALL_SOURCE, which is the default, # in order to define _SC_PHYS_PAGES. 
else() add_compile_definitions(_XOPEN_SOURCE=600) endif() # Data types, macros and functions related to controlling CPU affinity and # some memory allocation are available on Linux through GNU extensions in libc if (CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "Android") add_compile_definitions(_GNU_SOURCE) endif() # RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1, # and on macOS its availability depends on enabling Darwin extensions # similarly on DragonFly, enabling BSD extensions is necessary if ( CMAKE_SYSTEM_NAME MATCHES "Darwin" OR CMAKE_SYSTEM_NAME MATCHES "iOS" OR CMAKE_SYSTEM_NAME MATCHES "tvOS" OR CMAKE_SYSTEM_NAME MATCHES "DragonFly" ) add_compile_definitions(_DARWIN_C_SOURCE) endif() # alloca is a non-standard interface that is not visible on BSDs when # POSIX conformance is specified, but not all of them provide a clean way # to enable it in such cases if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") add_compile_definitions(__BSD_VISIBLE) endif() if (CMAKE_SYSTEM_NAME MATCHES "NetBSD") add_compile_definitions(_NETBSD_SOURCE) endif() if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") add_compile_definitions(_BSD_SOURCE) endif() if (WIN32) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) endif() # ggml if (GGML_BACKEND_DL AND NOT BUILD_SHARED_LIBS) message(FATAL_ERROR "GGML_BACKEND_DL requires BUILD_SHARED_LIBS") endif() add_library(ggml-base ../include/ggml.h ../include/ggml-alloc.h ../include/ggml-backend.h ../include/ggml-cpp.h ../include/ggml-opt.h ../include/gguf.h ggml.c ggml.cpp ggml-alloc.c ggml-backend.cpp ggml-opt.cpp ggml-threading.cpp ggml-threading.h ggml-quants.c ggml-quants.h gguf.cpp) set_target_properties(ggml-base PROPERTIES VERSION ${GGML_VERSION} SOVERSION ${GGML_VERSION_MAJOR} ) target_include_directories(ggml-base PRIVATE .) if (GGML_BACKEND_DL) target_compile_definitions(ggml-base PUBLIC GGML_BACKEND_DL) endif() if (GGML_SCHED_NO_REALLOC) target_compile_definitions(ggml-base PUBLIC GGML_SCHED_NO_REALLOC) endif() add_library(ggml ggml-backend-reg.cpp) add_library(ggml::ggml ALIAS ggml) set_target_properties(ggml PROPERTIES VERSION ${GGML_VERSION} SOVERSION ${GGML_VERSION_MAJOR} ) if (GGML_BACKEND_DIR) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_BACKEND_DIR requires GGML_BACKEND_DL") endif() target_compile_definitions(ggml PUBLIC GGML_BACKEND_DIR="${GGML_BACKEND_DIR}") endif() target_link_libraries(ggml PUBLIC ggml-base) if (CMAKE_SYSTEM_NAME MATCHES "Linux") target_link_libraries(ggml PRIVATE dl) endif() function(ggml_add_backend_library backend) if (GGML_BACKEND_DL) add_library(${backend} MODULE ${ARGN}) # write the shared library to the output directory set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL) add_dependencies(ggml ${backend}) if (GGML_BACKEND_DIR) install(TARGETS ${backend} LIBRARY DESTINATION ${GGML_BACKEND_DIR}) else() install(TARGETS ${backend} LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR}) endif() else() add_library(${backend} ${ARGN}) target_link_libraries(ggml PUBLIC ${backend}) install(TARGETS ${backend} LIBRARY) endif() target_link_libraries(${backend} PRIVATE ggml-base) target_include_directories(${backend} PRIVATE ..) 
if (${BUILD_SHARED_LIBS}) target_compile_definitions(${backend} PRIVATE GGML_BACKEND_BUILD) target_compile_definitions(${backend} PUBLIC GGML_BACKEND_SHARED) endif() # Set versioning properties for all backend libraries # Building a MODULE library with a version is not supported on macOS (https://gitlab.kitware.com/cmake/cmake/-/issues/20782) if (NOT (APPLE AND GGML_BACKEND_DL)) set_target_properties(${backend} PROPERTIES VERSION ${GGML_VERSION} SOVERSION ${GGML_VERSION_MAJOR} ) endif() if(NOT GGML_AVAILABLE_BACKENDS) set(GGML_AVAILABLE_BACKENDS "${backend}" CACHE INTERNAL "List of backends for cmake package") else() list(FIND GGML_AVAILABLE_BACKENDS "${backend}" has_backend) if(has_backend EQUAL -1) set(GGML_AVAILABLE_BACKENDS "${GGML_AVAILABLE_BACKENDS};${backend}" CACHE INTERNAL "List of backends for cmake package") endif() endif() endfunction() function(ggml_add_backend backend) string(TOUPPER "GGML_${backend}" backend_id) if (${backend_id}) string(TOLOWER "ggml-${backend}" backend_target) add_subdirectory(${backend_target}) message(STATUS "Including ${backend} backend") if (NOT GGML_BACKEND_DL) string(TOUPPER "GGML_USE_${backend}" backend_use) target_compile_definitions(ggml PUBLIC ${backend_use}) endif() endif() endfunction() function(ggml_add_cpu_backend_variant tag_name) set(GGML_CPU_TAG_NAME ${tag_name}) # other: OPENMP LLAMAFILE CPU_HBM if (GGML_SYSTEM_ARCH STREQUAL "x86") foreach (feat NATIVE SSE42 AVX AVX2 BMI2 AVX_VNNI FMA F16C AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8 AMX_BF16) set(GGML_${feat} OFF) endforeach() foreach (feat ${ARGN}) set(GGML_${feat} ON) endforeach() elseif (GGML_SYSTEM_ARCH STREQUAL "ARM") foreach (feat ${ARGN}) set(GGML_INTERNAL_${feat} ON) endforeach() elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") foreach (feat ${ARGN}) set(GGML_INTERNAL_${feat} ON) endforeach() elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") foreach (feat VXE2 NNPA) set(GGML_INTERNAL_${feat} OFF) endforeach() foreach (feat ${ARGN}) set(GGML_INTERNAL_${feat} ON) endforeach() elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") foreach (feat RVV) set(GGML_INTERNAL_${feat} OFF) endforeach() foreach (feat ${ARGN}) set(GGML_INTERNAL_${feat} ON) endforeach() endif() ggml_add_cpu_backend_variant_impl(${tag_name}) endfunction() ggml_add_backend(CPU) if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") elseif (GGML_CPU_ARM_ARCH) message(FATAL_ERROR "Cannot use both GGML_CPU_ARM_ARCH and GGML_CPU_ALL_VARIANTS") endif() if (GGML_SYSTEM_ARCH STREQUAL "x86") ggml_add_cpu_backend_variant(x64) ggml_add_cpu_backend_variant(sse42 SSE42) ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) if (NOT MSVC) # __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 ggml_add_cpu_backend_variant(ivybridge SSE42 AVX F16C) ggml_add_cpu_backend_variant(piledriver SSE42 AVX F16C FMA) endif() ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C FMA AVX2 BMI2) ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C FMA AVX2 BMI2 AVX512) ggml_add_cpu_backend_variant(cannonlake SSE42 AVX F16C FMA AVX2 BMI2 AVX512 AVX512_VBMI) ggml_add_cpu_backend_variant(cascadelake SSE42 AVX F16C FMA AVX2 BMI2 AVX512 AVX512_VNNI) ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C FMA AVX2 BMI2 AVX512 AVX512_VBMI AVX512_VNNI) if (NOT MSVC) # MSVC 2022 doesn't support BF16 intrinsics without `/arch:AVX10.1` ?! 
# https://learn.microsoft.com/en-us/cpp/intrinsics/x64-amd64-intrinsics-list?view=msvc-170 # https://learn.microsoft.com/en-us/cpp/build/reference/arch-x64?view=msvc-170 ggml_add_cpu_backend_variant(cooperlake SSE42 AVX F16C FMA AVX2 BMI2 AVX512 AVX512_VNNI AVX512_BF16) ggml_add_cpu_backend_variant(zen4 SSE42 AVX F16C FMA AVX2 BMI2 AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16) endif() ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C FMA AVX2 BMI2 AVX_VNNI) if (NOT MSVC) # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C FMA AVX2 BMI2 AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() elseif(GGML_SYSTEM_ARCH STREQUAL "ARM") if (CMAKE_SYSTEM_NAME MATCHES "Linux") # Many of these features are optional so we build versions with popular # combinations and name the backends based on the version they were # first released with ggml_add_cpu_backend_variant(armv8.0_1) ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) elseif (CMAKE_SYSTEM_NAME MATCHES "Android") # Android-specific backends with SoC-compatible feature sets ggml_add_cpu_backend_variant(android_armv8.0_1) ggml_add_cpu_backend_variant(android_armv8.2_1 DOTPROD) ggml_add_cpu_backend_variant(android_armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) ggml_add_cpu_backend_variant(android_armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC MATMUL_INT8) ggml_add_cpu_backend_variant(android_armv9.0_1 DOTPROD MATMUL_INT8 FP16_VECTOR_ARITHMETIC SVE2) ggml_add_cpu_backend_variant(android_armv9.2_1 DOTPROD MATMUL_INT8 FP16_VECTOR_ARITHMETIC SVE SME) ggml_add_cpu_backend_variant(android_armv9.2_2 DOTPROD MATMUL_INT8 FP16_VECTOR_ARITHMETIC SVE SVE2 SME) elseif (APPLE) ggml_add_cpu_backend_variant(apple_m1 DOTPROD) ggml_add_cpu_backend_variant(apple_m2_m3 DOTPROD MATMUL_INT8) ggml_add_cpu_backend_variant(apple_m4 DOTPROD MATMUL_INT8 NOSVE SME) else() message(FATAL_ERROR "Unsupported ARM target OS: ${CMAKE_SYSTEM_NAME}") endif() elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") if (CMAKE_SYSTEM_NAME MATCHES "Linux") ggml_add_cpu_backend_variant(power0) ggml_add_cpu_backend_variant(power7_1 POWER7) ggml_add_cpu_backend_variant(power7_2 POWER7 VSX) ggml_add_cpu_backend_variant(power8_1 POWER8) ggml_add_cpu_backend_variant(power8_2 POWER8 VSX) ggml_add_cpu_backend_variant(power9 POWER9 VSX) ggml_add_cpu_backend_variant(power10 POWER10 VSX) ggml_add_cpu_backend_variant(power11 POWER11 VSX) else() message(FATAL_ERROR "Unsupported PowerPC target OS: ${CMAKE_SYSTEM_NAME}") endif() elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") if (CMAKE_SYSTEM_NAME MATCHES "Linux") ggml_add_cpu_backend_variant(z15 Z15 VXE2) ggml_add_cpu_backend_variant(z16 Z16 VXE2 NNPA) else() message(FATAL_ERROR "Unsupported s390x target OS: ${CMAKE_SYSTEM_NAME}") endif() elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") if (CMAKE_SYSTEM_NAME MATCHES "Linux") ggml_add_cpu_backend_variant(riscv64_0) ggml_add_cpu_backend_variant(riscv64_v RVV) else() message(FATAL_ERROR "Unsupported RISC-V target OS: ${CMAKE_SYSTEM_NAME}") endif() else() message(FATAL_ERROR 
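The ARM variant lists above bundle optional ISA features such as DOTPROD, SVE, MATMUL_INT8 and SME. As a hedged aside, an application can query at run time which of these features the CPU backend detected; the sketch below assumes the ggml_cpu_has_* helpers declared in ggml-cpu.h (names recalled from memory and worth checking against the installed header).

    #include "ggml-cpu.h"
    #include <stdio.h>

    static void print_arm_cpu_features(void) {
        printf("NEON:        %d\n", ggml_cpu_has_neon());
        printf("DOTPROD:     %d\n", ggml_cpu_has_dotprod());
        printf("SVE:         %d\n", ggml_cpu_has_sve());
        printf("MATMUL_INT8: %d\n", ggml_cpu_has_matmul_int8());
        printf("SME:         %d\n", ggml_cpu_has_sme());
    }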
"GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") endif() elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") endif() ggml_add_backend(BLAS) ggml_add_backend(CANN) ggml_add_backend(CUDA) ggml_add_backend(HIP) ggml_add_backend(METAL) ggml_add_backend(MUSA) ggml_add_backend(RPC) ggml_add_backend(SYCL) ggml_add_backend(Vulkan) ggml_add_backend(WebGPU) ggml_add_backend(zDNN) ggml_add_backend(OpenCL) ggml_add_backend(Hexagon) ggml_add_backend(ZenDNN) foreach (target ggml-base ggml) target_include_directories(${target} PUBLIC $ $) target_compile_features (${target} PRIVATE c_std_11 cxx_std_17) # don't bump endforeach() target_link_libraries(ggml-base PRIVATE Threads::Threads) find_library(MATH_LIBRARY m) if (MATH_LIBRARY) if (NOT WIN32 OR NOT DEFINED ENV{ONEAPI_ROOT}) target_link_libraries(ggml-base PRIVATE m) endif() endif() if (CMAKE_SYSTEM_NAME MATCHES "Android") target_link_libraries(ggml-base PRIVATE dl) endif() if(CMAKE_SYSTEM_NAME MATCHES "visionOS") target_compile_definitions(ggml-base PUBLIC _DARWIN_C_SOURCE) endif() if (BUILD_SHARED_LIBS) foreach (target ggml-base ggml) set_target_properties(${target} PROPERTIES POSITION_INDEPENDENT_CODE ON) target_compile_definitions(${target} PRIVATE GGML_BUILD) target_compile_definitions(${target} PUBLIC GGML_SHARED) endforeach() endif() ggml-org-ggml-3678254/src/ggml-alloc.c000066400000000000000000001352251512524704700173450ustar00rootroot00000000000000#include "ggml-alloc.h" #include "ggml-backend-impl.h" #include "ggml.h" #include "ggml-impl.h" #include #include #include #include #include #include #define MAX(a, b) ((a) > (b) ? (a) : (b)) #define MAX_FREE_BLOCKS 256 //#define GGML_ALLOCATOR_DEBUG //#define AT_PRINTF(...) GGML_LOG_DEBUG(__VA_ARGS__) #define AT_PRINTF(...) 
static bool ggml_is_view(const struct ggml_tensor * t) { return t->view_src != NULL; } // ops that return true for this function must not use restrict pointers for their backend implementations bool ggml_op_can_inplace(enum ggml_op op) { switch (op) { case GGML_OP_FILL: case GGML_OP_SCALE: case GGML_OP_DIAG_MASK_ZERO: case GGML_OP_DIAG_MASK_INF: case GGML_OP_ADD: case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_LOG: case GGML_OP_UNARY: case GGML_OP_ROPE: case GGML_OP_ROPE_BACK: case GGML_OP_SILU_BACK: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: case GGML_OP_SOFT_MAX: case GGML_OP_SOFT_MAX_BACK: return true; default: return false; } } static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) { assert(alignment && !(alignment & (alignment - 1))); // power of 2 size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment; return offset + align; } // tallocr struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) { void * base = ggml_backend_buffer_get_base(buffer); size_t align = ggml_backend_buffer_get_alignment(buffer); assert(align && !(align & (align - 1))); // power of 2 struct ggml_tallocr talloc = (struct ggml_tallocr) { /*.buffer = */ buffer, /*.base = */ base, /*.alignment = */ align, /*.offset = */ aligned_offset(base, 0, align), }; return talloc; } enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) { size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor); size = GGML_PAD(size, talloc->alignment); if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) { GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n", __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset); GGML_ABORT("not enough space in the buffer"); } void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset; talloc->offset += size; assert(((uintptr_t)addr % talloc->alignment) == 0); return ggml_backend_tensor_alloc(talloc->buffer, tensor, addr); } // dynamic tensor allocator #define GGML_VBUFFER_MAX_CHUNKS 16 // relative memory address within an allocation that can be split into multiple buffers (chunks) struct buffer_address { int chunk; // index of a backend buffer size_t offset; // local memory offset within the buffer }; static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX }; static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) { return a.chunk != b.chunk ? 
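A short usage sketch for the fixed-buffer allocator above (ggml_tallocr): tensors are described in a no_alloc ggml context and then placed one after another into a single backend buffer. ggml_backend_cpu_buffer_type() is assumed to be provided by the CPU backend headers; the sizes are illustrative and include a little headroom for alignment padding.

    static void tallocr_example(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 8*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        struct ggml_context * ctx = ggml_init(params);
        struct ggml_tensor  * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
        struct ggml_tensor  * b   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);

        ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(
            ggml_backend_cpu_buffer_type(), 2*1024*sizeof(float) + 256);

        struct ggml_tallocr talloc = ggml_tallocr_new(buf);
        ggml_tallocr_alloc(&talloc, a); // a->data now points into buf
        ggml_tallocr_alloc(&talloc, b); // b is placed right after a, padded to the buffer alignment

        // ... use a and b ...

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);
    }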
a.chunk < b.chunk : a.offset < b.offset; } struct free_block { size_t offset; size_t size; }; struct tallocr_chunk { struct free_block free_blocks[MAX_FREE_BLOCKS]; int n_free_blocks; size_t max_size; }; struct ggml_dyn_tallocr { size_t alignment; size_t max_chunk_size; struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS]; int n_chunks; #ifdef GGML_ALLOCATOR_DEBUG struct { const struct ggml_tensor * tensor; struct buffer_address addr; } allocated_tensors[1024]; #endif }; static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) { GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks"); // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster) int insert_pos = 0; while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) { insert_pos++; } // shift all blocks from insert_pos onward to make room for the new block for (int i = chunk->n_free_blocks; i > insert_pos; i--) { chunk->free_blocks[i] = chunk->free_blocks[i-1]; } // insert the new block chunk->free_blocks[insert_pos].offset = offset; chunk->free_blocks[insert_pos].size = size; chunk->n_free_blocks++; } static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) { // shift all elements after idx by 1 to the left, overwriting the element at idx for (int i = idx; i < chunk->n_free_blocks; i++) { chunk->free_blocks[i] = chunk->free_blocks[i+1]; } chunk->n_free_blocks--; } static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) { if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) { return -1; } struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk)); chunk->n_free_blocks = 1; chunk->free_blocks[0].offset = 0; // available space in a chunk is limited to max_chunk_size, but can be higher if: // 1. a single tensor exceeds the maximum, and cannot fit any other way // 2. we are running out of chunks // backends will either manage to allocate the larger size, or report an error. 
chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size); if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) { chunk->free_blocks[0].size = SIZE_MAX/2; } alloc->chunks[alloc->n_chunks] = chunk; alloc->n_chunks++; return alloc->n_chunks - 1; } #ifdef GGML_ALLOCATOR_DEBUG static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) { for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i].tensor == NULL) { alloc->allocated_tensors[i].tensor = tensor; alloc->allocated_tensors[i].addr = addr; return; } } GGML_ABORT("out of allocated_tensors"); } static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) { for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) { alloc->allocated_tensors[i].tensor = NULL; return; } } GGML_ABORT("tried to free tensor %s not found\n", tensor->name); } #endif static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) { size = aligned_offset(NULL, size, alloc->alignment); AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size); int best_fit_chunk = -1; int best_fit_block = -1; size_t max_avail = 0; // find the best fitting free block besides the last block, within any chunk for (int c = 0; c < alloc->n_chunks; ++c) { struct tallocr_chunk * chunk = alloc->chunks[c]; size_t best_fit_size = SIZE_MAX; for (int i = 0; i < chunk->n_free_blocks - 1; i++) { struct free_block * block = &chunk->free_blocks[i]; max_avail = MAX(max_avail, block->size); if (block->size >= size && block->size <= best_fit_size) { best_fit_chunk = c; best_fit_block = i; best_fit_size = block->size; } } } if (best_fit_block == -1) { // no suitable block found, try the last block (this may grow a chunks size) int64_t best_reuse = INT64_MIN; for (int c = 0; c < alloc->n_chunks; ++c) { struct tallocr_chunk * chunk = alloc->chunks[c]; if (chunk->n_free_blocks > 0) { struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1]; max_avail = MAX(max_avail, block->size); int64_t reuse_factor = chunk->max_size - block->offset - size; // reuse_factor < 0 : amount of extra memory that needs to be allocated // reuse_factor = 0 : allocated free space exactly matches tensor size // reuse_factor > 0 : superfluous memory that will remain unused bool better_reuse = best_reuse < 0 && reuse_factor > best_reuse; bool better_fit = reuse_factor >= 0 && reuse_factor < best_reuse; if (block->size >= size && (better_reuse || better_fit)) { best_fit_chunk = c; best_fit_block = chunk->n_free_blocks - 1; best_reuse = reuse_factor; } } } } if (best_fit_block == -1) { // none of the existing chunks have enough space left best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size); best_fit_block = 0; } if (best_fit_chunk == -1) { // since the last chunk always has virtually endless memory, this should never happen GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n", __func__, size, max_avail); GGML_ABORT("graph allocation: failed to reserve memory"); } struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk]; struct free_block * block = &chunk->free_blocks[best_fit_block]; struct buffer_address addr = {.chunk = best_fit_chunk, .offset = block->offset }; block->offset += size; block->size -= size; if (block->size == 
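// A worked example of the reuse_factor heuristic above (illustrative numbers, not from the code):
// for a 256-byte request, a chunk with max_size 1024 whose last free block starts at offset 896
// gives reuse_factor = 1024 - 896 - 256 = -128, i.e. the chunk would have to grow by 128 bytes,
// while a chunk whose last free block starts at offset 640 gives 1024 - 640 - 256 = 128, i.e. the
// request fits in already-committed space with 128 bytes to spare. The second chunk is chosen:
// a non-negative reuse_factor always beats a negative one, negative candidates are ranked by the
// least required growth, and non-negative candidates by the tightest fit.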
0) { // remove block if empty ggml_dyn_tallocr_remove_block(chunk, best_fit_block); } AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk); #ifdef GGML_ALLOCATOR_DEBUG add_allocated_tensor(alloc, addr, tensor); size_t cur_max = addr.offset + size; if (cur_max > chunk->max_size) { // sort allocated_tensors by chunk/offset for (int i = 0; i < 1024; i++) { for (int j = i + 1; j < 1024; j++) { if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) { const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor; struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr; alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor; alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr; alloc->allocated_tensors[j].tensor = tmp_tensor; alloc->allocated_tensors[j].addr = tmp_addr; } } } GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0); for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i].tensor) { GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name, alloc->allocated_tensors[i].addr.chunk, alloc->allocated_tensors[i].addr.offset, alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor), ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0); } } GGML_LOG_DEBUG("\n"); } #endif chunk->max_size = MAX(chunk->max_size, addr.offset + size); return addr; GGML_UNUSED(tensor); } // this is a very naive implementation, but for our case the number of free blocks should be very small static void ggml_dyn_tallocr_free_bytes(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size) { size = aligned_offset(NULL, size, alloc->alignment); struct tallocr_chunk * chunk = alloc->chunks[addr.chunk]; // see if we can merge with an existing block for (int i = 0; i < chunk->n_free_blocks; i++) { struct free_block * block = &chunk->free_blocks[i]; // check if ptr is at the end of the block if (block->offset + block->size == addr.offset) { block->size += size; // check if we can merge with the next block if (i < chunk->n_free_blocks - 1) { struct free_block * next = &chunk->free_blocks[i+1]; if (block->offset + block->size == next->offset) { block->size += next->size; ggml_dyn_tallocr_remove_block(chunk, i+1); } } return; } // check if ptr is at the beginning of the block if (addr.offset + size == block->offset) { block->offset = addr.offset; block->size += size; // check if we can merge with the previous block if (i > 0) { struct free_block * prev = &chunk->free_blocks[i-1]; if (prev->offset + prev->size == block->offset) { prev->size += block->size; ggml_dyn_tallocr_remove_block(chunk, i); } } return; } } // otherwise, add a new block ggml_dyn_tallocr_insert_block(chunk, addr.offset, size); } static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) { for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) { free(alloc->chunks[i]); alloc->chunks[i] = NULL; } alloc->n_chunks = 0; #ifdef GGML_ALLOCATOR_DEBUG for (int i = 0; i < 1024; i++) { alloc->allocated_tensors[i].tensor = NULL; } #endif } static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) { struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr)); *alloc = (struct ggml_dyn_tallocr) { /*.alignment = */ alignment, /*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows /*.chunks = */ 
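// A worked illustration of the merging in ggml_dyn_tallocr_free_bytes above (illustrative offsets):
// if a chunk has free blocks [0, 256) and [512, 768) and the 256 bytes at offset 256 are freed,
// the first branch extends [0, 256) to [0, 512) because the freed range starts exactly at its end,
// and the follow-up check then merges it with [512, 768), leaving a single free block [0, 768).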
{NULL}, /*.n_chunks = */ 0, #ifdef GGML_ALLOCATOR_DEBUG /*.allocated_tensors = */ {{0}}, #endif }; ggml_dyn_tallocr_reset(alloc); return alloc; } static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) { for (int i = 0; i < alloc->n_chunks; ++i) { free(alloc->chunks[i]); } free(alloc); } static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc, int chunk) { return chunk < alloc->n_chunks ? alloc->chunks[chunk]->max_size : 0; } // virtual buffer with contiguous memory range, split into multiple backend buffers (chunks) struct vbuffer { ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS]; }; static void ggml_vbuffer_free(struct vbuffer * buf) { if (buf == NULL) { return; } for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) { ggml_backend_buffer_free(buf->chunks[i]); } free(buf); } static size_t ggml_vbuffer_chunk_size(struct vbuffer * buf, int chunk) { return buf->chunks[chunk] ? ggml_backend_buffer_get_size(buf->chunks[chunk]) : 0; } static size_t ggml_vbuffer_size(struct vbuffer * buf) { size_t size = 0; for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) { size += ggml_backend_buffer_get_size(buf->chunks[i]); } return size; } static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) { struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer)); if (buf == NULL) { return NULL; } for (int n = 0; n < talloc->n_chunks; n++) { size_t chunk_size = talloc->chunks[n]->max_size; buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size); if (buf->chunks[n] == NULL) { ggml_vbuffer_free(buf); return NULL; } ggml_backend_buffer_set_usage(buf->chunks[n], usage); } return buf; } static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) { void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]); void * addr = (char *)base + buf_addr.offset; ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr); } static void ggml_vbuffer_reset(struct vbuffer * buf) { for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) { ggml_backend_buffer_reset(buf->chunks[i]); } } ///////////////////////////////////// // graph allocator struct hash_node { int n_children; int n_views; int buffer_id; struct buffer_address addr; bool allocated; }; struct tensor_alloc { int buffer_id; struct buffer_address addr; size_t size_max; // 0 = pre-allocated, unused, or view }; struct leaf_alloc { struct tensor_alloc leaf; }; struct node_alloc { struct tensor_alloc dst; struct tensor_alloc src[GGML_MAX_SRC]; }; struct ggml_gallocr { ggml_backend_buffer_type_t * bufts; // [n_buffers] struct vbuffer ** buffers; // [n_buffers] struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers] int n_buffers; struct ggml_hash_set hash_set; struct hash_node * hash_values; // [hash_set.size] struct node_alloc * node_allocs; // [n_nodes] int n_nodes; struct leaf_alloc * leaf_allocs; // [n_leafs] int n_leafs; }; ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) { ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr)); GGML_ASSERT(galloc != NULL); galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t)); GGML_ASSERT(galloc->bufts != NULL); galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *)); GGML_ASSERT(galloc->buffers != NULL); galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *)); GGML_ASSERT(galloc->buf_tallocs 
!= NULL); for (int i = 0; i < n_bufs; i++) { galloc->bufts[i] = bufts[i]; galloc->buffers[i] = NULL; // check if the same buffer type is used multiple times and reuse the same allocator for (int j = 0; j < i; j++) { if (bufts[i] == bufts[j]) { galloc->buf_tallocs[i] = galloc->buf_tallocs[j]; break; } } if (galloc->buf_tallocs[i] == NULL) { size_t alignment = ggml_backend_buft_get_alignment(bufts[i]); size_t max_size = ggml_backend_buft_get_max_size(bufts[i]); galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size); } } galloc->n_buffers = n_bufs; return galloc; } ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) { return ggml_gallocr_new_n(&buft, 1); } void ggml_gallocr_free(ggml_gallocr_t galloc) { if (galloc == NULL) { return; } for (int i = 0; i < galloc->n_buffers; i++) { if (galloc->buffers != NULL) { // skip if already freed bool freed = false; for (int j = 0; j < i; j++) { if (galloc->buffers[j] == galloc->buffers[i]) { freed = true; break; } } if (!freed) { ggml_vbuffer_free(galloc->buffers[i]); } } if (galloc->buf_tallocs != NULL) { // skip if already freed bool freed = false; for (int j = 0; j < i; j++) { if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) { freed = true; break; } } if (!freed) { ggml_dyn_tallocr_free(galloc->buf_tallocs[i]); } } } ggml_hash_set_free(&galloc->hash_set); free(galloc->hash_values); free(galloc->bufts); free(galloc->buffers); free(galloc->buf_tallocs); free(galloc->node_allocs); free(galloc->leaf_allocs); free(galloc); } typedef struct ggml_gallocr * ggml_gallocr_t; static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) { size_t i = ggml_hash_find_or_insert(&galloc->hash_set, t); return &galloc->hash_values[i]; } static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) { return ggml_gallocr_hash_get(galloc, t)->allocated; } static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) { return t->data != NULL // tensor data already set externally || t->buffer // tensor on external buffer (but not yet allocated) || ggml_gallocr_is_own(galloc, t); // tensor will be allocated by galloc } // free the extra space at the end if the new tensor is smaller static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) { struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent); size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); GGML_ASSERT(parent_size >= node_size); // note: we want after the freeing the chunks to continue to be aligned struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id]; parent_size = aligned_offset(NULL, parent_size, p_alloc->alignment); node_size = aligned_offset(NULL, node_size, p_alloc->alignment); if (parent_size > node_size) { struct buffer_address p_addr = p_hn->addr; p_addr.offset += node_size; size_t extra_size = parent_size - node_size; AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name); ggml_dyn_tallocr_free_bytes(p_alloc, p_addr, extra_size); } } static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { GGML_ASSERT(buffer_id >= 0); struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); if (!ggml_gallocr_is_allocated(galloc, node) && 
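For orientation, a hedged usage sketch of the graph allocator built here: ggml_gallocr_new() and ggml_gallocr_free() are defined above, ggml_gallocr_alloc_graph() (and ggml_gallocr_reserve()) are assumed from ggml-alloc.h, ggml_backend_cpu_buffer_type() from the CPU backend headers, and the graph-building calls from ggml.h; shapes and sizes are illustrative.

    static void gallocr_example(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ggml_graph_overhead() + 16*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true, // tensor data is placed by the graph allocator, not by ggml_init
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
        struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
        struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);

        struct ggml_cgraph * gf = ggml_new_graph(ctx);
        ggml_build_forward_expand(gf, c);

        ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type());
        ggml_gallocr_alloc_graph(galloc, gf); // reserves buffers and assigns addresses for all tensors

        // ... fill a and b, run the graph on a backend, read c ...

        ggml_gallocr_free(galloc);
        ggml_free(ctx);
    }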
!ggml_is_view(node)) { hn->allocated = true; assert(hn->addr.offset == 0); // try to reuse a parent's buffer (inplace) if (ggml_op_can_inplace(node->op)) { for (int i = 0; i < GGML_MAX_SRC; i++) { struct ggml_tensor * parent = node->src[i]; if (parent == NULL) { continue; } // if the node's data is external, then we cannot re-use it if (!ggml_gallocr_is_own(galloc, parent)) { AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data); continue; } // outputs cannot be reused if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) { AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name); continue; } if (!ggml_are_same_layout(node, parent)) { AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name); continue; } struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); if (p_hn->n_children == 1 && p_hn->n_views == 0) { if (ggml_is_view(parent)) { struct ggml_tensor * view_src = parent->view_src; struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) { AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name); assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset); hn->buffer_id = p_hn->buffer_id; hn->addr = p_hn->addr; p_hn->allocated = false; // avoid freeing the parent view_src_hn->allocated = false; ggml_gallocr_free_extra_space(galloc, node, view_src); return; } } else { AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name); hn->buffer_id = p_hn->buffer_id; hn->addr = p_hn->addr; p_hn->allocated = false; // avoid freeing the parent ggml_gallocr_free_extra_space(galloc, node, parent); return; } } } } // allocate tensor from the buffer struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; size_t size = ggml_backend_buft_get_alloc_size(buft, node); hn->buffer_id = buffer_id; hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node); } } static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) { // graph outputs are never freed if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { AT_PRINTF("not freeing output %s\n", node->name); return; } struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); int buffer_id = hn->buffer_id; struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; size_t size = ggml_backend_buft_get_alloc_size(buft, node); AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n", __func__, node->name, hn->addr.chunk, hn->addr.offset, size, alloc->chunks[hn->addr.chunk]->n_free_blocks); #ifdef GGML_ALLOCATOR_DEBUG remove_allocated_tensor(alloc, hn->addr, node); #endif ggml_dyn_tallocr_free_bytes(alloc, hn->addr, size); hn->allocated = false; } static int get_node_buffer_id(const int * node_buffer_ids, int i) { return node_buffer_ids ? 
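// A concrete reading of the in-place reuse above (illustrative): for c = ggml_add(ctx, a, b), where
// a is an intermediate consumed only by this node (n_children == 1, n_views == 0), is not flagged as
// an output, is not a view, and has the same layout as c, the node inherits a's buffer_id and
// address; a's allocation is handed over rather than freed, which is why p_hn->allocated is cleared
// so that the block is not also returned to the free list.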
node_buffer_ids[i] : 0; } static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { // clear hash tables ggml_hash_set_reset(&galloc->hash_set); memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size); // allocate leafs // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes for (int i = 0; i < graph->n_leafs; i++) { struct ggml_tensor * leaf = graph->leafs[i]; ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i)); } // count number of children and views // allocate other graph inputs and leafs first to avoid overwriting them for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; // TODO: better way to add external dependencies // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node // itself is never used and should not be considered a dependency if (ggml_is_view(node) && node->op != GGML_OP_NONE) { struct ggml_tensor * view_src = node->view_src; ggml_gallocr_hash_get(galloc, view_src)->n_views += 1; } if (node->flags & GGML_TENSOR_FLAG_INPUT) { ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i)); } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } ggml_gallocr_hash_get(galloc, src)->n_children += 1; // allocate explicit inputs if (src->flags & GGML_TENSOR_FLAG_INPUT) { ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i)); } } } // allocate tensors for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; int buffer_id = get_node_buffer_id(node_buffer_ids, i); // allocate parents (only leafs need to be allocated at this point) for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * parent = node->src[j]; if (parent == NULL) { continue; } ggml_gallocr_allocate_node(galloc, parent, buffer_id); } // allocate node ggml_gallocr_allocate_node(galloc, node, buffer_id); AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name); for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * parent = node->src[j]; if (parent == NULL) { continue; } AT_PRINTF("%s", parent->name); if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { AT_PRINTF(", "); } } AT_PRINTF("\n"); // update parents for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * parent = node->src[j]; if (parent == NULL) { continue; } struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); p_hn->n_children -= 1; AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n", parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated); if (p_hn->n_children == 0 && p_hn->n_views == 0) { if (ggml_is_view(parent)) { struct ggml_tensor * view_src = parent->view_src; struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); view_src_hn->n_views -= 1; AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views); if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) { ggml_gallocr_free_node(galloc, view_src); } } else if (p_hn->allocated) { ggml_gallocr_free_node(galloc, parent); } } AT_PRINTF("\n"); } } } static bool ggml_gallocr_reserve_n_impl( 
ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, bool no_alloc) { size_t min_hash_size = graph->n_nodes + graph->n_leafs; // add 25% margin to avoid hash collisions min_hash_size += min_hash_size / 4; // initialize hash table if (galloc->hash_set.size < min_hash_size) { ggml_hash_set_free(&galloc->hash_set); galloc->hash_set = ggml_hash_set_new(min_hash_size); GGML_ASSERT(galloc->hash_set.keys != NULL); free(galloc->hash_values); galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size); GGML_ASSERT(galloc->hash_values != NULL); } // reset allocators for (int i = 0; i < galloc->n_buffers; i++) { ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]); } // allocate in hash table ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids); // set the node_allocs from the hash table if (galloc->n_nodes < graph->n_nodes) { free(galloc->node_allocs); galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc)); GGML_ASSERT(galloc->node_allocs != NULL); } galloc->n_nodes = graph->n_nodes; for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; struct node_alloc * node_alloc = &galloc->node_allocs[i]; if (node->view_src || node->data) { node_alloc->dst.buffer_id = -1; node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID; node_alloc->dst.size_max = 0; } else { struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); node_alloc->dst.buffer_id = hn->buffer_id; node_alloc->dst.addr = hn->addr; node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (!src || src->view_src || src->data) { node_alloc->src[j].buffer_id = -1; node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID; node_alloc->src[j].size_max = 0; } else { struct hash_node * hn = ggml_gallocr_hash_get(galloc, src); node_alloc->src[j].buffer_id = hn->buffer_id; node_alloc->src[j].addr = hn->addr; node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src); } } } if (galloc->n_leafs < graph->n_leafs) { free(galloc->leaf_allocs); galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0])); GGML_ASSERT(galloc->leaf_allocs != NULL); } galloc->n_leafs = graph->n_leafs; for (int i = 0; i < graph->n_leafs; i++) { struct ggml_tensor * leaf = graph->leafs[i]; struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf); if (leaf->view_src || leaf->data) { galloc->leaf_allocs[i].leaf.buffer_id = -1; galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID; galloc->leaf_allocs[i].leaf.size_max = 0; } else { galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id; galloc->leaf_allocs[i].leaf.addr = hn->addr; galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf); } } // reallocate buffers if needed for (int i = 0; i < galloc->n_buffers; i++) { // if the buffer type is used multiple times, we reuse the same buffer for (int j = 0; j < i; j++) { if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) { galloc->buffers[i] = galloc->buffers[j]; break; } } // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views bool realloc = galloc->buffers[i] == NULL; size_t new_size = 0; for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) { size_t cur_chunk_size = galloc->buffers[i] ? 
ggml_vbuffer_chunk_size(galloc->buffers[i], c) : 0; size_t new_chunk_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i], c); new_size += new_chunk_size; if (new_chunk_size > cur_chunk_size) { realloc = true; } } if (realloc) { #ifndef NDEBUG { size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0; if (cur_size > 0) { GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); } } #endif ggml_vbuffer_free(galloc->buffers[i]); if (no_alloc) { galloc->buffers[i] = NULL; } else { galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE); if (galloc->buffers[i] == NULL) { GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size); return false; } } } } return true; } void ggml_gallocr_reserve_n_size( ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids, size_t * sizes) { GGML_ASSERT(ggml_gallocr_reserve_n_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids, /*no_alloc =*/ true)); for (int i = 0; i < galloc->n_buffers; i++) { sizes[i] = 0; for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) { sizes[i] += galloc->buf_tallocs[i]->chunks[c]->max_size; } } } bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { return ggml_gallocr_reserve_n_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids, /*no_alloc =*/ false); } bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) { return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL); } static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) { int buffer_id = tensor_alloc->buffer_id; assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max); if (tensor->view_src != NULL) { if (tensor->buffer == NULL) { assert(tensor_alloc->addr.offset == SIZE_MAX); if (tensor->view_src->buffer == NULL) { // this tensor was allocated without ggml-backend return; } ggml_backend_view_init(tensor); } } else { if (tensor->data == NULL) { assert(tensor_alloc->addr.offset != SIZE_MAX); assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max); ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr); } else { if (tensor->buffer == NULL) { // this tensor was allocated without ggml-backend return; } } } } static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) { size_t node_size = 0; if (!node->data && !node->view_src) { // If we previously had data but don't now then reallocate if (talloc->buffer_id < 0) { return false; } node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node); } return talloc->size_max >= node_size; } static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { if (galloc->n_nodes != graph->n_nodes) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__); #endif return true; } if (galloc->n_leafs != graph->n_leafs) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__); #endif return true; } for (int i = 0; i < 
graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; struct node_alloc * node_alloc = &galloc->node_allocs[i]; if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name); #endif return true; } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name); #endif return true; } } } return false; } bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { if (ggml_gallocr_needs_realloc(galloc, graph)) { if (galloc->n_buffers == 1) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__); #endif if (!ggml_gallocr_reserve(galloc, graph)) { return false; } } else { #ifndef NDEBUG GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__); #endif return false; } } // reset buffers for (int i = 0; i < galloc->n_buffers; i++) { if (galloc->buffers[i] != NULL) { ggml_vbuffer_reset(galloc->buffers[i]); } } // allocate the graph tensors from the previous assignments // leafs for (int i = 0; i < graph->n_leafs; i++) { struct ggml_tensor * leaf = graph->leafs[i]; struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i]; ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf); } // nodes for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; struct node_alloc * node_alloc = &galloc->node_allocs[i]; for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]); } ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst); } return true; } size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) { GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers); if (galloc->buffers[buffer_id] == NULL) { return 0; } for (int i = 0; i < buffer_id; i++) { if (galloc->buffers[i] == galloc->buffers[buffer_id]) { // this buffer is the same as a previous one due to the same buffer type being used multiple times // only return the buffer size the first time it appears to avoid double counting return 0; } } return ggml_vbuffer_size(galloc->buffers[buffer_id]); } // utils static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) { for (size_t i = 0; i < *n_buffers; i++) { ggml_backend_buffer_free((*buffers)[i]); } free(*buffers); } static bool alloc_tensor_range(struct ggml_context * ctx, struct ggml_tensor * first, struct ggml_tensor * last, ggml_backend_buffer_type_t buft, size_t size, ggml_backend_buffer_t ** buffers, size_t * n_buffers) { ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); if (buffer == NULL) { GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size); free_buffers(buffers, n_buffers); return false; } *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1)); (*buffers)[(*n_buffers)++] = buffer; struct ggml_tallocr tallocr = ggml_tallocr_new(buffer); for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) { enum ggml_status status = GGML_STATUS_SUCCESS; if (t->data == NULL) { if (t->view_src == NULL) { status = ggml_tallocr_alloc(&tallocr, t); } else if 
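// [illustrative sketch, not part of the original ggml sources] Typical use of the graph
// allocator defined above: build the graph in a no_alloc context, let the gallocr size and
// allocate the compute buffer, then upload inputs. For a single-buffer gallocr,
// ggml_gallocr_alloc_graph() reserves automatically (see above), so the explicit reserve is
// optional. Shapes are arbitrary example values; guarded with #if 0.
#if 0
#include <stdio.h>
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-cpu.h"

static void gallocr_example(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead()*8 + ggml_graph_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,    // tensor data lives in the gallocr buffer, not in the context
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 16);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64,  8);
    struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);

    ggml_backend_t backend = ggml_backend_cpu_init();
    ggml_gallocr_t galloc  = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));

    ggml_gallocr_reserve(galloc, gf);       // optional here: alloc_graph would reserve on its own
    ggml_gallocr_alloc_graph(galloc, gf);   // assigns an address to every graph tensor
    printf("compute buffer: %zu bytes\n", ggml_gallocr_get_buffer_size(galloc, 0));

    // ... fill a and b with ggml_backend_tensor_set(), then ggml_backend_graph_compute(backend, gf) ...

    ggml_gallocr_free(galloc);
    ggml_backend_free(backend);
    ggml_free(ctx);
}
#endif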
(t->buffer == NULL) { status = ggml_backend_view_init(t); } } else { if (t->view_src != NULL && t->buffer == NULL) { // view of a pre-allocated tensor status = ggml_backend_view_init(t); } } if (status != GGML_STATUS_SUCCESS) { GGML_LOG_ERROR("%s: failed to initialize tensor %s\n", __func__, t->name); free_buffers(buffers, n_buffers); return false; } } return true; } static ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft_impl( struct ggml_context * ctx, ggml_backend_buffer_type_t buft, size_t * nbytes_total, bool no_alloc) { GGML_ASSERT(ggml_get_no_alloc(ctx) == true); size_t alignment = ggml_backend_buft_get_alignment(buft); size_t max_size = ggml_backend_buft_get_max_size(buft); ggml_backend_buffer_t * buffers = NULL; size_t n_buffers = 0; *nbytes_total = 0; size_t cur_buf_size = 0; struct ggml_tensor * first = ggml_get_first_tensor(ctx); for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) { size_t this_size = 0; if (t->data == NULL && t->view_src == NULL) { this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment); } if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) { // allocate tensors in the current buffer if (!no_alloc && !alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { return NULL; } first = t; *nbytes_total += cur_buf_size; cur_buf_size = this_size; } else { cur_buf_size += this_size; } } // allocate remaining tensors if (cur_buf_size > 0) { *nbytes_total += cur_buf_size; if (!no_alloc && !alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) { return NULL; } } if (no_alloc) { return NULL; } if (n_buffers == 0) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__); #endif GGML_ASSERT(!buffers); return NULL; } ggml_backend_buffer_t buffer; if (n_buffers == 1) { buffer = buffers[0]; } else { buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers); } if (buffers) { free(buffers); // can be NULL if context is empty or no_alloc } return buffer; } size_t ggml_backend_alloc_ctx_tensors_from_buft_size(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { size_t nbytes_total = 0; ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft_impl(ctx, buft, &nbytes_total, /*no_alloc=*/ true); GGML_ASSERT(!buf); return nbytes_total; } ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { size_t nbytes_total = 0; return ggml_backend_alloc_ctx_tensors_from_buft_impl(ctx, buft, &nbytes_total, /*no_alloc =*/ false); } ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) { return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend)); } ggml-org-ggml-3678254/src/ggml-backend-impl.h000066400000000000000000000273731512524704700206120ustar00rootroot00000000000000#pragma once // ggml-backend internal header #include "ggml-backend.h" #ifdef __cplusplus extern "C" { #endif #define GGML_BACKEND_API_VERSION 2 // // Backend buffer type // struct ggml_backend_buffer_type_i { const char * (*get_name) (ggml_backend_buffer_type_t buft); // allocate a buffer of this type ggml_backend_buffer_t (*alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size); // tensor alignment size_t (*get_alignment) (ggml_backend_buffer_type_t buft); // (optional) max buffer size that can be allocated (defaults to SIZE_MAX) size_t (*get_max_size) 
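// [illustrative sketch, not part of the original ggml sources] Using the helpers above to
// place every tensor of a context in a single backend buffer:
// ggml_backend_alloc_ctx_tensors_from_buft_size() does a dry run to report the total size,
// ggml_backend_alloc_ctx_tensors_from_buft() performs the allocation. Guarded with #if 0.
#if 0
#include <stdio.h>

static ggml_backend_buffer_t alloc_weights_example(struct ggml_context * ctx, ggml_backend_t backend) {
    // ctx must have been created with .no_alloc = true, as asserted above
    ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);

    size_t nbytes = ggml_backend_alloc_ctx_tensors_from_buft_size(ctx, buft);
    printf("tensors need %zu bytes in %s\n", nbytes, ggml_backend_buft_name(buft));

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
    if (buf == NULL) {
        // allocation failed, or every tensor in ctx was already allocated
        return NULL;
    }
    ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
    return buf;
}
#endif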
(ggml_backend_buffer_type_t buft); // (optional) data size needed to allocate the tensor, including padding (defaults to ggml_nbytes) size_t (*get_alloc_size)(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // (optional) check if tensor data is in host memory and uses standard ggml tensor layout (defaults to false) bool (*is_host) (ggml_backend_buffer_type_t buft); }; struct ggml_backend_buffer_type { struct ggml_backend_buffer_type_i iface; ggml_backend_dev_t device; void * context; }; // // Backend buffer // struct ggml_backend_buffer_i { // (optional) free the buffer void (*free_buffer) (ggml_backend_buffer_t buffer); // base address of the buffer void * (*get_base) (ggml_backend_buffer_t buffer); // (optional) initialize a tensor in the buffer (eg. add tensor extras) enum ggml_status (*init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // tensor data access void (*memset_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size); void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported) bool (*cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // clear the entire buffer void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); // (optional) reset any internal state due to tensor initialization, such as tensor extras void (*reset) (ggml_backend_buffer_t buffer); }; struct ggml_backend_buffer { struct ggml_backend_buffer_i iface; ggml_backend_buffer_type_t buft; void * context; size_t size; enum ggml_backend_buffer_usage usage; }; GGML_API ggml_backend_buffer_t ggml_backend_buffer_init( ggml_backend_buffer_type_t buft, struct ggml_backend_buffer_i iface, void * context, size_t size); // do not use directly, use ggml_backend_tensor_copy instead GGML_API bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst); // multi-buffer // buffer that contains a collection of buffers GGML_API ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers); GGML_API bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer); GGML_API void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); // // Backend (stream) // struct ggml_backend_i { const char * (*get_name)(ggml_backend_t backend); void (*free)(ggml_backend_t backend); // (optional) asynchronous tensor data access void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); bool (*cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst); // (optional) complete all pending operations (required if the backend supports async operations) void (*synchronize)(ggml_backend_t backend); // (optional) graph plans (not used currently) // compute graph with a plan ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * 
cgraph); void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology void (*graph_plan_update) (ggml_backend_t backend, ggml_backend_graph_plan_t plan, const struct ggml_cgraph * cgraph); // compute the graph with the plan enum ggml_status (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); // compute graph (always async if supported by the backend) enum ggml_status (*graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph); // (optional) event synchronization // record an event on this stream void (*event_record)(ggml_backend_t backend, ggml_backend_event_t event); // wait for an event on on a different stream void (*event_wait) (ggml_backend_t backend, ggml_backend_event_t event); // (optional) sort/optimize the nodes in the graph void (*graph_optimize) (ggml_backend_t backend, struct ggml_cgraph * cgraph); }; struct ggml_backend { ggml_guid_t guid; struct ggml_backend_i iface; ggml_backend_dev_t device; void * context; }; struct ggml_backend_event { struct ggml_backend_device * device; void * context; }; // // Backend device // // Note: if additional properties are needed, we should add a struct with all of them // the current functions to obtain the properties can remain, since they are more convenient for often used properties struct ggml_backend_device_i { // device name: short identifier for this device, such as "CPU" or "CUDA0" const char * (*get_name)(ggml_backend_dev_t dev); // device description: short informative description of the device, could be the model name const char * (*get_description)(ggml_backend_dev_t dev); // device memory in bytes void (*get_memory)(ggml_backend_dev_t dev, size_t * free, size_t * total); // device type enum ggml_backend_dev_type (*get_type)(ggml_backend_dev_t dev); // device properties void (*get_props)(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props); // backend (stream) initialization ggml_backend_t (*init_backend)(ggml_backend_dev_t dev, const char * params); // preferred buffer type ggml_backend_buffer_type_t (*get_buffer_type)(ggml_backend_dev_t dev); // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device) ggml_backend_buffer_type_t (*get_host_buffer_type)(ggml_backend_dev_t dev); // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries) ggml_backend_buffer_t (*buffer_from_host_ptr)(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size); // check if the backend can compute an operation bool (*supports_op)(ggml_backend_dev_t dev, const struct ggml_tensor * op); // check if the backend can use tensors allocated in a buffer type bool (*supports_buft)(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft); // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer // these should be expensive operations that may benefit from running on this backend instead of the CPU backend bool (*offload_op)(ggml_backend_dev_t dev, const struct ggml_tensor * op); // (optional) event synchronization ggml_backend_event_t (*event_new) (ggml_backend_dev_t dev); void (*event_free) (ggml_backend_dev_t dev, ggml_backend_event_t event); void (*event_synchronize) (ggml_backend_dev_t dev, ggml_backend_event_t event); }; struct 
ggml_backend_device { struct ggml_backend_device_i iface; ggml_backend_reg_t reg; void * context; }; // // Backend (reg) // struct ggml_backend_reg_i { const char * (*get_name)(ggml_backend_reg_t reg); // enumerate available devices size_t (*get_device_count)(ggml_backend_reg_t reg); ggml_backend_dev_t (*get_device)(ggml_backend_reg_t reg, size_t index); // (optional) get a pointer to a function in the backend // backends can add custom functions that are not part of the standard ggml-backend interface void * (*get_proc_address)(ggml_backend_reg_t reg, const char * name); }; struct ggml_backend_reg { int api_version; // initialize to GGML_BACKEND_API_VERSION struct ggml_backend_reg_i iface; void * context; }; // Add backend dynamic loading support to the backend // Initialize the backend typedef ggml_backend_reg_t (*ggml_backend_init_t)(void); // Optional: obtain a score for the backend based on the system configuration // Higher scores are preferred, 0 means the backend is not supported in the current system typedef int (*ggml_backend_score_t)(void); #ifdef GGML_BACKEND_DL # ifdef __cplusplus # define GGML_BACKEND_DL_IMPL(reg_fn) \ extern "C" { \ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_init(void); \ } \ ggml_backend_reg_t ggml_backend_init(void) { \ return reg_fn(); \ } # define GGML_BACKEND_DL_SCORE_IMPL(score_fn) \ extern "C" { \ GGML_BACKEND_API int ggml_backend_score(void); \ } \ int ggml_backend_score(void) { \ return score_fn(); \ } # else # define GGML_BACKEND_DL_IMPL(reg_fn) \ GGML_BACKEND_API ggml_backend_reg_t ggml_backend_init(void); \ ggml_backend_reg_t ggml_backend_init(void) { \ return reg_fn(); \ } # define GGML_BACKEND_DL_SCORE_IMPL(score_fn) \ GGML_BACKEND_API int ggml_backend_score(void); \ int ggml_backend_score(void) { \ return score_fn(); \ } # endif #else # define GGML_BACKEND_DL_IMPL(reg_fn) # define GGML_BACKEND_DL_SCORE_IMPL(score_fn) #endif #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-backend-reg.cpp000066400000000000000000000445701512524704700207570ustar00rootroot00000000000000#include "ggml-backend-impl.h" #include "ggml-backend.h" #include "ggml-impl.h" #include #include #include #include #include #include #include #include #ifdef _WIN32 # define WIN32_LEAN_AND_MEAN # ifndef NOMINMAX # define NOMINMAX # endif # include #elif defined(__APPLE__) # include # include #else # include # include #endif // Backend registry #ifdef GGML_USE_CPU #include "ggml-cpu.h" #endif #ifdef GGML_USE_CUDA #include "ggml-cuda.h" #endif #ifdef GGML_USE_METAL #include "ggml-metal.h" #endif #ifdef GGML_USE_SYCL #include "ggml-sycl.h" #endif #ifdef GGML_USE_VULKAN #include "ggml-vulkan.h" #endif #ifdef GGML_USE_WEBGPU #include "ggml-webgpu.h" #endif #ifdef GGML_USE_ZDNN #include "ggml-zdnn.h" #endif #ifdef GGML_USE_OPENCL #include "ggml-opencl.h" #endif #ifdef GGML_USE_HEXAGON #include "ggml-hexagon.h" #endif #ifdef GGML_USE_BLAS #include "ggml-blas.h" #endif #ifdef GGML_USE_RPC #include "ggml-rpc.h" #endif #ifdef GGML_USE_CANN #include "ggml-cann.h" #endif #ifdef GGML_USE_ZENDNN #include "ggml-zendnn.h" #endif // disable C++17 deprecation warning for std::codecvt_utf8 #if defined(__clang__) # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" #elif defined(__GNUC__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif namespace fs = std::filesystem; static std::string path_str(const fs::path & path) { std::string u8path; try { #if defined(__cpp_lib_char8_t) // C++20 and later: 
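// [illustrative sketch, not part of the original ggml sources] How a dynamically loaded
// backend would use the macros above: it exports ggml_backend_init() (and optionally
// ggml_backend_score()) so that the loader in ggml-backend-reg.cpp can resolve them with
// dl_get_sym(). my_backend_reg() and my_backend_is_supported() are hypothetical functions
// of the example backend, not part of ggml. Guarded with #if 0.
#if 0
#include "ggml-backend-impl.h"

ggml_backend_reg_t my_backend_reg(void);    // hypothetical: returns a struct ggml_backend_reg
                                            // with .api_version = GGML_BACKEND_API_VERSION
int my_backend_is_supported(void);          // hypothetical system check

static int my_backend_score(void) {
    // 0 means "not supported on this system"; higher scores win in ggml_backend_load_best()
    return my_backend_is_supported() ? 1 : 0;
}

GGML_BACKEND_DL_IMPL(my_backend_reg)
GGML_BACKEND_DL_SCORE_IMPL(my_backend_score)
#endif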
u8string() returns std::u8string std::u8string u8str = path.u8string(); u8path = std::string(reinterpret_cast(u8str.c_str())); #else // C++17: u8string() returns std::string u8path = path.u8string(); #endif } catch (...) { } return u8path; } #if defined(__clang__) # pragma clang diagnostic pop #elif defined(__GNUC__) # pragma GCC diagnostic pop #endif #ifdef _WIN32 using dl_handle = std::remove_pointer_t; struct dl_handle_deleter { void operator()(HMODULE handle) { FreeLibrary(handle); } }; static dl_handle * dl_load_library(const fs::path & path) { // suppress error dialogs for missing DLLs DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS); SetErrorMode(old_mode | SEM_FAILCRITICALERRORS); HMODULE handle = LoadLibraryW(path.wstring().c_str()); SetErrorMode(old_mode); return handle; } static void * dl_get_sym(dl_handle * handle, const char * name) { DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS); SetErrorMode(old_mode | SEM_FAILCRITICALERRORS); void * p = (void *) GetProcAddress(handle, name); SetErrorMode(old_mode); return p; } static const char * dl_error() { return ""; } #else using dl_handle = void; struct dl_handle_deleter { void operator()(void * handle) { dlclose(handle); } }; static void * dl_load_library(const fs::path & path) { dl_handle * handle = dlopen(path.string().c_str(), RTLD_NOW | RTLD_LOCAL); return handle; } static void * dl_get_sym(dl_handle * handle, const char * name) { return dlsym(handle, name); } static const char * dl_error() { const char *rslt = dlerror(); return rslt != nullptr ? rslt : ""; } #endif using dl_handle_ptr = std::unique_ptr; struct ggml_backend_reg_entry { ggml_backend_reg_t reg; dl_handle_ptr handle; }; struct ggml_backend_registry { std::vector backends; std::vector devices; ggml_backend_registry() { #ifdef GGML_USE_CUDA register_backend(ggml_backend_cuda_reg()); #endif #ifdef GGML_USE_METAL register_backend(ggml_backend_metal_reg()); #endif #ifdef GGML_USE_SYCL register_backend(ggml_backend_sycl_reg()); #endif #ifdef GGML_USE_VULKAN register_backend(ggml_backend_vk_reg()); #endif #ifdef GGML_USE_WEBGPU register_backend(ggml_backend_webgpu_reg()); #endif #ifdef GGML_USE_ZDNN register_backend(ggml_backend_zdnn_reg()); #endif #ifdef GGML_USE_OPENCL register_backend(ggml_backend_opencl_reg()); #endif #ifdef GGML_USE_ZENDNN register_backend(ggml_backend_zendnn_reg()); #endif #ifdef GGML_USE_HEXAGON register_backend(ggml_backend_hexagon_reg()); #endif #ifdef GGML_USE_CANN register_backend(ggml_backend_cann_reg()); #endif #ifdef GGML_USE_BLAS register_backend(ggml_backend_blas_reg()); #endif #ifdef GGML_USE_RPC register_backend(ggml_backend_rpc_reg()); #endif #ifdef GGML_USE_CPU register_backend(ggml_backend_cpu_reg()); #endif } ~ggml_backend_registry() { // FIXME: backends cannot be safely unloaded without a function to destroy all the backend resources, // since backend threads may still be running and accessing resources from the dynamic library for (auto & entry : backends) { if (entry.handle) { entry.handle.release(); // NOLINT } } } void register_backend(ggml_backend_reg_t reg, dl_handle_ptr handle = nullptr) { if (!reg) { return; } #ifndef NDEBUG GGML_LOG_DEBUG("%s: registered backend %s (%zu devices)\n", __func__, ggml_backend_reg_name(reg), ggml_backend_reg_dev_count(reg)); #endif backends.push_back({ reg, std::move(handle) }); for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) { register_device(ggml_backend_reg_dev_get(reg, i)); } } void register_device(ggml_backend_dev_t device) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: 
registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device)); #endif devices.push_back(device); } ggml_backend_reg_t load_backend(const fs::path & path, bool silent) { dl_handle_ptr handle { dl_load_library(path) }; if (!handle) { if (!silent) { GGML_LOG_ERROR("%s: failed to load %s: %s\n", __func__, path_str(path).c_str(), dl_error()); } return nullptr; } auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score"); if (score_fn && score_fn() == 0) { if (!silent) { GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, path_str(path).c_str()); } return nullptr; } auto backend_init_fn = (ggml_backend_init_t) dl_get_sym(handle.get(), "ggml_backend_init"); if (!backend_init_fn) { if (!silent) { GGML_LOG_ERROR("%s: failed to find ggml_backend_init in %s\n", __func__, path_str(path).c_str()); } return nullptr; } ggml_backend_reg_t reg = backend_init_fn(); if (!reg || reg->api_version != GGML_BACKEND_API_VERSION) { if (!silent) { if (!reg) { GGML_LOG_ERROR("%s: failed to initialize backend from %s: ggml_backend_init returned NULL\n", __func__, path_str(path).c_str()); } else { GGML_LOG_ERROR("%s: failed to initialize backend from %s: incompatible API version (backend: %d, current: %d)\n", __func__, path_str(path).c_str(), reg->api_version, GGML_BACKEND_API_VERSION); } } return nullptr; } GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), path_str(path).c_str()); register_backend(reg, std::move(handle)); return reg; } void unload_backend(ggml_backend_reg_t reg, bool silent) { auto it = std::find_if(backends.begin(), backends.end(), [reg](const ggml_backend_reg_entry & entry) { return entry.reg == reg; }); if (it == backends.end()) { if (!silent) { GGML_LOG_ERROR("%s: backend not found\n", __func__); } return; } if (!silent) { GGML_LOG_DEBUG("%s: unloading %s backend\n", __func__, ggml_backend_reg_name(reg)); } // remove devices devices.erase( std::remove_if(devices.begin(), devices.end(), [reg](ggml_backend_dev_t dev) { return ggml_backend_dev_backend_reg(dev) == reg; }), devices.end()); // remove backend backends.erase(it); } }; static ggml_backend_registry & get_reg() { static ggml_backend_registry reg; return reg; } // Internal API void ggml_backend_register(ggml_backend_reg_t reg) { get_reg().register_backend(reg); } void ggml_backend_device_register(ggml_backend_dev_t device) { get_reg().register_device(device); } // Backend (reg) enumeration static bool striequals(const char * a, const char * b) { for (; *a && *b; a++, b++) { if (std::tolower(*a) != std::tolower(*b)) { return false; } } return *a == *b; } size_t ggml_backend_reg_count() { return get_reg().backends.size(); } ggml_backend_reg_t ggml_backend_reg_get(size_t index) { GGML_ASSERT(index < ggml_backend_reg_count()); return get_reg().backends[index].reg; } ggml_backend_reg_t ggml_backend_reg_by_name(const char * name) { for (size_t i = 0; i < ggml_backend_reg_count(); i++) { ggml_backend_reg_t reg = ggml_backend_reg_get(i); if (striequals(ggml_backend_reg_name(reg), name)) { return reg; } } return nullptr; } // Device enumeration size_t ggml_backend_dev_count() { return get_reg().devices.size(); } ggml_backend_dev_t ggml_backend_dev_get(size_t index) { GGML_ASSERT(index < ggml_backend_dev_count()); return get_reg().devices[index]; } ggml_backend_dev_t ggml_backend_dev_by_name(const char * name) { for (size_t i = 0; i < ggml_backend_dev_count(); i++) { ggml_backend_dev_t dev = ggml_backend_dev_get(i); if 
(striequals(ggml_backend_dev_name(dev), name)) { return dev; } } return nullptr; } ggml_backend_dev_t ggml_backend_dev_by_type(enum ggml_backend_dev_type type) { for (size_t i = 0; i < ggml_backend_dev_count(); i++) { ggml_backend_dev_t dev = ggml_backend_dev_get(i); if (ggml_backend_dev_type(dev) == type) { return dev; } } return nullptr; } // Convenience functions ggml_backend_t ggml_backend_init_by_name(const char * name, const char * params) { ggml_backend_dev_t dev = ggml_backend_dev_by_name(name); if (!dev) { return nullptr; } return ggml_backend_dev_init(dev, params); } ggml_backend_t ggml_backend_init_by_type(enum ggml_backend_dev_type type, const char * params) { ggml_backend_dev_t dev = ggml_backend_dev_by_type(type); if (!dev) { return nullptr; } return ggml_backend_dev_init(dev, params); } ggml_backend_t ggml_backend_init_best(void) { ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); if (!dev) { return nullptr; } return ggml_backend_dev_init(dev, nullptr); } // Dynamic loading ggml_backend_reg_t ggml_backend_load(const char * path) { return get_reg().load_backend(path, false); } void ggml_backend_unload(ggml_backend_reg_t reg) { get_reg().unload_backend(reg, true); } static fs::path get_executable_path() { #if defined(__APPLE__) // get executable path std::vector path; uint32_t size; while (true) { size = path.size(); if (_NSGetExecutablePath(path.data(), &size) == 0) { break; } path.resize(size); } std::string base_path(path.data(), size); // remove executable name auto last_slash = base_path.find_last_of('/'); if (last_slash != std::string::npos) { base_path = base_path.substr(0, last_slash); } return base_path + "/"; #elif defined(__linux__) || defined(__FreeBSD__) std::string base_path = "."; std::vector path(1024); while (true) { // get executable path # if defined(__linux__) ssize_t len = readlink("/proc/self/exe", path.data(), path.size()); # elif defined(__FreeBSD__) ssize_t len = readlink("/proc/curproc/file", path.data(), path.size()); # endif if (len == -1) { break; } if (len < (ssize_t) path.size()) { base_path = std::string(path.data(), len); // remove executable name auto last_slash = base_path.find_last_of('/'); if (last_slash != std::string::npos) { base_path = base_path.substr(0, last_slash); } break; } path.resize(path.size() * 2); } return base_path + "/"; #elif defined(_WIN32) std::vector path(MAX_PATH); DWORD len = GetModuleFileNameW(NULL, path.data(), path.size()); if (len == 0) { return {}; } std::wstring base_path(path.data(), len); // remove executable name auto last_slash = base_path.find_last_of('\\'); if (last_slash != std::string::npos) { base_path = base_path.substr(0, last_slash); } return base_path + L"\\"; #else return {}; #endif } static fs::path backend_filename_prefix() { #ifdef _WIN32 return fs::u8path("ggml-"); #else return fs::u8path("libggml-"); #endif } static fs::path backend_filename_extension() { #ifdef _WIN32 return fs::u8path(".dll"); #else return fs::u8path(".so"); #endif } static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, const char * user_search_path) { // enumerate all the files that match [lib]ggml-name-*.[so|dll] in the search paths const fs::path name_path = fs::u8path(name); const fs::path file_prefix = backend_filename_prefix().native() + name_path.native() + fs::u8path("-").native(); const fs::path file_extension = 
backend_filename_extension(); std::vector search_paths; if (user_search_path == nullptr) { #ifdef GGML_BACKEND_DIR search_paths.push_back(fs::u8path(GGML_BACKEND_DIR)); #endif // default search paths: executable directory, current directory search_paths.push_back(get_executable_path()); search_paths.push_back(fs::current_path()); } else { search_paths.push_back(fs::u8path(user_search_path)); } int best_score = 0; fs::path best_path; for (const auto & search_path : search_paths) { if (std::error_code ec; !fs::exists(search_path, ec)) { if (ec) { GGML_LOG_DEBUG("%s: posix_stat(%s) failure, error-message: %s\n", __func__, path_str(search_path).c_str(), ec.message().c_str()); } else { GGML_LOG_DEBUG("%s: search path %s does not exist\n", __func__, path_str(search_path).c_str()); } continue; } fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied); for (const auto & entry : dir_it) { if (entry.is_regular_file()) { auto filename = entry.path().filename(); auto ext = entry.path().extension(); if (filename.native().find(file_prefix) == 0 && ext == file_extension) { dl_handle_ptr handle { dl_load_library(entry) }; if (!handle && !silent) { GGML_LOG_ERROR("%s: failed to load %s: %s\n", __func__, path_str(entry.path()).c_str(), dl_error()); } if (handle) { auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score"); if (score_fn) { int s = score_fn(); #ifndef NDEBUG GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, path_str(entry.path()).c_str(), s); #endif if (s > best_score) { best_score = s; best_path = entry.path(); } } else { if (!silent) { GGML_LOG_INFO("%s: failed to find ggml_backend_score in %s\n", __func__, path_str(entry.path()).c_str()); } } } } } } } if (best_score == 0) { // try to load the base backend for (const auto & search_path : search_paths) { fs::path filename = backend_filename_prefix().native() + name_path.native() + backend_filename_extension().native(); fs::path path = search_path / filename; if (std::error_code ec; fs::exists(path, ec)) { return get_reg().load_backend(path, silent); } else { if (ec) { GGML_LOG_DEBUG("%s: posix_stat(%s) failure, error-message: %s\n", __func__, path_str(path).c_str(), ec.message().c_str()); } } } return nullptr; } return get_reg().load_backend(best_path, silent); } void ggml_backend_load_all() { ggml_backend_load_all_from_path(nullptr); } void ggml_backend_load_all_from_path(const char * dir_path) { #ifdef NDEBUG bool silent = true; #else bool silent = false; #endif ggml_backend_load_best("blas", silent, dir_path); ggml_backend_load_best("zendnn", silent, dir_path); ggml_backend_load_best("cann", silent, dir_path); ggml_backend_load_best("cuda", silent, dir_path); ggml_backend_load_best("hip", silent, dir_path); ggml_backend_load_best("metal", silent, dir_path); ggml_backend_load_best("rpc", silent, dir_path); ggml_backend_load_best("sycl", silent, dir_path); ggml_backend_load_best("vulkan", silent, dir_path); ggml_backend_load_best("opencl", silent, dir_path); ggml_backend_load_best("hexagon", silent, dir_path); ggml_backend_load_best("musa", silent, dir_path); ggml_backend_load_best("cpu", silent, dir_path); // check the environment variable GGML_BACKEND_PATH to load an out-of-tree backend const char * backend_path = std::getenv("GGML_BACKEND_PATH"); if (backend_path) { ggml_backend_load(backend_path); } } ggml-org-ggml-3678254/src/ggml-backend.cpp000066400000000000000000002553551512524704700202110ustar00rootroot00000000000000// Note: porting this file to C++ is a work in progress 
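// [illustrative sketch, not part of the original ggml sources] Typical use of the registry
// implemented above: load all dynamic backends, list the registered devices, then create a
// backend for the best available device. Guarded with #if 0.
#if 0
#include <stdio.h>
#include "ggml-backend.h"

static ggml_backend_t init_any_backend(void) {
    ggml_backend_load_all();    // also honors the GGML_BACKEND_PATH environment variable

    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        printf("device %zu: %s (%s)\n", i, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev));
    }

    // prefers a GPU, then an integrated GPU, then the CPU - see ggml_backend_init_best() above
    return ggml_backend_init_best();    // NULL if no device is available
}
#endif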
#ifdef _WIN32 #define WIN32_LEAN_AND_MEAN #ifndef NOMINMAX # define NOMINMAX #endif #include #endif #include "ggml-backend.h" #include "ggml-backend-impl.h" #include "ggml-alloc.h" #include "ggml-impl.h" #include #include #include #include #include #include #include #include #ifdef __APPLE__ #include #include #endif // backend buffer type const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) { GGML_ASSERT(buft); return buft->iface.get_name(buft); } ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { GGML_ASSERT(buft); if (size == 0) { // return a dummy buffer for zero-sized allocations return ggml_backend_buffer_init(buft, {}, NULL, 0); } return buft->iface.alloc_buffer(buft, size); } size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) { GGML_ASSERT(buft); return buft->iface.get_alignment(buft); } size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) { GGML_ASSERT(buft); // get_max_size is optional, defaults to SIZE_MAX if (buft->iface.get_max_size) { return buft->iface.get_max_size(buft); } return SIZE_MAX; } size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) { GGML_ASSERT(buft); // get_alloc_size is optional, defaults to ggml_nbytes if (buft->iface.get_alloc_size) { size_t size = buft->iface.get_alloc_size(buft, tensor); assert(size >= ggml_nbytes(tensor)); return size; } return ggml_nbytes(tensor); } bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) { GGML_ASSERT(buft); if (buft->iface.is_host) { return buft->iface.is_host(buft); } return false; } ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) { GGML_ASSERT(buft); return buft->device; } // backend buffer ggml_backend_buffer_t ggml_backend_buffer_init( ggml_backend_buffer_type_t buft, struct ggml_backend_buffer_i iface, void * context, size_t size) { ggml_backend_buffer_t buffer = new ggml_backend_buffer { /* .interface = */ iface, /* .buft = */ buft, /* .context = */ context, /* .size = */ size, /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY }; return buffer; } const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) { return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer)); } void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { if (buffer == NULL) { return; } if (buffer->iface.free_buffer != NULL) { buffer->iface.free_buffer(buffer); } delete buffer; } size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); return buffer->size; } void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); // get_base is optional if the buffer is zero-sized if (buffer->size == 0) { return NULL; } // FIXME JG: a multi_buffer has a non-zero size, according to the above comment get_base is not optional, // I don't know whether the above comment is correct if (!buffer->iface.get_base) { return NULL; } void * base = buffer->iface.get_base(buffer); GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL"); return base; } enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { GGML_ASSERT(buffer); // init_tensor is optional if (buffer->iface.init_tensor) { return buffer->iface.init_tensor(buffer, tensor); } return GGML_STATUS_SUCCESS; } void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { GGML_ASSERT(buffer); // clear is optional if the buffer is zero-sized if (buffer->size == 0) { 
return; } buffer->iface.clear(buffer, value); } size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) { return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer)); } size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) { return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer)); } size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor) { return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor); } bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer)); } void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { GGML_ASSERT(buffer); buffer->usage = usage; // FIXME: add a generic callback to the buffer interface if (ggml_backend_buffer_is_multi_buffer(buffer)) { ggml_backend_multi_buffer_set_usage(buffer, usage); } } enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); return buffer->usage; } ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); return buffer->buft; } void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); if (buffer->iface.reset) { buffer->iface.reset(buffer); } } bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) { ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer; if (dst_buf->iface.cpy_tensor) { return dst_buf->iface.cpy_tensor(dst_buf, src, dst); } return false; } // backend ggml_guid_t ggml_backend_guid(ggml_backend_t backend) { if (backend == NULL) { return NULL; } return backend->guid; } const char * ggml_backend_name(ggml_backend_t backend) { if (backend == NULL) { return "NULL"; } return backend->iface.get_name(backend); } void ggml_backend_free(ggml_backend_t backend) { if (backend == NULL) { return; } backend->iface.free(backend); } ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) { GGML_ASSERT(backend); return ggml_backend_dev_buffer_type(backend->device); } ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) { return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size); } size_t ggml_backend_get_alignment(ggml_backend_t backend) { return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend)); } size_t ggml_backend_get_max_size(ggml_backend_t backend) { return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend)); } void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(backend); GGML_ASSERT(tensor); GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); if (backend->iface.set_tensor_async == NULL) { ggml_backend_tensor_set(tensor, data, offset, size); } else { backend->iface.set_tensor_async(backend, tensor, data, offset, size); } } void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_ASSERT(backend); GGML_ASSERT(tensor); GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of 
bounds"); if (backend->iface.get_tensor_async == NULL) { ggml_backend_tensor_get(tensor, data, offset, size); } else { backend->iface.get_tensor_async(backend, tensor, data, offset, size); } } void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(tensor); ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; if (size == 0) { return; } GGML_ASSERT(buf != NULL && "tensor buffer not set"); GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); buf->iface.set_tensor(buf, tensor, data, offset, size); } void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_ASSERT(tensor); ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; if (size == 0) { return; } GGML_ASSERT(buf != NULL && "tensor buffer not set"); GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); buf->iface.get_tensor(buf, tensor, data, offset, size); } void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { GGML_ASSERT(tensor); ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; if (size == 0) { return; } GGML_ASSERT(buf != NULL && "tensor buffer not set"); GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer"); buf->iface.memset_tensor(buf, tensor, value, offset, size); } void ggml_backend_synchronize(ggml_backend_t backend) { GGML_ASSERT(backend); if (backend->iface.synchronize == NULL) { return; } backend->iface.synchronize(backend); } ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { GGML_ASSERT(backend); GGML_ASSERT(backend->iface.graph_plan_create != NULL); return backend->iface.graph_plan_create(backend, cgraph); } void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { GGML_ASSERT(backend); GGML_ASSERT(backend->iface.graph_plan_free != NULL); backend->iface.graph_plan_free(backend, plan); } enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { GGML_ASSERT(backend); GGML_ASSERT(backend->iface.graph_plan_compute != NULL); return backend->iface.graph_plan_compute(backend, plan); } enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph); ggml_backend_synchronize(backend); return err; } enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) { GGML_ASSERT(backend); return backend->iface.graph_compute(backend, cgraph); } bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { GGML_ASSERT(backend); return ggml_backend_dev_supports_op(backend->device, op); } bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) { GGML_ASSERT(backend); return ggml_backend_dev_supports_buft(backend->device, buft); } bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) { GGML_ASSERT(backend); return 
ggml_backend_dev_offload_op(backend->device, op); } ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) { GGML_ASSERT(backend); return backend->device; } // backend copy void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) { GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); if (src == dst) { return; } if (ggml_backend_buffer_is_host(src->buffer)) { ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); } else if (ggml_backend_buffer_is_host(dst->buffer)) { ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); } else if (!ggml_backend_buffer_copy_tensor(src, dst)) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer)); #endif size_t nbytes = ggml_nbytes(src); void * data = malloc(nbytes); ggml_backend_tensor_get(src, data, 0, nbytes); ggml_backend_tensor_set(dst, data, 0, nbytes); free(data); } } void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) { GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); if (src == dst) { return; } GGML_ASSERT(backend_dst); if (backend_dst->iface.cpy_tensor_async != NULL) { if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) { return; } } // an async copy would normally happen after all the queued operations on both backends are completed // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy ggml_backend_synchronize(backend_src); ggml_backend_synchronize(backend_dst); ggml_backend_tensor_copy(src, dst); } // events ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) { // null device is allowed for the transition period to the device interface if (device == NULL || device->iface.event_new == NULL) { return NULL; } return device->iface.event_new(device); } void ggml_backend_event_free(ggml_backend_event_t event) { if (event == NULL) { return; } event->device->iface.event_free(event->device, event); } void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) { GGML_ASSERT(backend); GGML_ASSERT(backend->iface.event_record != NULL); backend->iface.event_record(backend, event); } void ggml_backend_event_synchronize(ggml_backend_event_t event) { GGML_ASSERT(event); GGML_ASSERT(event->device->iface.event_synchronize); event->device->iface.event_synchronize(event->device, event); } void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { GGML_ASSERT(backend); GGML_ASSERT(backend->iface.event_wait != NULL); backend->iface.event_wait(backend, event); } static void ggml_backend_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * cgraph) { GGML_ASSERT(backend); if (backend->iface.graph_optimize != NULL) { backend->iface.graph_optimize(backend, cgraph); } } // Backend device const char * ggml_backend_dev_name(ggml_backend_dev_t device) { GGML_ASSERT(device); return device->iface.get_name(device); } const char * ggml_backend_dev_description(ggml_backend_dev_t device) { GGML_ASSERT(device); return device->iface.get_description(device); } void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) { GGML_ASSERT(device); device->iface.get_memory(device, free, total); } enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) { GGML_ASSERT(device); 
return device->iface.get_type(device); } void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) { memset(props, 0, sizeof(*props)); device->iface.get_props(device, props); } ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) { GGML_ASSERT(device); return device->reg; } ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) { GGML_ASSERT(device); return device->iface.init_backend(device, params); } ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) { GGML_ASSERT(device); return device->iface.get_buffer_type(device); } ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) { GGML_ASSERT(device); if (device->iface.get_host_buffer_type == NULL) { return NULL; } return device->iface.get_host_buffer_type(device); } ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) { GGML_ASSERT(device); return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size); } bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { GGML_ASSERT(device); return device->iface.supports_op(device, op); } bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) { GGML_ASSERT(device); return device->iface.supports_buft(device, buft); } bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { GGML_ASSERT(device); if (device->iface.offload_op != NULL) { return device->iface.offload_op(device, op); } return false; } // Backend (reg) const char * ggml_backend_reg_name(ggml_backend_reg_t reg) { GGML_ASSERT(reg); return reg->iface.get_name(reg); } size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) { GGML_ASSERT(reg); return reg->iface.get_device_count(reg); } ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(reg); return reg->iface.get_device(reg, index); } void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) { GGML_ASSERT(reg); if (!reg->iface.get_proc_address) { return NULL; } return reg->iface.get_proc_address(reg, name); } // multi-buffer buffer struct ggml_backend_multi_buffer_context { ggml_backend_buffer_t * buffers; size_t n_buffers; }; static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; for (size_t i = 0; i < ctx->n_buffers; i++) { ggml_backend_buffer_free(ctx->buffers[i]); } free(ctx->buffers); free(ctx); } static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { GGML_ASSERT(buffer); ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; for (size_t i = 0; i < ctx->n_buffers; i++) { ggml_backend_buffer_clear(ctx->buffers[i], value); } } static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = { /* .free_buffer = */ ggml_backend_multi_buffer_free_buffer, /* .get_base = */ NULL, /* .init_tensor = */ NULL, /* .memset_tensor = */ NULL, /* .set_tensor = */ NULL, /* .get_tensor = */ NULL, /* .cpy_tensor = */ NULL, /* .clear = */ ggml_backend_multi_buffer_clear, /* .reset = */ NULL, }; ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) { ggml_backend_multi_buffer_context * ctx = 
(ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context)); ctx->n_buffers = n_buffers; ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t)); GGML_ASSERT(ctx->buffers != NULL); size_t total_size = 0; for (size_t i = 0; i < n_buffers; i++) { ctx->buffers[i] = buffers[i]; total_size += ggml_backend_buffer_get_size(buffers[i]); } return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size); } bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer; } void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { GGML_ASSERT(buffer); GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer)); ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; for (size_t i = 0; i < ctx->n_buffers; i++) { ggml_backend_buffer_set_usage(ctx->buffers[i], usage); } } // creates a copy of the tensor with the same memory layout static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) { struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor); for (int i = 0; i < GGML_MAX_DIMS; i++) { dup->nb[i] = tensor->nb[i]; } return dup; } static bool ggml_is_view_op(enum ggml_op op) { return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE; } // scheduler #ifndef GGML_SCHED_MAX_BACKENDS #define GGML_SCHED_MAX_BACKENDS 16 #endif #ifndef GGML_SCHED_MAX_SPLIT_INPUTS #define GGML_SCHED_MAX_SPLIT_INPUTS 30 #endif #ifndef GGML_SCHED_MAX_COPIES #define GGML_SCHED_MAX_COPIES 4 #endif struct ggml_backend_sched_split { int backend_id; int i_start; int i_end; struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; int n_inputs; // graph view of this split struct ggml_cgraph graph; }; struct ggml_backend_sched { bool is_reset; // true if the scheduler has been reset since the last graph split bool is_alloc; int n_backends; ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS]; ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS]; ggml_gallocr_t galloc; // hash map of the nodes in the graph struct ggml_hash_set hash_set; int * hv_tensor_backend_ids; // [hash_set.size] struct ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies] int * node_backend_ids; // [graph_size] int * leaf_backend_ids; // [graph_size] int * prev_node_backend_ids; // [graph_size] int * prev_leaf_backend_ids; // [graph_size] // copy of the graph with modified inputs struct ggml_cgraph graph; // graph splits struct ggml_backend_sched_split * splits; int n_splits; int splits_capacity; // pipeline parallelism support int n_copies; int cur_copy; int next_copy; ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES]; struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; int n_graph_inputs; struct ggml_context * ctx; ggml_backend_sched_eval_callback callback_eval; void * callback_eval_user_data; char * context_buffer; size_t context_buffer_size; bool op_offload; int debug; // used for debugging graph reallocations [GGML_SCHED_DEBUG_REALLOC] // ref: https://github.com/ggml-org/llama.cpp/pull/17617 int debug_realloc; int debug_graph_size; int debug_prev_graph_size; }; #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor) #define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)] 
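// [Editorial note] The tensor_id_copy macro defined next flattens a 3-D index
// [hash id][backend id][copy id] into the 1-D hv_tensor_copies array:
//   index = id * n_backends * n_copies + backend_id * n_copies + copy_id
// so all pipeline copies of a tensor for a given backend are stored contiguously.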
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)] #define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id) // returns the priority of the backend, lower id is higher priority static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) { for (int i = 0; i < sched->n_backends; i++) { if (sched->backends[i] == backend) { return i; } } return -1; } static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) { ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; if (buffer == NULL) { return -1; } // find highest prio backend that supports the buffer type and the op for (int i = 0; i < sched->n_backends; i++) { if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) && ggml_backend_supports_op(sched->backends[i], op)) { return i; } } #ifndef NDEBUG GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n", __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name); #endif return -1; } #if 0 #define GGML_SCHED_MAX_SPLITS_DEBUG 4096 static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__) #define GET_CAUSE(node) causes[hash_id(node)] #else #define SET_CAUSE(node, ...) #define GET_CAUSE(node) "" #endif // returns the backend that should be used for the node based on the current locations static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) { // assign pre-allocated nodes to their backend int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor); if (cur_backend_id != -1) { SET_CAUSE(tensor, "1.dst"); return cur_backend_id; } // view_src if (tensor->view_src != NULL) { cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor); if (cur_backend_id != -1) { SET_CAUSE(tensor, "1.vsrc"); return cur_backend_id; } } if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) { // since the tensor is pre-allocated, it cannot be moved to another backend ggml_backend_buffer_t buffer = tensor->view_src ? 
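// [Editorial note] The remaining assignment rules in this function: graph inputs default
// to the last backend (assumed to be the CPU); ops that read a weight prefer the backend
// that holds the weight; and when op_offload is enabled, a weight kept in host memory by
// the lowest-priority backend may instead be taken by the highest-priority backend whose
// supports_op() and offload_op() both return true (cause "1.off").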
tensor->view_src->buffer : tensor->buffer; GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op)); } // graph input if (tensor->flags & GGML_TENSOR_FLAG_INPUT) { cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU) SET_CAUSE(tensor, "1.inp"); return cur_backend_id; } // operations with weights are preferably run on the same backend as the weights for (int i = 0; i < GGML_MAX_SRC; i++) { const struct ggml_tensor * src = tensor->src[i]; if (src == NULL) { continue; } // skip ROPE since the rope freqs tensor is too small to choose a backend based on it // not an ideal solution if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor); // check if a backend with higher prio wants to offload the op if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) { for (int b = 0; b < src_backend_id; b++) { if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) { SET_CAUSE(tensor, "1.off"); return b; } } } SET_CAUSE(tensor, "1.wgt%d", i); return src_backend_id; } } return -1; } static char * fmt_size(size_t size) { static char buffer[128]; if (size >= 1024*1024) { snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024); } else { snprintf(buffer, sizeof(buffer), "%zuK", size/1024); } return buffer; } static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { int cur_split = 0; for (int i = 0; i < graph->n_nodes; i++) { if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) { ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id]; GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend), sched->splits[cur_split].n_inputs); for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) { if (j == 0) { GGML_LOG_DEBUG(": "); } GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j]))); } GGML_LOG_DEBUG("\n"); cur_split++; } struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } if (sched->debug > 1) { ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node); GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d:", i, ggml_op_name(node->op), node->name, fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node), graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)]); for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src); GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name, fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src)); } GGML_LOG_DEBUG("\n"); } } } static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) { ggml_backend_buffer_t buf = t->view_src ? 
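// [Editorial note] ggml_backend_sched_buffer_supported() decides whether backend_id can
// read tensor t in place: if t is already allocated, its buffer type must be supported by
// that backend; otherwise the buffer type of the backend currently assigned to t (or to
// its view source) is used as a proxy.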
t->view_src->buffer : t->buffer; ggml_backend_buffer_type_t buft = NULL; if (buf) { // the tensor is already allocated buft = buf->buft; } else { // see if the tensor already has a backend assigned, and use the buffer type of that backend int tensor_backend_id = tensor_backend_id(t); if (tensor_backend_id == -1 && t->view_src) { tensor_backend_id = tensor_backend_id(t->view_src); } if (tensor_backend_id != -1) { buft = sched->bufts[tensor_backend_id]; } } return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft); } static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) { if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) { *node_backend_id = cur_backend_id; SET_CAUSE(node, "2.sup"); } } // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { // reset splits sched->n_splits = 0; sched->n_graph_inputs = 0; sched->is_reset = false; struct ggml_init_params params = { /* .mem_size = */ sched->context_buffer_size, /* .mem_buffer = */ sched->context_buffer, /* .no_alloc = */ true }; ggml_free(sched->ctx); sched->ctx = ggml_init(params); if (sched->ctx == NULL) { GGML_ABORT("%s: failed to initialize context\n", __func__); } // pass 1: assign backends to ops with pre-allocated inputs for (int i = 0; i < graph->n_leafs; i++) { struct ggml_tensor * leaf = graph->leafs[i]; int * leaf_backend_id = &tensor_backend_id(leaf); // do not overwrite user assignments if (*leaf_backend_id == -1) { *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf); } } for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; int * node_backend_id = &tensor_backend_id(node); // do not overwrite user assignments if (*node_backend_id == -1) { *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node); #if 0 // src if (node->op == GGML_OP_NONE) { continue; } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } int * src_backend_id = &tensor_backend_id(src); if (*src_backend_id == -1) { *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src); } } #endif } } // pass 2: expand current backend assignments // assign the same backend to adjacent nodes // expand gpu backends (i.e. 
non last prio) up and down, ignoring cpu (the lowest priority backend) // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of its inputs are known // expand gpu down { int cur_backend_id = -1; for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } int * node_backend_id = &tensor_backend_id(node); if (*node_backend_id != -1) { if (*node_backend_id == sched->n_backends - 1) { // skip cpu (lowest prio backend) cur_backend_id = -1; } else { cur_backend_id = *node_backend_id; } } else if (cur_backend_id != -1) { ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); } } } // expand gpu up { int cur_backend_id = -1; for (int i = graph->n_nodes - 1; i >= 0; i--) { struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } int * node_backend_id = &tensor_backend_id(node); if (*node_backend_id != -1) { if (*node_backend_id == sched->n_backends - 1) { // skip cpu (lowest prio backend) cur_backend_id = -1; } else { cur_backend_id = *node_backend_id; } } else if (cur_backend_id != -1) { ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); } } } // expand rest down { int cur_backend_id = -1; for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } int * node_backend_id = &tensor_backend_id(node); if (*node_backend_id != -1) { cur_backend_id = *node_backend_id; } else if (cur_backend_id != -1) { ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); } } } // expand rest up { int cur_backend_id = -1; for (int i = graph->n_nodes - 1; i >= 0; i--) { struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } int * node_backend_id = &tensor_backend_id(node); if (*node_backend_id != -1) { cur_backend_id = *node_backend_id; } else if (cur_backend_id != -1) { ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); } } } // pass 3: upgrade nodes to higher prio backends with compatible buffer types // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there // however, we also need to verify that the sources are in compatible buffer types // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same // this is not uncommon since multiple backends can use host memory, with the same buffer type (eg. 
BLAS and CPU) // additionally, set remaining unassigned nodes to the backend with the most supported inputs // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } int * node_backend_id = &tensor_backend_id(node); if (*node_backend_id == -1) { // unassigned node: find the backend with the most supported inputs int n_supported_best = -1; for (int b = 0; b < sched->n_backends; b++) { if (ggml_backend_supports_op(sched->backends[b], node)) { int n_supported = 0; for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) { n_supported++; } } if (n_supported > n_supported_best) { n_supported_best = n_supported; *node_backend_id = b; SET_CAUSE(node, "3.best"); } } } } else { // assigned node: upgrade to higher prio backend if possible for (int b = 0; b < *node_backend_id; b++) { if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) { bool supported = true; for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } if (!ggml_backend_sched_buffer_supported(sched, src, b)) { supported = false; break; } } if (supported) { *node_backend_id = b; SET_CAUSE(node, "3.upg"); break; } } } } } // pass 4: assign backends to remaining src from dst and view_src for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; int * cur_backend_id = &tensor_backend_id(node); if (node->view_src != NULL && *cur_backend_id == -1) { *cur_backend_id = tensor_backend_id(node->view_src); SET_CAUSE(node, "4.vsrc"); } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } int * src_backend_id = &tensor_backend_id(src); if (*src_backend_id == -1) { if (src->view_src != NULL) { // views are always on the same backend as the source *src_backend_id = tensor_backend_id(src->view_src); SET_CAUSE(src, "4.vsrc"); } else { *src_backend_id = *cur_backend_id; SET_CAUSE(src, "4.cur"); } } } // if the node is still unassigned, assign it to the first backend that supports it for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) { ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id); } GGML_ASSERT(*cur_backend_id != -1); } // pass 5: split graph, find tensors that need to be copied { int i_split = 0; struct ggml_backend_sched_split * split = &sched->splits[0]; // find the backend of the first split, skipping view ops int i = 0; for (; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; if (!ggml_is_view_op(node->op)) { split->backend_id = tensor_backend_id(node); break; } } split->i_start = 0; split->n_inputs = 0; int cur_backend_id = split->backend_id; for (; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; if (ggml_is_view_op(node->op)) { continue; } const int node_backend_id = tensor_backend_id(node); GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback // check if we should start a new split based on the sources of the current node bool need_new_split = false; if (node_backend_id == cur_backend_id && split->n_inputs > 
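// [Editorial note] A new split is started when the node is assigned to a different backend
// than the current split, or (checked just below) when one of its weight sources lives on
// an incompatible backend, or when the split already holds GGML_SCHED_MAX_SPLIT_INPUTS
// inputs. Closing the split early lets ggml-alloc reuse the memory that held previously
// copied weights.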
0) { for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } // check if a weight is on a different and incompatible backend // by starting a new split, the memory of the previously offloaded weights can be reused if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { int src_backend_id = tensor_backend_id(src); if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { need_new_split = true; break; } } // check if the split has too many inputs // FIXME: count the number of inputs instead of only checking when full if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) { const size_t id = hash_id(src); int src_backend_id = sched->hv_tensor_backend_ids[id]; bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id); if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) { need_new_split = true; break; } } } } if (node_backend_id != cur_backend_id || need_new_split) { split->i_end = i; i_split++; if (i_split >= sched->splits_capacity) { sched->splits_capacity *= 2; sched->splits = (ggml_backend_sched_split *) realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split)); GGML_ASSERT(sched->splits != NULL); } split = &sched->splits[i_split]; split->backend_id = node_backend_id; split->i_start = i; split->n_inputs = 0; cur_backend_id = node_backend_id; } // find inputs that are not on the same backend for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { continue; } size_t src_id = hash_id(src); const int src_backend_id = sched->hv_tensor_backend_ids[src_id]; GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) { if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) { ggml_backend_t backend = sched->backends[src_backend_id]; for (int c = 0; c < sched->n_copies; c++) { struct ggml_tensor * tensor_copy; if (c == sched->cur_copy) { tensor_copy = src; // use the original tensor as the current copy } else { tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); } ggml_set_input(tensor_copy); ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor tensor_id_copy(src_id, src_backend_id, c) = tensor_copy; SET_CAUSE(tensor_copy, "4.cpy"); } int n_graph_inputs = sched->n_graph_inputs++; GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); sched->graph_inputs[n_graph_inputs] = src; } } if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { // create a copy of the input in the split's backend if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) { ggml_backend_t backend = sched->backends[cur_backend_id]; for (int c = 0; c < sched->n_copies; c++) { struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); if (sched->n_copies > 1) { ggml_set_input(tensor_copy); ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor } tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy; SET_CAUSE(tensor_copy, "4.cpy"); } int n_inputs = split->n_inputs++; GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); split->inputs[n_inputs] = src; } node->src[j] = 
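// [Editorial note] The assignment completed on the next line redirects node->src[j] to the
// copy that lives on the split's backend (for the current pipeline copy); the actual data
// transfer happens later in ggml_backend_sched_compute_splits. Copies are named
// "BACKEND#tensor_name#copy_index", matching the ggml_format_name calls above.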
tensor_id_copy(src_id, cur_backend_id, sched->cur_copy); } } } split->i_end = graph->n_nodes; sched->n_splits = i_split + 1; } if (sched->debug) { ggml_backend_sched_print_assignments(sched, graph); } // swap node_backend_ids and leaf _backend_ids with prevs { int * tmp = sched->node_backend_ids; sched->node_backend_ids = sched->prev_node_backend_ids; sched->prev_node_backend_ids = tmp; tmp = sched->leaf_backend_ids; sched->leaf_backend_ids = sched->prev_leaf_backend_ids; sched->prev_leaf_backend_ids = tmp; } int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies; // remember the actual graph_size for performing reallocation checks later [GGML_SCHED_DEBUG_REALLOC] sched->debug_prev_graph_size = sched->debug_graph_size; sched->debug_graph_size = graph_size; if (sched->graph.size < graph_size) { sched->graph.size = graph_size; sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *)); sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *)); GGML_ASSERT(sched->graph.nodes != NULL); GGML_ASSERT(sched->graph.leafs != NULL); } sched->graph.n_nodes = 0; sched->graph.n_leafs = 0; struct ggml_cgraph * graph_copy = &sched->graph; for (int i = 0; i < sched->n_splits; i++) { struct ggml_backend_sched_split * split = &sched->splits[i]; split->graph = ggml_graph_view(graph, split->i_start, split->i_end); // Optimize this split of the graph. This needs to happen before we make graph_copy, // so they are in sync. ggml_backend_graph_optimize(sched->backends[split->backend_id], &split->graph); // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split for (int j = 0; j < split->n_inputs; j++) { assert(graph_copy->size > (graph_copy->n_nodes + 1)); struct ggml_tensor * input = split->inputs[j]; const size_t input_id = hash_id(input); struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy); // add a dependency to the input source so that it is not freed before the copy is done struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input); input_dep->src[0] = input; sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id]; graph_copy->nodes[graph_copy->n_nodes++] = input_dep; // add a dependency to the input copy so that it is allocated at the start of the split sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id; graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; } for (int j = split->i_start; j < split->i_end; j++) { assert(graph_copy->size > graph_copy->n_nodes); sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]); graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j]; } } if (sched->n_copies > 1) { // add input copies as leafs so that they are allocated first for (int i = 0; i < sched->n_graph_inputs; i++) { struct ggml_tensor * input = sched->graph_inputs[i]; size_t id = hash_id(input); int backend_id = tensor_backend_id(input); for (int c = 0; c < sched->n_copies; c++) { struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; assert(graph_copy->size > graph_copy->n_leafs); graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; } } for (int i = 0; i < sched->n_splits; i++) { struct ggml_backend_sched_split * split = &sched->splits[i]; int backend_id = split->backend_id; for (int j = 0; j < split->n_inputs; 
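// [Editorial note] With pipeline parallelism (n_copies > 1) every graph input and every
// split input gets one copy per pipeline stage; the copies are appended as leafs so that
// ggml-alloc places them first, and the per-(backend, copy) events recorded in
// ggml_backend_sched_compute_splits decide when a given copy may safely be overwritten.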
j++) { struct ggml_tensor * input = split->inputs[j]; size_t id = hash_id(input); for (int c = 0; c < sched->n_copies; c++) { struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; assert(graph_copy->size > graph_copy->n_leafs); graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; } } } } // add leafs from the original graph for (int i = 0; i < graph->n_leafs; i++) { struct ggml_tensor * leaf = graph->leafs[i]; sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf); assert(graph_copy->size > graph_copy->n_leafs); graph_copy->leafs[graph_copy->n_leafs++] = leaf; } } static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { bool backend_ids_changed = false; for (int i = 0; i < sched->graph.n_nodes; i++) { if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] && sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) { backend_ids_changed = true; break; } } if (!backend_ids_changed) { for (int i = 0; i < sched->graph.n_leafs; i++) { if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] && sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) { backend_ids_changed = true; break; } } } // allocate graph if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); #endif if (sched->debug_realloc > 0) { // we are interested only in situations where the graph was reallocated even though its size remained the same [GGML_SCHED_DEBUG_REALLOC] // example: https://github.com/ggml-org/llama.cpp/pull/17143 const bool unexpected = !backend_ids_changed && sched->debug_prev_graph_size == sched->debug_graph_size; if (unexpected || sched->debug_realloc > 1) { GGML_ABORT("%s: unexpected graph reallocation (graph size = %d, nodes = %d, leafs = %d), debug_realloc = %d\n", __func__, sched->debug_graph_size, sched->graph.n_nodes, sched->graph.n_leafs, sched->debug_realloc); } } // the re-allocation may cause the split inputs to be moved to a different address // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy for (int i = 0; i < sched->n_backends; i++) { ggml_backend_synchronize(sched->backends[i]); } ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids); if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__); return false; } } return true; } static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) { GGML_ASSERT(sched); struct ggml_backend_sched_split * splits = sched->splits; ggml_tensor * prev_ids_tensor = nullptr; std::vector ids; std::vector used_ids; for (int split_id = 0; split_id < sched->n_splits; split_id++) { struct ggml_backend_sched_split * split = &splits[split_id]; int split_backend_id = split->backend_id; ggml_backend_t split_backend = sched->backends[split_backend_id]; // copy the input tensors to the split backend for (int input_id = 0; input_id < split->n_inputs; input_id++) { ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]); struct ggml_tensor * input = split->inputs[input_id]; struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy); if (input->flags & GGML_TENSOR_FLAG_INPUT) { // inputs 
from the user must be copied immediately to prevent the user overwriting the data before the copy is done if (sched->events[split_backend_id][sched->cur_copy] != NULL) { ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); } else { ggml_backend_synchronize(split_backend); } ggml_backend_tensor_copy(input, input_cpy); } else { // wait for the split backend to finish using the input before overwriting it if (sched->events[split_backend_id][sched->cur_copy] != NULL) { ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]); } else { ggml_backend_synchronize(split_backend); } // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used ggml_tensor * node = split->graph.nodes[0]; if (split->graph.n_nodes > 0 && ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && ggml_backend_buffer_is_host(input->buffer) && ( (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID) //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */ )) { const int64_t n_expert = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1]; const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1]; ggml_backend_synchronize(input_backend); // get the ids ggml_tensor * ids_tensor = node->src[2]; ggml_backend_t ids_backend = split_backend; // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend // in that case, we use the original ids tensor for (int i = input_id + 1; i < split->n_inputs; i++) { if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) { ids_tensor = split->inputs[i]; ids_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]); break; } } if (ids_tensor != prev_ids_tensor) { ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t)); ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor)); ggml_backend_synchronize(ids_backend); // find the used experts used_ids.clear(); used_ids.resize(ggml_bitset_size(n_expert)); for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) { for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) { int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)]; GGML_ASSERT(id >= 0 && id < n_expert); ggml_bitset_set(used_ids.data(), id); } } prev_ids_tensor = ids_tensor; } // group consecutive experts and copy them together auto copy_experts = [&](int32_t first_id, int32_t last_id) { const size_t expert_offset = first_id * expert_size; const size_t expert_size_copy = (last_id - first_id + 1) * expert_size; const size_t padding = std::min(expert_size, 512); const size_t padding_end = last_id < n_expert - 1 ? 
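// [Editorial note] copy_experts() uploads one contiguous run of experts per call. As an
// illustrative example, if the ids select experts {3, 4, 5, 9}, the loop below issues two
// transfers, rows 3..5 and row 9, instead of copying the whole expert tensor. The
// padding_end bytes computed here extend the copy slightly past the run so that the
// padding read by CUDA MMQ kernels contains no NaNs; the extra bytes are skipped when the
// run ends at the last expert.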
padding : 0; ggml_backend_tensor_set_async(split_backend, input_cpy, (const uint8_t *)input->data + expert_offset, expert_offset, // copy a bit extra at the to ensure there are no NaNs in the padding of the last expert // this is necessary for MMQ in the CUDA backend expert_size_copy + padding_end); }; int id = 0; while (!ggml_bitset_get(used_ids.data(), id)) { id++; } int32_t first_id = id; int32_t last_id = first_id; for (++id; id < n_expert; ++id) { if (!ggml_bitset_get(used_ids.data(), id)) { continue; } if (id == last_id + 1) { last_id = id; continue; } copy_experts(first_id, last_id); first_id = id; last_id = id; } copy_experts(first_id, last_id); } else { // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) { ggml_backend_synchronize(input_backend); if (sched->events[split_backend_id][sched->cur_copy] != NULL) { ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); } else { ggml_backend_synchronize(split_backend); } ggml_backend_tensor_copy(input, input_cpy); } } } } if (!sched->callback_eval) { enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph); if (ec != GGML_STATUS_SUCCESS) { return ec; } } else { // similar to ggml_backend_compare_graph_backend for (int j0 = 0; j0 < split->graph.n_nodes; j0++) { struct ggml_tensor * t = split->graph.nodes[j0]; // check if the user needs data from this node bool need = sched->callback_eval(t, true, sched->callback_eval_user_data); int j1 = j0; // determine the range [j0, j1] of nodes that can be computed together while (!need && j1 < split->graph.n_nodes - 1) { t = split->graph.nodes[++j1]; need = sched->callback_eval(t, true, sched->callback_eval_user_data); } struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1); enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv); if (ec != GGML_STATUS_SUCCESS) { return ec; } // TODO: pass backend to the callback, then the user can decide if they want to synchronize ggml_backend_synchronize(split_backend); if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) { break; } j0 = j1; } } // record the event of this copy if (split->n_inputs > 0) { if (sched->events[split_backend_id][sched->cur_copy] != NULL) { ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend); } } } return GGML_STATUS_SUCCESS; } ggml_backend_sched_t ggml_backend_sched_new( ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload) { GGML_ASSERT(n_backends > 0); GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS); GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU); struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched)); const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG"); sched->debug = GGML_SCHED_DEBUG ? 
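// [Editorial note] Scheduler debugging is controlled by the environment variables read
// here: GGML_SCHED_DEBUG=1 logs the split layout, GGML_SCHED_DEBUG=2 additionally logs
// every node with its assigned backend and sources, and GGML_SCHED_DEBUG_REALLOC (or
// building with GGML_SCHED_NO_REALLOC) aborts on graph reallocations; level 1 only aborts
// on reallocations that are unexpected (unchanged backend assignments and graph size),
// while higher levels abort on any reallocation.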
atoi(GGML_SCHED_DEBUG) : 0; sched->debug_realloc = 0; #ifdef GGML_SCHED_NO_REALLOC sched->debug_realloc = 1; #endif const char * GGML_SCHED_DEBUG_REALLOC = getenv("GGML_SCHED_DEBUG_REALLOC"); sched->debug_realloc = GGML_SCHED_DEBUG_REALLOC ? atoi(GGML_SCHED_DEBUG_REALLOC) : sched->debug_realloc; sched->n_backends = n_backends; sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1; // initialize hash table // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead) sched->hash_set = ggml_hash_set_new(graph_size); sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); sched->hv_tensor_copies = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2; sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0])); sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0])); sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0])); sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0])); sched->debug_graph_size = 0; sched->debug_prev_graph_size = 0; sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false); sched->context_buffer = (char *) malloc(sched->context_buffer_size); const int initial_splits_capacity = 16; sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0])); sched->splits_capacity = initial_splits_capacity; for (int b = 0; b < n_backends; b++) { sched->backends[b] = backends[b]; sched->bufts[b] = bufts ? 
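// [Editorial addition, not in the original source] Typical construction of a scheduler,
// shown as a sketch; `gpu_backend`, `cpu_backend`, `measure_graph` and `graph` are
// hypothetical handles owned by the caller, and the CPU backend must come last:
//
//   ggml_backend_t backends[2] = { gpu_backend, cpu_backend };
//   ggml_backend_sched_t sched = ggml_backend_sched_new(backends, /*bufts=*/NULL, 2,
//                                                       GGML_DEFAULT_GRAPH_SIZE,
//                                                       /*parallel=*/false, /*op_offload=*/true);
//   ggml_backend_sched_reserve(sched, measure_graph);   // optional: pre-allocate once
//   ggml_backend_sched_graph_compute(sched, graph);
//   ggml_backend_sched_free(sched);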
bufts[b] : ggml_backend_get_default_buffer_type(backends[b]); GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b])); if (sched->n_copies > 1) { for (int c = 0; c < sched->n_copies; c++) { sched->events[b][c] = ggml_backend_event_new(backends[b]->device); } } } sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends); sched->op_offload = op_offload; ggml_backend_sched_reset(sched); return sched; } void ggml_backend_sched_free(ggml_backend_sched_t sched) { if (sched == NULL) { return; } for (int b = 0; b < sched->n_backends; b++) { for (int c = 0; c < sched->n_copies; c++) { ggml_backend_event_free(sched->events[b][c]); } } ggml_gallocr_free(sched->galloc); ggml_free(sched->ctx); ggml_hash_set_free(&sched->hash_set); free(sched->splits); free(sched->hv_tensor_backend_ids); free(sched->hv_tensor_copies); free(sched->node_backend_ids); free(sched->leaf_backend_ids); free(sched->prev_node_backend_ids); free(sched->prev_leaf_backend_ids); free(sched->context_buffer); free(sched->graph.nodes); free(sched->graph.leafs); free(sched); } void ggml_backend_sched_reset(ggml_backend_sched_t sched) { GGML_ASSERT(sched); // reset state for the next run if (!sched->is_reset) { ggml_hash_set_reset(&sched->hash_set); memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); sched->is_reset = true; } sched->is_alloc = false; } void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes) { GGML_ASSERT(sched); GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); GGML_ASSERT(sizes); ggml_backend_sched_reset(sched); ggml_backend_sched_synchronize(sched); ggml_backend_sched_split_graph(sched, measure_graph); ggml_gallocr_reserve_n_size(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids, sizes); } bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { GGML_ASSERT(sched); GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); ggml_backend_sched_synchronize(sched); ggml_backend_sched_split_graph(sched, measure_graph); if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) { return false; } ggml_backend_sched_reset(sched); return true; } bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { GGML_ASSERT(sched); GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs); GGML_ASSERT(!sched->is_alloc); sched->cur_copy = sched->next_copy; sched->next_copy = (sched->next_copy + 1) % sched->n_copies; ggml_backend_sched_split_graph(sched, graph); if (!ggml_backend_sched_alloc_splits(sched)) { return false; } sched->is_alloc = true; return true; } enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph); ggml_backend_sched_synchronize(sched); return err; } enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { GGML_ASSERT(sched); if (!sched->is_reset && !sched->is_alloc) { ggml_backend_sched_reset(sched); } if (!sched->is_alloc) { if (!ggml_backend_sched_alloc_graph(sched, graph)) { return GGML_STATUS_ALLOC_FAILED; } } return 
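// [Editorial addition, not in the original source] Sketch of the eval callback used by the
// compute path invoked on the next line, assuming the callback signature declared in
// ggml-backend.h (tensor, ask flag, user data). The callback is invoked with ask=true to
// query whether a node should be inspected and with ask=false after the node has been
// computed; returning false from the second call stops the current split early.
//
//   static bool observe(struct ggml_tensor * t, bool ask, void * user_data) {
//       if (ask) { return strcmp(t->name, "logits") == 0; }  // hypothetical tensor name
//       /* inspect t->data here */
//       return true;  // keep computing
//   }
//   ggml_backend_sched_set_eval_callback(sched, observe, NULL);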
ggml_backend_sched_compute_splits(sched); } void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { GGML_ASSERT(sched); for (int i = 0; i < sched->n_backends; i++) { ggml_backend_synchronize(sched->backends[i]); } if (!sched->is_alloc) { // if the graph is not already allocated, always use copy 0 after a synchronization // this ensures that during generation the same copy is used every time, // which avoids changes in the graph that could cause CUDA or other graphs to be disabled sched->next_copy = 0; } } void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { GGML_ASSERT(sched); sched->callback_eval = callback; sched->callback_eval_user_data = user_data; } int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { GGML_ASSERT(sched); return sched->n_splits; } int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) { GGML_ASSERT(sched); return sched->n_copies; } int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) { GGML_ASSERT(sched); return sched->n_backends; } ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) { GGML_ASSERT(sched); GGML_ASSERT(i >= 0 && i < sched->n_backends); return sched->backends[i]; } ggml_backend_buffer_type_t ggml_backend_sched_get_buffer_type(ggml_backend_sched_t sched, ggml_backend_t backend) { GGML_ASSERT(sched); int backend_index = ggml_backend_sched_backend_id(sched, backend); GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); return sched->bufts[backend_index]; } size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) { GGML_ASSERT(sched); int backend_index = ggml_backend_sched_backend_id(sched, backend); GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); return ggml_gallocr_get_buffer_size(sched->galloc, backend_index); } void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { GGML_ASSERT(sched); int backend_index = ggml_backend_sched_backend_id(sched, backend); GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); tensor_backend_id(node) = backend_index; SET_CAUSE(node, "usr"); sched->is_reset = false; } ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) { GGML_ASSERT(sched); int backend_index = tensor_backend_id(node); if (backend_index == -1) { return NULL; } return sched->backends[backend_index]; } // utils enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor) { GGML_ASSERT(tensor); GGML_ASSERT(tensor->buffer == NULL); GGML_ASSERT(tensor->view_src != NULL); GGML_ASSERT(tensor->view_src->buffer != NULL); GGML_ASSERT(tensor->view_src->data != NULL); tensor->buffer = tensor->view_src->buffer; tensor->data = (char *)tensor->view_src->data + tensor->view_offs; return ggml_backend_buffer_init_tensor(tensor->buffer, tensor); } enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) { GGML_ASSERT(tensor); GGML_ASSERT(tensor->buffer == NULL); GGML_ASSERT(tensor->data == NULL); GGML_ASSERT(tensor->view_src == NULL); GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer)); GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <= (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer)); tensor->buffer = buffer; tensor->data = addr; return ggml_backend_buffer_init_tensor(buffer, 
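// [Editorial note] ggml_backend_view_init and ggml_backend_tensor_alloc above are the
// low-level hooks used by ggml-alloc: a view inherits the buffer and data pointer of its
// view_src (plus view_offs), while tensor_alloc binds a tensor to a caller-chosen address
// inside an existing buffer, asserting that the address range stays within the buffer.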
tensor); } static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) { GGML_ASSERT(src != NULL); GGML_ASSERT(src->data && "graph must be allocated"); size_t id = ggml_hash_insert(&hash_set, src); if (id == GGML_HASHSET_ALREADY_EXISTS) { return node_copies[ggml_hash_find(&hash_set, src)]; } struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src); if (src->view_src != NULL) { dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src); dst->view_offs = src->view_offs; } dst->op = src->op; memcpy(dst->op_params, src->op_params, sizeof(dst->op_params)); ggml_set_name(dst, src->name); // copy src for (int i = 0; i < GGML_MAX_SRC; i++) { struct ggml_tensor * s = src->src[i]; if (s == NULL) { continue; } dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s); } node_copies[id] = dst; return dst; } static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) { size_t id = ggml_hash_find(hash_set, src); if (node_init[id]) { return; } node_init[id] = true; struct ggml_tensor * dst = node_copies[id]; if (dst->view_src != NULL) { graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src); enum ggml_status status = ggml_backend_view_init(dst); GGML_ASSERT(status == GGML_STATUS_SUCCESS); } else { ggml_backend_tensor_copy(src, dst); } // init src for (int i = 0; i < GGML_MAX_SRC; i++) { struct ggml_tensor * s = src->src[i]; if (s == NULL) { continue; } graph_copy_init_tensor(hash_set, node_copies, node_init, s); } } struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) { GGML_ASSERT(graph); struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size); struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0])); struct ggml_init_params params = { /* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false), /* .mem_buffer = */ NULL, /* .no_alloc = */ true }; struct ggml_context * ctx_allocated = ggml_init(params); struct ggml_context * ctx_unallocated = ggml_init(params); if (ctx_allocated == NULL || ctx_unallocated == NULL) { GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__); ggml_hash_set_free(&hash_set); free(node_copies); free(node_init); ggml_free(ctx_allocated); ggml_free(ctx_unallocated); return { /* .buffer = */ NULL, /* .ctx_allocated = */ NULL, /* .ctx_unallocated = */ NULL, /* .graph = */ NULL, }; } // dup nodes for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node); } // allocate nodes ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend); if (buffer == NULL) { GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__); ggml_hash_set_free(&hash_set); free(node_copies); free(node_init); ggml_free(ctx_allocated); ggml_free(ctx_unallocated); return { /* .buffer = */ NULL, /* .ctx_allocated = */ NULL, /* .ctx_unallocated = */ NULL, /* .graph = */ NULL, }; } //printf("copy buffer 
size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024); // copy data and init views for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; graph_copy_init_tensor(&hash_set, node_copies, node_init, node); } // build graph copy struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false); for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)]; graph_copy->nodes[i] = node_copy; } graph_copy->n_nodes = graph->n_nodes; ggml_hash_set_free(&hash_set); free(node_copies); free(node_init); return { /* .buffer = */ buffer, /* .ctx_allocated = */ ctx_allocated, /* .ctx_unallocated = */ ctx_unallocated, /* .graph = */ graph_copy, }; } void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { ggml_backend_buffer_free(copy.buffer); ggml_free(copy.ctx_allocated); ggml_free(copy.ctx_unallocated); } bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node) { struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); if (copy.buffer == NULL) { return false; } struct ggml_cgraph * g1 = graph; struct ggml_cgraph * g2 = copy.graph; assert(g1->n_nodes == g2->n_nodes); if (test_node != nullptr) { // Compute the whole graph and only test the output for a specific tensor ggml_backend_graph_compute(backend1, g1); ggml_backend_graph_compute(backend2, g2); int test_node_idx = -1; for (int i = 0; i < g1->n_nodes; i++) { struct ggml_tensor * t1 = g1->nodes[i]; if (t1 == test_node) { test_node_idx = i; break; } } GGML_ASSERT(test_node_idx != -1); callback(test_node_idx, g1->nodes[test_node_idx], g2->nodes[test_node_idx], user_data); } else { for (int i = 0; i < g1->n_nodes; i++) { struct ggml_tensor * t1 = g1->nodes[i]; struct ggml_tensor * t2 = g2->nodes[i]; assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); ggml_backend_graph_compute(backend1, &g1v); ggml_backend_graph_compute(backend2, &g2v); if (ggml_is_view_op(t1->op)) { continue; } // compare results, calculate rms etc if (!callback(i, t1, t2, user_data)) { break; } } } ggml_backend_graph_copy_free(copy); return true; } // CPU backend - buffer static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); uintptr_t data = (uintptr_t)buffer->context; // align the buffer if (data % TENSOR_ALIGNMENT != 0) { data = GGML_PAD(data, TENSOR_ALIGNMENT); } return (void *)data; } static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { GGML_ASSERT(buffer); ggml_aligned_free(buffer->context, buffer->size); } static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { GGML_ASSERT(tensor); memset((char *)tensor->data + offset, value, size); GGML_UNUSED(buffer); } static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(tensor); memcpy((char *)tensor->data + offset, data, size); GGML_UNUSED(buffer); } static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, 
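// [Editorial note] The CPU buffer keeps its allocation pointer directly in
// buffer->context; get_base above rounds it up to TENSOR_ALIGNMENT so tensor data is
// always aligned, and the set/get/memset callbacks are plain memcpy/memset at
// tensor->data plus the requested offset. The "_from_ptr" variant further below reuses
// the same callbacks but never frees the pointer it wraps.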
size_t size) { GGML_ASSERT(tensor); memcpy(data, (const char *)tensor->data + offset, size); GGML_UNUSED(buffer); } static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { GGML_ASSERT(src); if (ggml_backend_buffer_is_host(src->buffer)) { memcpy(dst->data, src->data, ggml_nbytes(src)); return true; } return false; GGML_UNUSED(buffer); } static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { GGML_ASSERT(buffer); memset(buffer->context, value, buffer->size); } static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = { /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer, /* .get_base = */ ggml_backend_cpu_buffer_get_base, /* .init_tensor = */ NULL, // no initialization required /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, /* .clear = */ ggml_backend_cpu_buffer_clear, /* .reset = */ NULL, }; static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = { /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed /* .get_base = */ ggml_backend_cpu_buffer_get_base, /* .init_tensor = */ NULL, // no initialization required /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, /* .clear = */ ggml_backend_cpu_buffer_clear, /* .reset = */ NULL, }; // CPU backend buffer type // this buffer type is defined here to make it available to all backends static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "CPU"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * data = ggml_aligned_malloc(size); if (data == NULL) { GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size); return NULL; } return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size); } static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return TENSOR_ALIGNMENT; GGML_UNUSED(buft); } static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return true; GGML_UNUSED(buft); } ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { /* .iface = */ { /* .get_name = */ ggml_backend_cpu_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, }, /* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ NULL, }; return &ggml_backend_cpu_buffer_type; } static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) { return "CPU_Mapped"; GGML_UNUSED(buft); } static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { /* .iface = */ { /* .get_name = */ 
ggml_backend_cpu_buffer_from_ptr_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, }, /* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ NULL, }; return &ggml_backend_cpu_buffer_type; } ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) { GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned"); return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size); } ggml-org-ggml-3678254/src/ggml-blas/000077500000000000000000000000001512524704700170205ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-blas/CMakeLists.txt000066400000000000000000000071571512524704700215720ustar00rootroot00000000000000if (GGML_STATIC) set(BLA_STATIC ON) endif() #if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) # set(BLA_SIZEOF_INTEGER 8) #endif() set(BLA_VENDOR ${GGML_BLAS_VENDOR}) find_package(BLAS) if (BLAS_FOUND) message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") ggml_add_backend_library(ggml-blas ggml-blas.cpp ) if (${GGML_BLAS_VENDOR} MATCHES "Apple") add_compile_definitions(ACCELERATE_NEW_LAPACK) add_compile_definitions(ACCELERATE_LAPACK_ILP64) add_compile_definitions(GGML_BLAS_USE_ACCELERATE) elseif ("${BLAS_INCLUDE_DIRS}" STREQUAL "") # BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake. # see https://gitlab.kitware.com/cmake/cmake/-/issues/20268 find_package(PkgConfig REQUIRED) if (${GGML_BLAS_VENDOR} MATCHES "Generic") pkg_check_modules(DepBLAS blas) elseif (${GGML_BLAS_VENDOR} MATCHES "OpenBLAS") # As of openblas v0.3.22, the 64-bit is named openblas64.pc pkg_check_modules(DepBLAS openblas64) if (NOT DepBLAS_FOUND) pkg_check_modules(DepBLAS openblas) endif() elseif (${GGML_BLAS_VENDOR} MATCHES "FLAME") add_compile_definitions(GGML_BLAS_USE_BLIS) pkg_check_modules(DepBLAS blis) elseif (${GGML_BLAS_VENDOR} MATCHES "ATLAS") pkg_check_modules(DepBLAS blas-atlas) elseif (${GGML_BLAS_VENDOR} MATCHES "FlexiBLAS") pkg_check_modules(DepBLAS flexiblas_api) elseif (${GGML_BLAS_VENDOR} MATCHES "Intel") add_compile_definitions(GGML_BLAS_USE_MKL) # all Intel* libraries share the same include path pkg_check_modules(DepBLAS mkl-sdl) elseif (${GGML_BLAS_VENDOR} MATCHES "NVHPC") # this doesn't provide pkg-config # suggest to assign BLAS_INCLUDE_DIRS on your own if ("${NVHPC_VERSION}" STREQUAL "") message(WARNING "Better to set NVHPC_VERSION") else() set(DepBLAS_FOUND ON) set(DepBLAS_INCLUDE_DIRS "/opt/nvidia/hpc_sdk/${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}/${NVHPC_VERSION}/math_libs/include") endif() endif() if (DepBLAS_FOUND) set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS}) else() message(WARNING "BLAS_INCLUDE_DIRS neither been provided nor been automatically" " detected by pkgconfig, trying to find cblas.h from possible paths...") find_path(BLAS_INCLUDE_DIRS NAMES cblas.h HINTS /usr/include /usr/local/include /usr/include/openblas /opt/homebrew/opt/openblas/include /usr/local/opt/openblas/include /usr/include/x86_64-linux-gnu/openblas/include ) endif() endif() message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") target_compile_options(ggml-blas PRIVATE ${BLAS_LINKER_FLAGS}) if ("${BLAS_INCLUDE_DIRS}" MATCHES "mkl" AND (${GGML_BLAS_VENDOR} MATCHES "Generic" OR ${GGML_BLAS_VENDOR} 
MATCHES "Intel")) add_compile_definitions(GGML_BLAS_USE_MKL) endif() target_link_libraries (ggml-blas PRIVATE ${BLAS_LIBRARIES}) target_include_directories(ggml-blas PRIVATE ${BLAS_INCLUDE_DIRS}) else() message(FATAL_ERROR "BLAS not found, please refer to " "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" " to set correct GGML_BLAS_VENDOR") endif() ggml-org-ggml-3678254/src/ggml-blas/ggml-blas.cpp000066400000000000000000000411341512524704700213740ustar00rootroot00000000000000#include "ggml-impl.h" #include "ggml-blas.h" #include "ggml-backend-impl.h" #include #include #include #if defined(GGML_BLAS_USE_ACCELERATE) # include #elif defined(GGML_BLAS_USE_MKL) # include #elif defined(GGML_BLAS_USE_BLIS) # include #elif defined(GGML_BLAS_USE_NVPL) # include #else # include #endif struct ggml_backend_blas_context { int n_threads = GGML_DEFAULT_N_THREADS; std::unique_ptr work_data; size_t work_size = 0; #ifndef GGML_USE_OPENMP std::vector> tasks; #endif }; static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const enum ggml_type type = src0->type; GGML_ASSERT(ne0 == ne01); GGML_ASSERT(ne1 == ne11); GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == ggml_type_size(type)); GGML_ASSERT(nb10 == ggml_type_size(src1->type)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); // broadcast factors const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; const int64_t ne_plane = ne01*ne00; const size_t desired_wsize = type == GGML_TYPE_F32 ? 
0 : ne03*ne02*ne_plane*sizeof(float); if (ctx->work_size < desired_wsize) { ctx->work_data.reset(new char[desired_wsize]); ctx->work_size = desired_wsize; } void * wdata = ctx->work_data.get(); // convert src0 to float if (type != GGML_TYPE_F32) { const auto * type_traits = ggml_get_type_traits(type); ggml_to_float_t const to_float = type_traits->to_float; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { const void * x = (char *) src0->data + i02*nb02 + i03*nb03; float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; const int min_cols_per_thread = 4096; const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1); #ifdef GGML_USE_OPENMP #pragma omp parallel for num_threads(n_threads) for (int64_t i01 = 0; i01 < ne01; i01++) { to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); } #else for (int i = 1; i < n_threads; i++) { const int64_t start = i*ne01/n_threads; const int64_t end = (i + 1)*ne01/n_threads; if (start < end) { ctx->tasks.push_back(std::async(std::launch::async, [=]() { for (int64_t i01 = start; i01 < end; i01++) { to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); } })); } } { // reuse the current thread for the first task const int64_t start = 0; const int64_t end = ne01/n_threads; for (int64_t i01 = start; i01 < end; i01++) { to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); } } #endif } } #ifndef GGML_USE_OPENMP // wait for all tasks to finish for (auto & task : ctx->tasks) { task.get(); } ctx->tasks.clear(); #endif } #if defined(OPENBLAS_VERSION) openblas_set_num_threads(ctx->n_threads); #endif #if defined(GGML_BLAS_USE_BLIS) bli_thread_set_num_threads(ctx->n_threads); #endif #if defined(GGML_BLAS_USE_NVPL) nvpl_blas_set_num_threads(ctx->n_threads); #endif for (int64_t i13 = 0; i13 < ne13; i13++) { for (int64_t i12 = 0; i12 < ne12; i12++) { const int64_t i03 = i13/r3; const int64_t i02 = i12/r2; const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); if (type != GGML_TYPE_F32) { x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; } cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, ne1, ne01, ne10, 1.0f, y, ne10, x, ne00, 0.0f, d, ne01); } } } static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS GGML_ASSERT(ne0 == ne00); GGML_ASSERT(ne1 == ne10); GGML_ASSERT(ne2 == ne02); GGML_ASSERT(ne02 == ne12); GGML_ASSERT(ne3 == ne13); GGML_ASSERT(ne03 == ne13); // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == sizeof(float)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); // GGML_ASSERT(nb0 <= nb1); // GGML_ASSERT(nb1 <= nb2); // GGML_ASSERT(nb2 <= nb3); // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) // src0: (k,n) // src1: (k,m) // dst: (m,n) // // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) // Also expressed as (major,minor) // a: (m,k): so src1 transposed // b: (k,n): so src0 // c: (m,n) // // However, if ggml_is_transposed(src1) is true, then // src1->data already contains a transposed version, so sgemm mustn't // transpose it further. 
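// A concrete instance of the mapping above (illustrative shape values only, not
// taken from any real model):
//   src0->ne = {4, 3, ...}  ->  n = 4, k = 3
//   src1->ne = {5, 3, ...}  ->  m = 5 (contiguous, not transposed)
// so transposeA = CblasTrans and lda = m = 5, and the cblas_sgemm call below
// computes the 5x4 row-major result
//   c = a^T * b, i.e. dst(m,n) = src1^T(m,k) * src0(k,n),
// with ldb = n = 4 and ldc = n = 4.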
int n = src0->ne[0]; int k = src0->ne[1]; int m = src1->ne[0]; CBLAS_TRANSPOSE transposeA; int lda; if (!ggml_is_transposed(src1)) { transposeA = CblasTrans; lda = m; } else { transposeA = CblasNoTrans; lda = k; } float * a = (float *) ((char *) src1->data); float * b = (float *) ((char *) src0->data); float * c = (float *) ((char *) dst->data); cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n); GGML_UNUSED(ctx); } // backend interface static const char * ggml_backend_blas_get_name(ggml_backend_t backend) { return "BLAS"; GGML_UNUSED(backend); } static void ggml_backend_blas_free(ggml_backend_t backend) { ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; delete ctx; delete backend; } static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; for (int i = 0; i < cgraph->n_nodes; i++) { struct ggml_tensor * node = cgraph->nodes[i]; switch (node->op) { case GGML_OP_MUL_MAT: ggml_backend_blas_mul_mat(ctx, node); break; case GGML_OP_OUT_PROD: ggml_backend_blas_out_prod(ctx, node); break; case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: break; default: GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node)); } } return GGML_STATUS_SUCCESS; GGML_UNUSED(backend); } static struct ggml_backend_i blas_backend_i = { /* .get_name = */ ggml_backend_blas_get_name, /* .free = */ ggml_backend_blas_free, /* .set_tensor_async = */ NULL, /* .get_tensor_async = */ NULL, /* .cpy_tensor_async = */ NULL, /* .synchronize = */ NULL, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_blas_graph_compute, /* .event_record = */ NULL, /* .event_wait = */ NULL, /* .graph_optimize = */ NULL, }; static ggml_guid_t ggml_backend_blas_guid(void) { static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d }; return &guid; } ggml_backend_t ggml_backend_blas_init(void) { ggml_backend_blas_context * ctx = new ggml_backend_blas_context; ggml_backend_t backend = new ggml_backend { /* .guid = */ ggml_backend_blas_guid(), /* .iface = */ blas_backend_i, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), /* .context = */ ctx, }; #if defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP) if (openblas_get_parallel() != OPENBLAS_OPENMP) { GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__); } #endif #if defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP) GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__); #endif return backend; } bool ggml_backend_is_blas(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid()); } void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) { GGML_ASSERT(ggml_backend_is_blas(backend_blas)); ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context; ctx->n_threads = n_threads; } // device interface static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) { return "BLAS"; GGML_UNUSED(dev); } static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t 
dev) { #if defined(GGML_BLAS_USE_ACCELERATE) return "Accelerate"; #elif defined(GGML_BLAS_USE_MKL) return "MKL"; #elif defined(GGML_BLAS_USE_BLIS) return "BLIS"; #elif defined(GGML_BLAS_USE_NVPL) return "NVPL"; #elif defined(OPENBLAS_VERSION) return "OpenBLAS"; #else return "BLAS"; #endif GGML_UNUSED(dev); } static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { // TODO *free = 0; *total = 0; GGML_UNUSED(dev); } static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend_dev_t dev) { return GGML_BACKEND_DEVICE_TYPE_ACCEL; GGML_UNUSED(dev); } static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_blas_device_get_name(dev); props->description = ggml_backend_blas_device_get_description(dev); props->type = ggml_backend_blas_device_get_type(dev); ggml_backend_blas_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = { /* .async = */ false, /* .host_buffer = */ false, /* .buffer_from_host_ptr = */ true, /* .events = */ false, }; } static ggml_backend_t ggml_backend_blas_device_init_backend(ggml_backend_dev_t dev, const char * params) { return ggml_backend_blas_init(); GGML_UNUSED(dev); GGML_UNUSED(params); } static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) { return ggml_backend_cpu_buffer_type(); GGML_UNUSED(dev); } static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { return ggml_backend_cpu_buffer_from_ptr(ptr, size); GGML_UNUSED(dev); GGML_UNUSED(max_tensor_size); } static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; switch (op->op) { case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: return true; case GGML_OP_MUL_MAT: { // BLAS usually is only faster for large matrices const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const int64_t ne10 = src1->ne[0]; const int64_t ne0 = op->ne[0]; const int64_t ne1 = op->ne[1]; // TODO: find the optimal value const int64_t min_batch = 32; return ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1->type == GGML_TYPE_F32 && (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) && (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); } case GGML_OP_OUT_PROD: return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && ggml_is_matrix(src0) && ggml_is_matrix(src1) && ggml_is_contiguous(src0) && (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) && (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); default: return false; } GGML_UNUSED(dev); } static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { return ggml_backend_buft_is_host(buft); GGML_UNUSED(dev); } static const struct ggml_backend_device_i ggml_backend_blas_device_i = { /* .get_name = */ ggml_backend_blas_device_get_name, /* .get_description = */ ggml_backend_blas_device_get_description, /* .get_memory = */ ggml_backend_blas_device_get_memory, /* .get_type = */ ggml_backend_blas_device_get_type, /* .get_props = */ ggml_backend_blas_device_get_props, /* .init_backend = */ 
ggml_backend_blas_device_init_backend, /* .get_buffer_type = */ ggml_backend_blas_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_host_ptr, /* .supports_op = */ ggml_backend_blas_device_supports_op, /* .supports_buft = */ ggml_backend_blas_device_supports_buft, /* .offload_op = */ NULL, /* .event_new = */ NULL, /* .event_free = */ NULL, /* .event_synchronize = */ NULL, }; // backend reg interface static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) { return "BLAS"; GGML_UNUSED(reg); } static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) { return 1; GGML_UNUSED(reg); } static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(index == 0); static ggml_backend_device ggml_backend_blas_device = { /* .iface = */ ggml_backend_blas_device_i, /* .reg = */ reg, /* .context = */ nullptr, }; return &ggml_backend_blas_device; GGML_UNUSED(reg); GGML_UNUSED(index); } static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) { if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) { return (void *)ggml_backend_blas_set_n_threads; } return NULL; GGML_UNUSED(reg); GGML_UNUSED(name); } static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { /* .get_name = */ ggml_backend_blas_reg_get_name, /* .get_device_count = */ ggml_backend_blas_reg_get_device_count, /* .get_device = */ ggml_backend_blas_reg_get_device, /* .get_proc_address = */ ggml_backend_blas_get_proc_address, }; ggml_backend_reg_t ggml_backend_blas_reg(void) { static struct ggml_backend_reg ggml_backend_blas_reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_blas_reg_i, /* .context = */ NULL, }; return &ggml_backend_blas_reg; } GGML_BACKEND_DL_IMPL(ggml_backend_blas_reg) ggml-org-ggml-3678254/src/ggml-cann/000077500000000000000000000000001512524704700170165ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cann/CMakeLists.txt000077500000000000000000000066241512524704700215710ustar00rootroot00000000000000if ("cann${CANN_INSTALL_DIR}" STREQUAL "cann" AND DEFINED ENV{ASCEND_TOOLKIT_HOME}) set(CANN_INSTALL_DIR $ENV{ASCEND_TOOLKIT_HOME}) message(STATUS "CANN: updated CANN_INSTALL_DIR from ASCEND_TOOLKIT_HOME=$ENV{ASCEND_TOOLKIT_HOME}") endif() # Auto-detech Soc type and Soc version, if detect failed, will abort build set(SOC_VERSION "") function(detect_ascend_soc_type SOC_VERSION) execute_process( COMMAND bash -c "npu-smi info|awk -F' ' 'NF > 0 && NR==7 {print $3}'" OUTPUT_VARIABLE npu_info RESULT_VARIABLE npu_result OUTPUT_STRIP_TRAILING_WHITESPACE ) if("${npu_info}" STREQUAL "" OR ${npu_result}) message(FATAL_ERROR "Auto-detech ascend soc type failed, please specify manually or check ascend device working normally.") endif() set(${SOC_VERSION} "Ascend${npu_info}" PARENT_SCOPE) endfunction() if(NOT SOC_TYPE) detect_ascend_soc_type(SOC_VERSION) set(SOC_TYPE "${SOC_VERSION}") message(STATUS "CANN: SOC_VERSION auto-detected is:${SOC_VERSION}") endif() string(TOLOWER ${SOC_TYPE} SOC_VERSION) # SOC_VERSION need lower # Construct Soc specify compile option: ASCEND_#Soc_Major_SN. Such as ASCEND_910B, ASCEND_310P. 
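# Illustrative walk-through of the transformation below (the device name is an
# example only; the detected value depends on the actual NPU):
#   SOC_TYPE                      = "Ascend910B1"
#   SOC_VERSION (after TOLOWER)   = "ascend910b1"
#   REGEX MATCH "[0-9]+[a-zA-Z]"  -> SOC_TYPE_MAJOR_SN = "910b"
#   SOC_TYPE_COMPILE_OPTION       = "ASCEND_910B" (after TOUPPER),
#   later passed to the compiler as -DASCEND_910B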
string(REGEX MATCH "[0-9]+[a-zA-Z]" SOC_TYPE_MAJOR_SN "${SOC_VERSION}") set(SOC_TYPE_COMPILE_OPTION "ASCEND_${SOC_TYPE_MAJOR_SN}") string(TOUPPER ${SOC_TYPE_COMPILE_OPTION} SOC_TYPE_COMPILE_OPTION) message(STATUS "CANN: SOC_VERSION = ${SOC_VERSION}") option(USE_ACL_GRAPH "Enable CANN graph execution (ACL graph mode)" OFF) if(USE_ACL_GRAPH AND (SOC_TYPE_MAJOR_SN STREQUAL "310P" OR SOC_TYPE_COMPILE_OPTION STREQUAL "ASCEND_310P")) message(FATAL_ERROR "CANN Graph (ACL graph mode) is not supported on 310P devices. " "Please build with -DUSE_ACL_GRAPH=OFF or use a supported SOC.") endif() if (CANN_INSTALL_DIR) # Only Support Linux. if (NOT UNIX) message(FATAL_ERROR "CANN: CANN toolkit supports unix but not ${CMAKE_SYSTEM_NAME}") endif() # Supported platforms: x86-64, arm64 if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64") elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") else() message(FATAL_ERROR "CANN: CANN toolkit supports x86-64 and arm64 but not ${CMAKE_SYSTEM_PROCESSOR}") endif() # Set header and libs set(CANN_INCLUDE_DIRS ${CANN_INSTALL_DIR}/include ${CANN_INSTALL_DIR}/include/aclnn ${CANN_INSTALL_DIR}/acllib/include ) list(APPEND CANN_LIBRARIES ascendcl nnopbase opapi acl_op_compiler ) file(GLOB GGML_SOURCES_CANN "*.cpp") ggml_add_backend_library(ggml-cann ${GGML_SOURCES_CANN}) target_link_libraries(ggml-cann PRIVATE ${CANN_LIBRARIES}) target_include_directories(ggml-cann PRIVATE ${CANN_INCLUDE_DIRS}) target_link_directories(ggml-cann PRIVATE ${CANN_INSTALL_DIR}/lib64) target_compile_definitions(ggml-cann PRIVATE "-D${SOC_TYPE_COMPILE_OPTION}") if (USE_ACL_GRAPH) target_compile_definitions(ggml-cann PRIVATE USE_ACL_GRAPH) message(STATUS "CANN: USE_ACL_GRAPH is enabled.") else() message(STATUS "CANN: USE_ACL_GRAPH is disabled.") endif() message(STATUS "CANN: CANN_INCLUDE_DIRS = ${CANN_INCLUDE_DIRS}") message(STATUS "CANN: CANN_LIBRARIES = ${CANN_LIBRARIES}") else() message(FATAL_ERROR "CANN: Can't find CANN_INSTALL_DIR, did you forget to source set_var.sh?") endif() ggml-org-ggml-3678254/src/ggml-cann/acl_tensor.cpp000066400000000000000000000200431512524704700216520ustar00rootroot00000000000000/* * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "acl_tensor.h" #include #include aclDataType ggml_cann_type_mapping(ggml_type type) { switch (type) { case GGML_TYPE_F32: return ACL_FLOAT; case GGML_TYPE_F16: return ACL_FLOAT16; case GGML_TYPE_BF16: return ACL_BF16; case GGML_TYPE_I8: return ACL_INT8; case GGML_TYPE_I16: return ACL_INT16; case GGML_TYPE_I32: return ACL_INT32; case GGML_TYPE_Q4_0: return ACL_INT4; case GGML_TYPE_Q8_0: return ACL_INT8; case GGML_TYPE_I64: return ACL_INT64; default: return ACL_DT_UNDEFINED; } } acl_tensor_ptr ggml_cann_create_tensor(const ggml_tensor * tensor, int64_t * ne, size_t * nb, int64_t dims, aclFormat format, size_t offset) { // If tensor is bcasted, Up to GGML_MAX_DIMS additional dimensions will be // added. int64_t acl_ne[GGML_MAX_DIMS * 2], acl_stride[GGML_MAX_DIMS * 2]; if (ne == nullptr) { for (int i = 0; i < GGML_MAX_DIMS; i++) { acl_ne[i] = tensor->ne[i]; // The step size of acl is in elements. acl_stride[i] = tensor->nb[i] / ggml_element_size(tensor); } } else { // With bcast for (int i = 0; i < dims; i++) { acl_ne[i] = ne[i]; acl_stride[i] = nb[i] / ggml_element_size(tensor); } } int64_t final_dims = (dims == 0 ? GGML_MAX_DIMS : dims); int64_t acl_storage_len = 1; for (int i = 0; i < final_dims; i++) { acl_storage_len += (acl_ne[i] - 1) * acl_stride[i]; } size_t elem_offset = offset / ggml_element_size(tensor); acl_storage_len += elem_offset; // Reverse ne and stride. std::reverse(acl_ne, acl_ne + final_dims); std::reverse(acl_stride, acl_stride + final_dims); aclTensor * raw = aclCreateTensor(acl_ne, final_dims, ggml_cann_type_mapping(tensor->type), acl_stride, elem_offset, format, &acl_storage_len, 1, tensor->data); return acl_tensor_ptr(raw); } acl_int_array_ptr ggml_cann_create_int_array(const int64_t * value, uint64_t size) { aclIntArray * raw = aclCreateIntArray(value, size); return acl_int_array_ptr(raw); } acl_scalar_ptr ggml_cann_create_scalar(void * value, aclDataType dataType) { aclScalar * raw = aclCreateScalar(value, dataType); return acl_scalar_ptr(raw); } bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1) { for (int i = 0; i < GGML_MAX_DIMS; i++) { if (t1->ne[i] != t0->ne[i] && t1->ne[i] != 1) { return true; } } return false; } int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0, const ggml_tensor * src1, int64_t * bcast_src0_ne, int64_t * bcast_src1_ne, size_t * bcast_src0_nb, size_t * bcast_src1_nb) { GGML_ASSERT(ggml_can_repeat(src1, src0)); int bcast_dim_cnt = 0; for (int i = 0; i < GGML_MAX_DIMS; i++) { int64_t nr = src0->ne[i] / src1->ne[i]; bcast_src0_ne[bcast_dim_cnt] = src0->ne[i] / nr; bcast_src1_ne[bcast_dim_cnt] = src1->ne[i]; bcast_src0_nb[bcast_dim_cnt] = src0->nb[i]; bcast_src1_nb[bcast_dim_cnt] = src1->nb[i]; bcast_dim_cnt++; if (nr != 1) { // Need to add an extra dim. bcast_src0_ne[bcast_dim_cnt] = nr; bcast_src1_ne[bcast_dim_cnt] = 1; bcast_src0_nb[bcast_dim_cnt] = bcast_src0_nb[bcast_dim_cnt - 1] * bcast_src0_ne[bcast_dim_cnt - 1]; bcast_src1_nb[bcast_dim_cnt] = bcast_src1_nb[bcast_dim_cnt - 1] * bcast_src1_ne[bcast_dim_cnt - 1]; bcast_dim_cnt++; } } return bcast_dim_cnt; } int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne, const int64_t * weight_ne, const int64_t * dst_ne, const size_t * input_nb, const size_t * weight_nb, const size_t * dst_nb, int64_t * bcast_input_ne, int64_t * bcast_weight_ne, int64_t * bcast_dst_ne, size_t * bcast_input_nb, size_t * bcast_weight_nb, size_t * bcast_dst_nb) { // input and dst shoule in same shape, except first two dims. 
GGML_ASSERT(input_ne[2] == dst_ne[2]); GGML_ASSERT(input_ne[3] == dst_ne[3]); int bcast_dim_cnt = 0; // For mul_mat, a dimension needs to be added before the dimension that // weight needs to be expanded to satisfy the bcast rule of matrix // multiplication. for (int i = 0; i < GGML_MAX_DIMS; i++) { int64_t nr = input_ne[i] / weight_ne[i]; // Do not use bcast in the first two dimensions because we only support // the bcast batch dimension. Just copy them. if (i < 2 || nr == 1) { bcast_input_ne[bcast_dim_cnt] = input_ne[i]; bcast_weight_ne[bcast_dim_cnt] = weight_ne[i]; bcast_dst_ne[bcast_dim_cnt] = dst_ne[i]; bcast_input_nb[bcast_dim_cnt] = input_nb[i]; bcast_weight_nb[bcast_dim_cnt] = weight_nb[i]; bcast_dst_nb[bcast_dim_cnt] = dst_nb[i]; bcast_dim_cnt++; } else { // Need to add an extra dim. bcast_input_ne[bcast_dim_cnt] = nr; bcast_dst_ne[bcast_dim_cnt] = nr; bcast_weight_ne[bcast_dim_cnt] = 1; bcast_input_nb[bcast_dim_cnt] = input_nb[i]; bcast_dst_nb[bcast_dim_cnt] = dst_nb[i]; bcast_weight_nb[bcast_dim_cnt] = weight_nb[i]; bcast_dim_cnt++; bcast_input_ne[bcast_dim_cnt] = input_ne[i] / nr; bcast_dst_ne[bcast_dim_cnt] = dst_ne[i] / nr; bcast_weight_ne[bcast_dim_cnt] = weight_ne[i]; bcast_input_nb[bcast_dim_cnt] = bcast_input_nb[bcast_dim_cnt - 1] * bcast_input_ne[bcast_dim_cnt - 1]; bcast_dst_nb[bcast_dim_cnt] = bcast_dst_nb[bcast_dim_cnt - 1] * bcast_dst_ne[bcast_dim_cnt - 1]; bcast_weight_nb[bcast_dim_cnt] = bcast_weight_nb[bcast_dim_cnt - 1] * bcast_weight_ne[bcast_dim_cnt - 1]; bcast_dim_cnt++; } } return bcast_dim_cnt; } ggml-org-ggml-3678254/src/ggml-cann/acl_tensor.h000066400000000000000000000421161512524704700213240ustar00rootroot00000000000000/* * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef CANN_ACL_TENSOR_H #define CANN_ACL_TENSOR_H #include "common.h" #include #include #include /** * @brief Maps a ggml_type to its corresponding aclDataType. * * @details This function takes a ggml_type as input and returns the corresponding * aclDataType. It supports mapping for various ggml_types. If the input type * does not match any of the predefined ggml_types, the function returns * ACL_DT_UNDEFINED. * * @param type The ggml_type to be mapped. * @return The corresponding aclDataType. If the input type is not recognized, * ACL_DT_UNDEFINED is returned. */ aclDataType ggml_cann_type_mapping(ggml_type type); // Deleter for acl objects. 
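// The deleter below plugs the matching aclDestroy* function into std::unique_ptr,
// so the acl_*_ptr aliases release their ACL handles automatically (RAII).
// Usage sketch (illustrative only; `some_aclnn_op` is a placeholder for any
// ACLNN call that borrows an aclTensor *):
//
//   acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); // owns the aclTensor
//   some_aclnn_op(acl_src.get());                          // borrow the raw handle
//   // the matching aclDestroy* call runs when acl_src goes out of scope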
template struct acl_deleter { void operator()(T * ptr) const noexcept { if (ptr) { ACL_CHECK(DestroyFunc(ptr)); } } }; using acl_tensor_ptr = std::unique_ptr>; using acl_int_array_ptr = std::unique_ptr>; using acl_scalar_ptr = std::unique_ptr>; using acl_tensor_list_ptr = std::unique_ptr>; /** * @brief Creates an ACL tensor from a ggml_tensor with optional shape. * * @details This function creates an ACL tensor based on the properties of the * provided ggml_tensor. It supports customer shape by adjusting dimensions * and strides accordingly. If customer shape is applied, additional * dimensions and strides are calculated based on the provided parameters. * * @param tensor Pointer to the ggml_tensor to be converted to ACL tensor. * @param ne Pointer to an array containing dimensions. Defaults to nullptr * if no customer shape is applied. * @param nb Pointer to an array containing strides. Defaults to nullptr * if no customer shape is applied. * @param dims Number of dimensions in the tensor. Defaults to 0 if no customer * shape is applied. * @param format ACL tensor format. Defaults to ACL_FORMAT_ND. * @param offset Offset in bytes for the ACL tensor data. Defaults to 0. * @return Pointer to the created ACL tensor. */ acl_tensor_ptr ggml_cann_create_tensor(const ggml_tensor * tensor, int64_t * ne = nullptr, size_t * nb = nullptr, int64_t dims = 0, aclFormat format = ACL_FORMAT_ND, size_t offset = 0); /** * @brief Template for creating an ACL tensor from provided parameters. typename TYPE * should be size_t or float. * * @details This function creates an ACL tensor using the provided data pointer, * data type, dimensions, strides, format, offset, and additional parameters. * It calculates necessary dimensions and strides based on the provided ne and nb * arrays, adjusting them for the ACL tensor creation. The ACL storage length * is also calculated based on the provided dimensions and strides. * * @param data_ptr Pointer to the data buffer for the ACL tensor. * @param dtype ACL data type of the tensor. * @param type_size Size of each element in the tensor data buffer. * @param ne Pointer to an array containing tensor dimensions. * @param nb Pointer to an array containing tensor strides. * @param dims Number of dimensions of the tensor. * @param format ACL tensor format. Defaults to ACL_FORMAT_ND. * @param offset Offset in bytes for the ACL tensor data. Defaults to 0. * @return Pointer to the created ACL tensor. */ template acl_tensor_ptr ggml_cann_create_tensor(void * data_ptr, aclDataType dtype, TYPE type_size, int64_t * ne, TYPE * nb, int64_t dims, aclFormat format = ACL_FORMAT_ND, size_t offset = 0) { int64_t tmp_ne[GGML_MAX_DIMS * 2]; int64_t tmp_stride[GGML_MAX_DIMS * 2]; memcpy(tmp_ne, ne, dims * sizeof(int64_t)); for (int i = 0; i < dims; i++) { tmp_stride[i] = nb[i] / type_size; } int64_t acl_storage_len = 1; for (int i = 0; i < dims; i++) { acl_storage_len += (tmp_ne[i] - 1) * tmp_stride[i]; } std::reverse(tmp_ne, tmp_ne + dims); std::reverse(tmp_stride, tmp_stride + dims); aclTensor * raw = aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size, format, &acl_storage_len, 1, data_ptr); return acl_tensor_ptr(raw); } /** * @brief Create an ACL int array resource wrapped in a smart pointer. * * This function constructs an aclIntArray from the provided int64_t values * and returns it as an acl_int_array_ptr (a std::unique_ptr with a custom * deleter). The returned pointer owns the ACL resource and will automatically * destroy it via aclDestroyIntArray(). 
* * @param value Pointer to the int64_t elements. * @param size Number of elements in value. * * @return A smart pointer managing the created ACL int array. */ acl_int_array_ptr ggml_cann_create_int_array(const int64_t * value, uint64_t size); /** * @brief Create an ACL scalar resource wrapped in a smart pointer. * * This function constructs an aclScalar from the raw value pointer and ACL * data type, then returns it as an acl_scalar_ptr (a std::unique_ptr with * a custom deleter). The returned pointer owns the ACL scalar and will * automatically destroy it via aclDestroyScalar(). * * @param value Pointer to the raw scalar memory. * @param dataType ACL data type of the scalar. * * @return A smart pointer managing the created ACL scalar. */ acl_scalar_ptr ggml_cann_create_scalar(void * value, aclDataType dataType); /** * @brief Create an ACL tensor list from multiple tensor smart pointers. * * This function accepts a variadic list of acl_tensor_ptr (a unique_ptr with * custom deleter) and produces an aclTensorList using aclCreateTensorList(). * * The lifecycle management of the tensor objects changes as follows: * - aclCreateTensorList() takes ownership of the tensors * - Each input smart pointer releases ownership using release() * - As a result, the tensors will NOT be destroyed by unique_ptr * - Instead, they will be destroyed when aclDestroyTensorList() is called * * This ensures correct ownership transfer and prevents double-free situations. * * @param acl_tensor_ptr Variadic template parameter; each argument must be * a unique_ptr-like type supporting get() and release(). * * @param tensors Variadic list of acl_tensor_ptr objects. Ownership of * each tensor is transferred away from these smart pointers. * * @return A smart pointer (acl_tensor_list_ptr) owning the created ACL tensor list. * * @note This implementation is C++11 compatible. The ownership-release process is * executed using a pack expansion inside an initializer list. */ template acl_tensor_list_ptr ggml_cann_create_tensor_list(acl_tensor_ptr &&... tensors) { aclTensor * raw_tensors[] = { tensors.get()... }; aclTensorList * raw = aclCreateTensorList(raw_tensors, sizeof...(tensors)); // aclTensor will release by aclTensorList, so release ownership without // destroying the tensor int dummy[] = { (tensors.release(), 0)... }; GGML_UNUSED(dummy); return acl_tensor_list_ptr(raw); } /** * @brief Checks if tensors require broadcasting based on their shapes. * * @details This function determines if two ggml_tensors need to be broadcasted for * element-wise operations. Broadcasting is necessary if the shapes of the * tensors are not identical and no dimension in either tensor equals 1. * * @param t0 Pointer to the first ggml_tensor. * @param t1 Pointer to the second ggml_tensor. * @return True if broadcasting is needed, False otherwise. * * @remarks This function iterates over the dimensions of t0 and t1. It checks if each * dimension in t1 differs from t0's corresponding dimension and is not equal * to 1. If such a dimension is found, broadcasting is required to align t1 * with t0 for element-wise operations. */ bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1); /** * @brief Computes broadcast shapes and strides for two ggml_tensors. * * @details This function calculates the broadcast shapes and strides for two ggml_tensors, * following the broadcasting rules similar to numpy. 
It adjusts dimensions and * strides to ensure compatibility for element-wise operations where one tensor * can be broadcasted to match the shape of another tensor. * * @param src0 Pointer to the first ggml_tensor. * @param src1 Pointer to the second ggml_tensor. * @param bcast_ne_src0 Output array to store broadcasted dimensions for src0. * @param bcast_ne_src1 Output array to store broadcasted dimensions for src1. * @param bcast_nb_src0 Output array to store broadcasted strides for src0. * @param bcast_nb_src1 Output array to store broadcasted strides for src1. * @return Number of dimensions in the broadcasted shape. * * @pre ggml_can_repeat(src1, src0) must return true, indicating src1 can be broadcasted * to match src0. * * @remarks This function iterates over the dimensions of src0 and src1, calculating the * necessary broadcast dimensions and strides. If a dimension requires broadcasting * (i.e., its size in src1 is smaller than in src0), an additional dimension is * added with size calculated to match src0's dimension. This adjustment ensures * that src1 can be element-wise broadcasted to src0's shape. * * How it works: * * if dim0 has padding. * a -> (2, 2) padding = 2 * a: [[1, 2, *, *] * [2, 3, *, *]] * nb = (8, 4, 2) * * if a should bcast with b -> (2, 4) * b' -> (2, 2, 2) * b : [[1, 2, 3, 4, *, *] * [5, 6, 7, 8, *, *]] * nb = (12, 6, 1) * * after bcast: * a' -> (2, 1, 2) * a': [[[1, 2], *, *] * [[2, 3], *, *]] * nb = (8, 4, 2, 1) * * b' : [[[1, 2], [3, 4], *, *] * [[5, 6], [7, 8], *, *]] * nb = (12, 6, 2, 1) * \endcode * * dim1 in a inserted dim, should add nb for dim1, * and all other nb moves to next in order. */ int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0, const ggml_tensor * src1, int64_t * bcast_ne_src0, int64_t * bcast_ne_src1, size_t * bcast_nb_src0, size_t * bcast_nb_src1); // Bcast macro to avoid duplicate code. #define BCAST_SHAPE(src0, src1) \ int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \ int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \ size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \ size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \ int64_t bcast_dims = ggml_cann_get_bcast_shape(src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, \ bcast_##src0##_nb, bcast_##src1##_nb); #define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims /** * @brief Calculates broadcast shapes for matrix multiplication. * * @details This function computes the broadcast shapes required for matrix multiplication * based on the input, weight, and destination tensor shapes. It ensures that the * dimensions of weight tensors are expanded appropriately to satisfy matrix * multiplication broadcast rules. * * @param input_ne Array containing the dimensions of the input tensor. * @param weight_ne Array containing the dimensions of the weight tensor. * @param dst_ne Array containing the dimensions of the destination tensor. * @param input_nb Array containing the strides of the input tensor. * @param weight_nb Array containing the strides of the weight tensor. * @param dst_nb Array containing the strides of the destination tensor. * @param bcast_input_ne Output array for broadcasted input tensor dimensions. * @param bcast_weight_ne Output array for broadcasted weight tensor dimensions. * @param bcast_dst_ne Output array for broadcasted destination tensor dimensions. * @param bcast_input_nb Output array for broadcasted input tensor strides. * @param bcast_weight_nb Output array for broadcasted weight tensor strides. 
* @param bcast_dst_nb Output array for broadcasted destination tensor strides. * @return The number of dimensions in the broadcasted tensors. * * @remarks This function iterates over the tensor dimensions and calculates the broadcast * shapes needed for matrix multiplication. It ensures that dimensions where * weight tensor requires expansion are appropriately handled to conform with * broadcasting rules. * @note compare with ggml_cann_get_bcast_shape, mul_mat broadcast need add this new dim * before cast dim. * @sa ggml_cann_get_bcast_shape */ int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne, const int64_t * weight_ne, const int64_t * dst_ne, const size_t * input_nb, const size_t * weight_nb, const size_t * dst_nb, int64_t * bcast_input_ne, int64_t * bcast_weight_ne, int64_t * bcast_dst_ne, size_t * bcast_input_nb, size_t * bcast_weight_nb, size_t * bcast_dst_nb); // Bcast macro to avoid duplicate code. #define BCAST_MUL_MAT_SHAPE(input, weight, dst) \ int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \ int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \ int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \ size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \ size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \ size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \ int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \ input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, bcast_##input##_ne, bcast_##weight##_ne, \ bcast_##dst##_ne, bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb); #define BCAST_MUL_MAT_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims #endif // CANN_ACL_TENSOR_H ggml-org-ggml-3678254/src/ggml-cann/aclnn_ops.cpp000066400000000000000000005455161512524704700215160ustar00rootroot00000000000000/* * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "aclnn_ops.h" #include "ggml-impl.h" #include "ggml.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "../ggml-common.h" void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, acl_tensor_ptr & acl_src0, acl_tensor_ptr & acl_src1, acl_tensor_ptr & acl_dst) { GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_can_repeat(src1, src0)); // Need bcast if (!ggml_are_same_shape(src0, src1) && ggml_cann_need_bcast(src0, src1)) { BCAST_SHAPE(src0, src1) acl_src0 = ggml_cann_create_tensor(src0, BCAST_PARAM(src0)); acl_src1 = ggml_cann_create_tensor(src1, BCAST_PARAM(src1)); acl_dst = ggml_cann_create_tensor(dst, BCAST_PARAM(src0)); } else { acl_src0 = ggml_cann_create_tensor(src0); acl_src1 = ggml_cann_create_tensor(src1); acl_dst = ggml_cann_create_tensor(dst); } } void ggml_cann_op_unary(std::function unary_op, ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); unary_op(ctx, acl_src.get(), acl_dst.get()); } void ggml_cann_op_unary_gated(std::function unary_op, ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); const int32_t swapped = ggml_get_op_params_i32(dst, 1); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); acl_tensor_ptr acl_src0, acl_src1; if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); acl_src0 = ggml_cann_create_tensor(src0); acl_src1 = ggml_cann_create_tensor(src1); } else { int64_t ne[] = { src0->ne[0] / 2, src0->ne[1], src0->ne[2], src0->ne[3] }; size_t nb[] = { src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3] }; acl_src0 = ggml_cann_create_tensor(src0, ne, nb, GGML_MAX_DIMS, ACL_FORMAT_ND, 0); acl_src1 = ggml_cann_create_tensor(src0, ne, nb, GGML_MAX_DIMS, ACL_FORMAT_ND, ne[0] * ggml_element_size(src0)); if (swapped) { std::swap(acl_src0, acl_src1); } } unary_op(ctx, acl_src0.get(), acl_dst.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMul, acl_dst.get(), acl_src1.get()); } /** * @brief Repeats elements of a tensor along each dimension according to the * specified repeat array. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor to be repeated. * @param acl_dst The destination tensor after repeating. * @param repeat_array The array specifying the number of repetitions along each * dimension. */ static void aclnn_repeat(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst, int64_t * repeat_array) { // repeat tensor along each dim with repeat_array acl_int_array_ptr repeats = ggml_cann_create_int_array(repeat_array, GGML_MAX_DIMS); GGML_CANN_CALL_ACLNN_OP(ctx, Repeat, acl_src, repeats.get(), acl_dst); } /** * @brief Casts the data type of a source tensor to a destination tensor. 
* * This function casts the data type of the source tensor `acl_src` to the * specified data type `cast_data_type` and stores the result in the destination * tensor `acl_dst`. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor whose data type will be casted. * @param acl_dst The destination tensor where the casted result will be stored. * @param cast_data_type The target data type to which the source tensor will be * casted. */ static void aclnn_cast(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst, aclDataType cast_data_type) { GGML_CANN_CALL_ACLNN_OP(ctx, Cast, acl_src, cast_data_type, acl_dst); } void ggml_cann_repeat(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; GGML_ASSERT(ggml_can_repeat(src, dst)); acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); int64_t repeatsArray[] = { dst->ne[3] / src->ne[3], dst->ne[2] / src->ne[2], dst->ne[1] / src->ne[1], dst->ne[0] / src->ne[0] }; aclnn_repeat(ctx, acl_src.get(), acl_dst.get(), repeatsArray); } void aclnn_add(ggml_backend_cann_context & ctx, aclTensor * acl_src0, aclTensor * acl_src1, aclTensor * acl_dst) { float alphaValue = 1.0f; acl_scalar_ptr alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Add, acl_src0, acl_src1, alpha.get(), acl_dst); } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_src0, acl_src1, alpha.get()); } } void aclnn_sub(ggml_backend_cann_context & ctx, aclTensor * acl_src0, aclTensor * acl_src1, aclTensor * acl_dst) { float alphaValue = 1.0f; acl_scalar_ptr alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Sub, acl_src0, acl_src1, alpha.get(), acl_dst); } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceSub, acl_src0, acl_src1, alpha.get()); } } void aclnn_mul(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_other, aclTensor * acl_dst) { if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Mul, acl_src, acl_other, acl_dst); } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMul, acl_src, acl_other); } } void aclnn_div(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_other, aclTensor * acl_dst) { if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Div, acl_src, acl_other, acl_dst); } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceDiv, acl_src, acl_other); } } /** * @brief Multiplies elements of a tensor by a scalar value, optionally * in-place. * * This function multiplies each element of the source tensor `acl_src` by the * scalar `scale` and stores the result in the destination tensor `acl_dst`. If * `inplace` is true, `acl_dst` will not be used and the operation is performed * in-place on `acl_src`. * The operation is defined as: * \f[ * \text {acl_dst }_i=\text {acl_src }_i \times \text {scale} * \f] * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor whose elements will be multiplied. * @param scale The scalar value by which each element of `acl_src` will be * multiplied. * @param acl_dst The destination tensor where the result will be stored if * `inplace` is false. * @param inplace Flag indicating whether to perform the operation in-place on * `acl_src`. 
*/ static void aclnn_muls(ggml_backend_cann_context & ctx, aclTensor * acl_src, float scale, aclTensor * acl_dst, bool inplace) { acl_scalar_ptr acl_scale = ggml_cann_create_scalar(&scale, aclDataType::ACL_FLOAT); if (inplace) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMuls, acl_src, acl_scale.get()); } else { GGML_CANN_CALL_ACLNN_OP(ctx, Muls, acl_src, acl_scale.get(), acl_dst); } } void ggml_cann_leaky_relu(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); float negative_slope; memcpy(&negative_slope, dst->op_params, sizeof(float)); acl_scalar_ptr acl_negative_slope = ggml_cann_create_scalar(&negative_slope, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, LeakyRelu, acl_src.get(), acl_negative_slope.get(), acl_dst.get()); } /** * @brief Concatenates a list of tensors along a specified dimension and stores * the result in a destination tensor. * * @param ctx The context for the CANN backend operations. * @param tensorList The list of tensors to be concatenated. * @param acl_dst The destination tensor where the concatenated result will be * stored. * @param concat_dim The dimension along which the tensors will be concatenated. */ static void aclnn_concat(ggml_backend_cann_context & ctx, aclTensorList * tensorList, aclTensor * acl_dst, int64_t concat_dim) { GGML_CANN_CALL_ACLNN_OP(ctx, Cat, tensorList, concat_dim, acl_dst); } void ggml_cann_concat(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; acl_tensor_ptr acl_src0 = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_src1 = ggml_cann_create_tensor(src1); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); const int32_t dim = ggml_get_op_params_i32(dst, 0); GGML_ASSERT(dim >= 0 && dim < 4); int32_t acl_dim = 3 - dim; acl_tensor_list_ptr tensor_list = ggml_cann_create_tensor_list(acl_src0, acl_src1); aclnn_concat(ctx, tensor_list.get(), acl_dst.get(), acl_dim); } /** * @brief Creates a tensor with values starting from `start`, incremented by * `step`, and ending before `stop`. * * This function performs the operation: * \f[ * \text {out }_{i+1}=\text {out }_i+\text {step} * \f] * the range is [start, stop). * * @param ctx The context for the CANN backend operations. * @param acl_dst The destination tensor where the values will be stored. * @param start The starting value of the range. * @param stop The ending value of the range (exclusive). * @param step The step size between consecutive values. * @param n_elements The number of elements in the destination tensor. 
*/ static void aclnn_arange(ggml_backend_cann_context & ctx, aclTensor * acl_dst, float start, float stop, float step, int64_t n_elements) { int64_t steps = (int64_t) std::ceil((stop - start) / step); GGML_ASSERT(n_elements == steps); acl_scalar_ptr acl_start = ggml_cann_create_scalar(&start, aclDataType::ACL_FLOAT); acl_scalar_ptr acl_end = ggml_cann_create_scalar(&stop, aclDataType::ACL_FLOAT); acl_scalar_ptr acl_step = ggml_cann_create_scalar(&step, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Arange, acl_start.get(), acl_end.get(), acl_step.get(), acl_dst); } void ggml_cann_arange(ggml_backend_cann_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->type == GGML_TYPE_F32); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); int64_t n_elements = ggml_nelements(dst); float start; float stop; float step; memcpy(&start, (float *) dst->op_params + 0, sizeof(float)); memcpy(&stop, (float *) dst->op_params + 1, sizeof(float)); memcpy(&step, (float *) dst->op_params + 2, sizeof(float)); aclnn_arange(ctx, acl_dst.get(), start, stop, step, n_elements); } void ggml_cann_clamp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; float min; float max; memcpy(&min, dst->op_params, sizeof(float)); memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); acl_scalar_ptr acl_min = ggml_cann_create_scalar(&min, aclDataType::ACL_FLOAT); acl_scalar_ptr acl_max = ggml_cann_create_scalar(&max, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Clamp, acl_src.get(), acl_min.get(), acl_max.get(), acl_dst.get()); } void ggml_cann_scale(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; // scale factor float v; memcpy(&v, dst->op_params, sizeof(float)); acl_scalar_ptr scale = ggml_cann_create_scalar(&v, aclDataType::ACL_FLOAT); acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); GGML_CANN_CALL_ACLNN_OP(ctx, Muls, acl_src.get(), scale.get(), acl_dst.get()); } void ggml_cann_argsort(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), ggml_nelements(dst) * sizeof(int64_t)); void * buffer = temp_buffer_allocator.get(); acl_tensor_ptr tmp_tensor = ggml_cann_create_tensor(buffer, ACL_INT64, ggml_type_size(dst->type), dst->ne, dst->nb, GGML_MAX_DIMS); GGML_CANN_CALL_ACLNN_OP(ctx, Argsort, acl_src.get(), -1, (order == GGML_SORT_ORDER_DESC ? 
true : false), tmp_tensor.get()); GGML_CANN_CALL_ACLNN_OP(ctx, Cast, tmp_tensor.get(), ggml_cann_type_mapping(dst->type), acl_dst.get()); } void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); float eps; memcpy(&eps, dst->op_params, sizeof(float)); std::vector normData = { dst->ne[0] }; acl_int_array_ptr norm = ggml_cann_create_int_array(normData.data(), normData.size()); GGML_CANN_CALL_ACLNN_OP(ctx, LayerNorm, acl_src.get(), norm.get(), nullptr, nullptr, eps, acl_dst.get(), nullptr, nullptr); } void ggml_cann_l2_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); size_t type_size = ggml_type_size(src->type); int64_t n_bytes = src->ne[3] * src->ne[2] * src->ne[1] * type_size; ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), n_bytes); void * buffer = temp_buffer_allocator.get(); int64_t div_ne[] = { 1, src->ne[1], src->ne[2], src->ne[3] }; size_t div_nb[GGML_MAX_DIMS]; div_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; ++i) { div_nb[i] = div_nb[i - 1] * div_ne[i - 1]; } acl_tensor_ptr acl_div = ggml_cann_create_tensor(buffer, ACL_FLOAT, type_size, div_ne, div_nb, GGML_MAX_DIMS); std::vector norm_dims = { 3 }; acl_int_array_ptr dims_array = ggml_cann_create_int_array(norm_dims.data(), norm_dims.size()); float p_value = 2.0f; acl_scalar_ptr p_scalar = ggml_cann_create_scalar(&p_value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Norm, acl_src.get(), p_scalar.get(), dims_array.get(), true, acl_div.get()); GGML_CANN_CALL_ACLNN_OP(ctx, Div, acl_src.get(), acl_div.get(), acl_dst.get()); } void ggml_cann_cross_entropy_loss(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; const int64_t nc = src0->ne[0]; const int64_t nr = ggml_nrows(src0); int64_t logits_ne[] = { nc, nr }; size_t logits_nb[2]; logits_nb[0] = ggml_type_size(src0->type); logits_nb[1] = logits_nb[0] * logits_ne[0]; acl_tensor_ptr acl_logits = ggml_cann_create_tensor(src0->data, ACL_FLOAT, sizeof(float), logits_ne, logits_nb, 2); size_t log_softmax_type_size = sizeof(float); int64_t log_softmax_n_bytes = nr * nc * log_softmax_type_size; ggml_cann_pool_alloc log_softmax_allocator(ctx.pool(), log_softmax_n_bytes); void * log_softmax_buffer = log_softmax_allocator.get(); int64_t log_softmax_ne[] = { nc, nr }; size_t log_softmax_nb[2]; log_softmax_nb[0] = log_softmax_type_size; log_softmax_nb[1] = log_softmax_nb[0] * log_softmax_ne[0]; acl_tensor_ptr acl_log_softmax = ggml_cann_create_tensor(log_softmax_buffer, ACL_FLOAT, log_softmax_type_size, log_softmax_ne, log_softmax_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, LogSoftmax, acl_logits.get(), 1, acl_log_softmax.get()); int64_t labels_ne[] = { nc, nr }; size_t labels_nb[2]; labels_nb[0] = ggml_type_size(src1->type); labels_nb[1] = labels_nb[0] * labels_ne[0]; acl_tensor_ptr acl_labels = ggml_cann_create_tensor(src1->data, ACL_FLOAT, sizeof(float), labels_ne, labels_nb, 2); size_t mul_type_size = sizeof(float); int64_t mul_n_bytes = nr * nc * mul_type_size; ggml_cann_pool_alloc mul_allocator(ctx.pool(), mul_n_bytes); void * mul_buffer = mul_allocator.get(); int64_t mul_ne[] = { nc, nr }; size_t mul_nb[2]; mul_nb[0] = mul_type_size; mul_nb[1] = mul_nb[0] * mul_ne[0]; acl_tensor_ptr 
acl_mul_result = ggml_cann_create_tensor(mul_buffer, ACL_FLOAT, mul_type_size, mul_ne, mul_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, Mul, acl_log_softmax.get(), acl_labels.get(), acl_mul_result.get()); size_t sum_per_sample_type_size = sizeof(float); int64_t sum_per_sample_n_bytes = nr * sum_per_sample_type_size; ggml_cann_pool_alloc sum_per_sample_allocator(ctx.pool(), sum_per_sample_n_bytes); void * sum_per_sample_buffer = sum_per_sample_allocator.get(); int64_t sum_per_sample_ne[] = { nr }; size_t sum_per_sample_nb[1]; sum_per_sample_nb[0] = sum_per_sample_type_size; acl_tensor_ptr acl_sum_per_sample = ggml_cann_create_tensor( sum_per_sample_buffer, ACL_FLOAT, sum_per_sample_type_size, sum_per_sample_ne, sum_per_sample_nb, 1); std::vector sum_dims = { 1 }; acl_int_array_ptr dims_array = ggml_cann_create_int_array(sum_dims.data(), sum_dims.size()); bool keep_dims = false; GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_mul_result.get(), dims_array.get(), keep_dims, ACL_FLOAT, acl_sum_per_sample.get()); size_t total_sum_type_size = sizeof(float); int64_t total_sum_n_bytes = 1 * total_sum_type_size; ggml_cann_pool_alloc total_sum_allocator(ctx.pool(), total_sum_n_bytes); void * total_sum_buffer = total_sum_allocator.get(); int64_t total_sum_ne[] = { 1 }; size_t total_sum_nb[1]; total_sum_nb[0] = total_sum_type_size; acl_tensor_ptr acl_total_sum = ggml_cann_create_tensor(total_sum_buffer, ACL_FLOAT, total_sum_type_size, total_sum_ne, total_sum_nb, 1); std::vector total_sum_dims = { 0 }; acl_int_array_ptr total_sum_dims_array = ggml_cann_create_int_array(total_sum_dims.data(), total_sum_dims.size()); GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_sum_per_sample.get(), total_sum_dims_array.get(), keep_dims, ACL_FLOAT, acl_total_sum.get()); float value = -1.0f / static_cast(nr); acl_scalar_ptr scale_factor = ggml_cann_create_scalar(&value, aclDataType::ACL_FLOAT); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst->data, ACL_FLOAT, sizeof(float), total_sum_ne, total_sum_nb, 1); GGML_CANN_CALL_ACLNN_OP(ctx, Muls, acl_total_sum.get(), scale_factor.get(), acl_dst.get()); } void ggml_cann_group_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); int n_groups = dst->op_params[0]; float eps; memcpy(&eps, dst->op_params + 1, sizeof(float)); int64_t N = src->ne[3]; int64_t C = src->ne[2]; int64_t HxW = src->ne[1] * src->ne[0]; size_t type_size = ggml_type_size(src->type); int64_t ne[] = { n_groups, N }; size_t nb[] = { type_size, type_size * n_groups }; size_t n_bytes = N * n_groups; ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), n_bytes * 2); void * buffer = temp_buffer_allocator.get(); acl_tensor_ptr acl_mean_out = ggml_cann_create_tensor(buffer, ACL_FLOAT, type_size, ne, nb, ACL_FORMAT_ND); acl_tensor_ptr acl_rstd_out = ggml_cann_create_tensor((char *) buffer + n_bytes, ACL_FLOAT, type_size, ne, nb, ACL_FORMAT_ND); GGML_CANN_CALL_ACLNN_OP(ctx, GroupNorm, acl_src.get(), nullptr, nullptr, N, C, HxW, n_groups, eps, acl_dst.get(), acl_mean_out.get(), acl_rstd_out.get()); } void ggml_cann_acc(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; size_t nb1 = ((int32_t *) dst->op_params)[0]; size_t nb2 = ((int32_t *) dst->op_params)[1]; size_t nb3 = ((int32_t *) dst->op_params)[2]; size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; size_t 
param_nb[] = { ggml_element_size(src0), nb1, nb2, nb3 }; acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, src1->ne, param_nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset); acl_tensor_ptr acl_src1 = ggml_cann_create_tensor(src1); acl_scalar_ptr alpha = nullptr; float alphaValue = 1.0f; alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); if (!inplace) { size_t cpy_size = ggml_nbytes(dst); ACL_CHECK( aclrtMemcpyAsync(dst->data, cpy_size, src0->data, cpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE, ctx.stream())); acl_tensor_ptr acl_src0 = ggml_cann_create_tensor(src0, src1->ne, src0->nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset); GGML_CANN_CALL_ACLNN_OP(ctx, Add, acl_src0.get(), acl_src1.get(), alpha.get(), acl_dst.get()); } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst.get(), acl_src1.get(), alpha.get()); } } /** * @brief Performs sum reduction on a given tensor along specified dimensions. * * This function reduces the input tensor by summing along the specified dimensions. * * @param ctx The context for the CANN backend operations. * @param dst The destination tensor where the reduced result will be stored. * @param dim An array of dimension indices. * @param dim_size The number of dimensions. */ static void aclnn_reduce_sum(ggml_backend_cann_context & ctx, ggml_tensor * dst, int64_t * dim, size_t dim_size) { GGML_ASSERT(dst->ne[0] == 1); ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); acl_int_array_ptr reduce_dims = ggml_cann_create_int_array(dim, dim_size); GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_src.get(), reduce_dims.get(), true, ggml_cann_type_mapping(dst->type), acl_dst.get()); } void ggml_cann_sum_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst) { int64_t reduce_dims[] = { 3 }; aclnn_reduce_sum(ctx, dst, reduce_dims, 1); } void ggml_cann_sum(ggml_backend_cann_context & ctx, ggml_tensor * dst) { int64_t reduce_dims[] = { 0, 1, 2, 3 }; aclnn_reduce_sum(ctx, dst, reduce_dims, 4); } void ggml_cann_upsample_nearest2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); std::vector output_size{ dst->ne[1], dst->ne[0] }; acl_int_array_ptr output_size_array = ggml_cann_create_int_array(output_size.data(), 2); GGML_CANN_CALL_ACLNN_OP(ctx, UpsampleNearest2d, acl_src.get(), output_size_array.get(), acl_dst.get()); } /** * @brief Pads a tensor with a specified value along each dimension. * * This function performs padding of the source tensor `acl_src` and stores the * result in the destination tensor `acl_dst`. The padding values for each * dimension are specified in the `paddings` array. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor to be padded. * @param acl_dst The destination tensor where the padded result will be stored. * @param paddings An array specifying the padding values for each dimension. * The size of the array should be twice the number of dimensions of the tensor. * @param value The value to be used for padding. The default value is 0.0. 
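 * For example, with the layout used by ggml_cann_pad() below, paddings = { 1, 2, 0, 0, 0, 0, 0, 0 }
 * pads one element before and two elements after dim 0 and leaves the other dimensions unchanged.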
*/ static void aclnn_pad(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst, int64_t * paddings, float value = 0.0f) { acl_int_array_ptr acl_pad = ggml_cann_create_int_array(paddings, GGML_MAX_DIMS * 2); acl_scalar_ptr acl_value = ggml_cann_create_scalar(&value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, ConstantPadNd, acl_src, acl_pad.get(), acl_value.get(), acl_dst); } void ggml_cann_pad(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); // padding: value in the array means how much distance will be padding. // the position of elements in the array means which dirction to padding, // each position means: [dim0.front, dim0.behind, dim1.front, dim1.behind, // dim2.front, dim2.behind, dim3.front, dim3.behind] const int32_t lp0 = ggml_get_op_params_i32(dst, 0); const int32_t rp0 = ggml_get_op_params_i32(dst, 1); const int32_t lp1 = ggml_get_op_params_i32(dst, 2); const int32_t rp1 = ggml_get_op_params_i32(dst, 3); const int32_t lp2 = ggml_get_op_params_i32(dst, 4); const int32_t rp2 = ggml_get_op_params_i32(dst, 5); const int32_t lp3 = ggml_get_op_params_i32(dst, 6); const int32_t rp3 = ggml_get_op_params_i32(dst, 7); int64_t paddings[] = { lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3 }; aclnn_pad(ctx, acl_src.get(), acl_dst.get(), paddings); } /** * @brief Performs 2D average pooling on the input tensor and stores the result * in the destination tensor. * * This function performs average pooling on the source tensor and stores the * result in the destination tensor. The pooling parameters (kernel size, * strides, padding) are specified in the `op_params` of the destination tensor. * * @param ctx The context for the CANN backend operations. * @param dst The destination tensor where the result will be stored. The source * tensor is referenced by `dst->src[0]`. */ static void ggml_cann_avg_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); acl_tensor_ptr acl_src = ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); const int32_t * opts = (const int32_t *) dst->op_params; const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; std::vector kernel_dims = { k1, k0 }; std::vector stride_dims = { s1, s0 }; std::vector padding_avg_dims = { p1, p0 }; // (padH, padW) acl_int_array_ptr kernel_size = ggml_cann_create_int_array(kernel_dims.data(), 2); acl_int_array_ptr strides = ggml_cann_create_int_array(stride_dims.data(), 2); acl_int_array_ptr paddings_avg = ggml_cann_create_int_array(padding_avg_dims.data(), 2); bool ceil_mode = false; bool count_include_pad = true; int64_t divisor_override = 0; int8_t cube_math_type = 0; #ifdef ASCEND_310P cube_math_type = 1; #endif GGML_CANN_CALL_ACLNN_OP(ctx, AvgPool2d, acl_src.get(), kernel_size.get(), strides.get(), paddings_avg.get(), ceil_mode, count_include_pad, divisor_override, cube_math_type, acl_dst.get()); } /** * @brief Performs 2D max pooling on the input tensor and stores the result in * the destination tensor. * * This function performs max pooling on the source tensor and stores the result * in the destination tensor. 
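 * The requested padding is applied up front with aclnn_pad() using -FLT_MAX, so padded positions
 * can never be selected as the maximum by the subsequent MaxPool call.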
The pooling parameters (kernel size, strides, * padding) are specified in the `op_params` of the destination tensor. * * @param ctx The context for the CANN backend operations. * @param dst The destination tensor where the result will be stored. The source * tensor is referenced by `dst->src[0]`. */ static void ggml_cann_max_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); acl_tensor_ptr acl_src = ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); const int32_t * opts = (const int32_t *) dst->op_params; const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; int64_t temp_ne[] = { src->ne[0] + p0 * 2, src->ne[1] + p1 * 2, src->ne[2], src->ne[3] }; size_t temp_nb[GGML_MAX_DIMS]; temp_nb[0] = ggml_element_size(src); for (int i = 1; i < GGML_MAX_DIMS; i++) { temp_nb[i] = temp_nb[i - 1] * temp_ne[i - 1]; } ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), ggml_nbytes(src) + p0 * 2 + p1 * 2 * src->nb[1]); void * buffer = temp_buffer_allocator.get(); acl_tensor_ptr tmp_tensor = ggml_cann_create_tensor(buffer, ACL_FLOAT, ggml_element_size(src), temp_ne, temp_nb, GGML_MAX_DIMS, ACL_FORMAT_NCHW); // pad: see padding in ggml_cann_pad() int64_t paddings[] = { p0, p0, p1, p1, 0, 0, 0, 0 }; float value = -FLT_MAX; aclnn_pad(ctx, acl_src.get(), tmp_tensor.get(), paddings, value); // max_pool std::vector kernel_dims = { k1, k0 }; std::vector stride_dims = { s1, s0 }; // padding_max_dims: [dim0_start, dim0_end, dim1_start, dim1_end] std::vector padding_max_dims = { 0, 0, 0, 0 }; std::vector dilation_size = { 1, 1 }; acl_int_array_ptr kernel_size = ggml_cann_create_int_array(kernel_dims.data(), 2); acl_int_array_ptr strides = ggml_cann_create_int_array(stride_dims.data(), 2); acl_int_array_ptr paddings_max = ggml_cann_create_int_array(padding_max_dims.data(), 4); acl_int_array_ptr dilations = ggml_cann_create_int_array(dilation_size.data(), 2); bool ceil_mode = false; int64_t auto_pads = 0; GGML_CANN_CALL_ACLNN_OP(ctx, MaxPool, tmp_tensor.get(), kernel_size.get(), strides.get(), auto_pads, paddings_max.get(), dilations.get(), ceil_mode, acl_dst.get()); } void ggml_cann_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { const int32_t * opts = (const int32_t *) dst->op_params; enum ggml_op_pool op = static_cast(opts[0]); switch (op) { case GGML_OP_POOL_AVG: ggml_cann_avg_pool2d(ctx, dst); break; case GGML_OP_POOL_MAX: ggml_cann_max_pool2d(ctx, dst); break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); break; } } /** * @brief Copies data from the source tensor to the destination tensor. * * This function copies data from the source tensor `acl_src` to the destination * tensor `acl_dst`. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor from which data will be copied. * @param acl_dst The destination tensor where the data will be copied to. 
*/ static void cann_copy(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceCopy, acl_dst, acl_src); } void ggml_cann_dup(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; if (ggml_are_same_shape(src0, dst)) { acl_tensor_ptr acl_src = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); if (dst->type == src0->type) { cann_copy(ctx, acl_src.get(), acl_dst.get()); } else { aclnn_cast(ctx, acl_src.get(), acl_dst.get(), ggml_cann_type_mapping(dst->type)); } } else { void * src_trans_buffer = src0->data; ggml_cann_pool_alloc src_buffer_allocator; if (!ggml_is_contiguous(src0)) { acl_tensor_ptr acl_src = ggml_cann_create_tensor(src0); src_buffer_allocator.alloc(ctx.pool(), ggml_nelements(src0) * ggml_type_size(src0->type)); src_trans_buffer = src_buffer_allocator.get(); size_t src_trans_nb[GGML_MAX_DIMS]; src_trans_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; } acl_tensor_ptr src_trans_tensor = ggml_cann_create_tensor(src_trans_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), src0->ne, src_trans_nb, GGML_MAX_DIMS); cann_copy(ctx, acl_src.get(), src_trans_tensor.get()); } size_t src_reshape_nb[GGML_MAX_DIMS]; src_reshape_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { src_reshape_nb[i] = src_reshape_nb[i - 1] * dst->ne[i - 1]; } acl_tensor_ptr trans_acl_src = ggml_cann_create_tensor(src_trans_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), dst->ne, src_reshape_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); if (dst->type == src0->type) { cann_copy(ctx, trans_acl_src.get(), acl_dst.get()); } else { aclnn_cast(ctx, trans_acl_src.get(), acl_dst.get(), ggml_cann_type_mapping(dst->type)); } } } /** * @brief Creates an ACL tensor initialized with zeros using a provided buffer. * * This function initializes a tensor with zeros using the specified buffer and * tensor parameters. * * @param ctx The context for the CANN backend operations. * @param buffer The buffer to be used for the tensor data. * @param n_bytes The size of the buffer in bytes. * @param ne An array specifying the extents (sizes) of each dimension of the * tensor. * @param dims The number of dimensions of the tensor. * @param type The data type of the tensor. * @param type_size The size of each element in the tensor data type. * @return A tensor smart pointer initialized with zeros. */ static acl_tensor_ptr aclnn_zero(ggml_backend_cann_context & ctx, void * buffer, size_t n_bytes, int64_t * ne, int64_t dims, aclDataType type, size_t type_size) { size_t nb[GGML_MAX_DIMS]; nb[0] = type_size; for (int i = 1; i < dims; i++) { nb[i] = nb[i - 1] * ne[i - 1]; } acl_tensor_ptr zero = ggml_cann_create_tensor(buffer, type, type_size, ne, nb, dims); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, zero.get()); return zero; GGML_UNUSED(n_bytes); } /** * @brief Creates an ACL tensor initialized with value using a provided buffer. * * This function initializes a tensor with value using the specified buffer and * tensor parameters. * * @param ctx The context for the CANN backend operations. * @param buffer The buffer to be used for the tensor data. * @param n_bytes The size of the buffer in bytes. * @param ne An array specifying the extents (sizes) of each dimension of the * tensor. 
* @param dims The number of dimensions of the tensor. * @param type The data type of the tensor. * @param type_size The size of each element in the tensor data type. * @param value The value to be used for initializing the tensor (default * is 1.0). * @return A tensor smart pointer initialized with value. */ static acl_tensor_ptr aclnn_values(ggml_backend_cann_context & ctx, void * buffer, size_t n_bytes, int64_t * ne, int64_t dims, aclDataType type, size_t type_size, float value = 1.0f) { acl_tensor_ptr acl_tensor = aclnn_zero(ctx, buffer, n_bytes, ne, dims, type, type_size); float alpha_host = 1.0f; acl_scalar_ptr alpha = ggml_cann_create_scalar(&alpha_host, aclDataType::ACL_FLOAT); acl_scalar_ptr other = ggml_cann_create_scalar(&value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdds, acl_tensor.get(), other.get(), alpha.get()); return acl_tensor; } /** * @brief Fills a tensor with a scalar value. * * This function fills the destination tensor `acl_dst` with the scalar value * `scalar`. * * @param ctx The context for the CANN backend operations. * @param scalar The scalar value used to fill the tensor. * @param acl_dst The destination tensor to be filled with the scalar value. */ static void aclnn_fill_scalar(ggml_backend_cann_context & ctx, float scalar, aclTensor * acl_dst) { acl_scalar_ptr acl_scalar = ggml_cann_create_scalar(&scalar, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceFillScalar, acl_dst, acl_scalar.get()); } /** * @brief Get or expand a cached tensor filled with a scalar value. * * This function manages cached device memory for tensors. If the current * cache size is insufficient for the requested tensor shape, the old memory will * be released and new memory will be allocated. The allocated buffer is * initialized with the given scalar value using CANN operations. * Finally, an aclTensor object is created from the cached memory and returned. * * @param ctx The CANN backend context that manages device memory. * @param buffer A pointer to the cached device buffer (will be allocated * or reallocated if necessary). * @param cache_element The current number of cached elements. This will be * updated when the cache is expanded. * @param ne The tensor shape array (number of elements in each dimension). * @param nb The stride size for each dimension. * @param dtype Data type of cached tensor. * @param dims The number of tensor dimensions. * @param value The scalar value used to fill the tensor (supports zero * initialization via memset or arbitrary values via fill_scalar). * @return A tensor smart pointer created from the cached buffer. 
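 * For example, ggml_cann_rms_norm() below keeps its all-ones gamma tensor (value 1.0) and its
 * zero-filled rstd tensor (value 0.0) in this cache, so the fill is repeated only when a larger
 * shape than previously cached is requested.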
*/ static acl_tensor_ptr get_cache_acl_tensor(ggml_backend_cann_context & ctx, void ** buffer, int64_t & cache_element, int64_t * ne, size_t * nb, ggml_type dtype, int64_t dims, float value) { // Calculate total number of elements int64_t n_element = 1; for (int i = 0; i < dims; i++) { n_element *= ne[i]; } size_t size = n_element * ggml_type_size(dtype); // Allocate or expand cache if needed if (cache_element < n_element) { if (*buffer != nullptr) { aclrtFree(*buffer); *buffer = nullptr; } ACL_CHECK(aclrtMalloc(buffer, size, ACL_MEM_MALLOC_HUGE_FIRST)); cache_element = n_element; // Initialize cache int64_t pool_ne[1] = { n_element }; size_t pool_nb[1] = { ggml_type_size(dtype) }; acl_tensor_ptr acl_value = ggml_cann_create_tensor(*buffer, ggml_cann_type_mapping(dtype), ggml_type_size(dtype), pool_ne, pool_nb, 1); aclnn_fill_scalar(ctx, value, acl_value.get()); } return ggml_cann_create_tensor(*buffer, ggml_cann_type_mapping(dtype), ggml_type_size(dtype), ne, nb, dims); } void ggml_cann_rms_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); float eps; memcpy(&eps, dst->op_params, sizeof(float)); // build gamma. size_t acl_gamma_nb[GGML_MAX_DIMS]; // gamma's type is the same with dst. acl_gamma_nb[0] = ggml_type_size(dst->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { acl_gamma_nb[i] = acl_gamma_nb[i - 1] * src->ne[i - 1]; } acl_tensor_ptr acl_gamma = get_cache_acl_tensor( ctx, &ctx.rms_norm_one_tensor_cache.cache, ctx.rms_norm_one_tensor_cache.size, src->ne, acl_gamma_nb, dst->type, 1, // dims 1.0f // value ); // build rstd. int64_t acl_rstd_ne[] = { src->ne[1], src->ne[2], src->ne[3] }; size_t acl_rstd_nb[GGML_MAX_DIMS - 1]; // rstd will always be F32. acl_rstd_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { acl_rstd_nb[i] = acl_rstd_nb[i - 1] * acl_rstd_ne[i - 1]; } acl_tensor_ptr acl_rstd = get_cache_acl_tensor(ctx, &ctx.rms_norm_zero_tensor_cache.cache, ctx.rms_norm_zero_tensor_cache.size, acl_rstd_ne, acl_rstd_nb, GGML_TYPE_F32, GGML_MAX_DIMS - 1, 0.0f // value ); GGML_CANN_CALL_ACLNN_OP(ctx, RmsNorm, acl_src.get(), acl_gamma.get(), eps, acl_dst.get(), acl_rstd.get()); } // TODO: performace is low. void ggml_cann_diag_mask(ggml_backend_cann_context & ctx, ggml_tensor * dst, float value) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); const int n_past = ((int32_t *) dst->op_params)[0]; ggml_cann_pool_alloc one_tensor_allocator(ctx.pool(), ggml_nbytes(src)); void * buffer = one_tensor_allocator.get(); acl_tensor_ptr mask_tensor = ggml_cann_create_tensor(buffer, ggml_cann_type_mapping(src->type), ggml_type_size(src->type), src->ne, src->nb, GGML_MAX_DIMS); aclnn_fill_scalar(ctx, value, mask_tensor.get()); float alphaValue = 1.0f; acl_scalar_ptr alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceTriu, mask_tensor.get(), n_past + 1); GGML_CANN_CALL_ACLNN_OP(ctx, Tril, acl_src.get(), n_past + 1, acl_dst.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst.get(), mask_tensor.get(), alpha.get()); } /** * @brief Permutes the dimensions of a tensor according to a specified order. * * This function permutes the dimensions of the source tensor `acl_src` * according to the order specified in the `new_dim` array and stores the result * in the destination tensor `acl_dst`. 
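 * For example, new_dim = { 0, 2, 1 } with dims = 3 swaps the last two axes; the im2col helpers
 * below use exactly this to reorder [N, IC * KH * KW, OW * OH] into [N, OW * OH, IC * KH * KW].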
* * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor whose dimensions will be permuted. * @param acl_dst The destination tensor where the permuted result will be * stored. * @param new_dim An array specifying the new order of dimensions for the * tensor. * @param dims The number of dimensions in the tensor. */ static void aclnn_permute(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst, int64_t * new_dim, uint64_t dims) { acl_int_array_ptr acl_dims = ggml_cann_create_int_array(new_dim, dims); GGML_CANN_CALL_ACLNN_OP(ctx, Permute, acl_src, acl_dims.get(), acl_dst); } static void ggml_cann_im2col_2d_post_process(ggml_backend_cann_context & ctx, ggml_tensor * dst, ggml_tensor * src1, aclTensor * tmp_cast_tensor, aclTensor * tmp_im2col_tensor) { // Permute: [N, IC * KH * KW, OW * OH] -> [N, OW * OH, IC * KH * KW] int64_t dst_ne[] = { dst->ne[0], dst->ne[1] * dst->ne[2], dst->ne[3] }; size_t dst_nb[] = { dst->nb[0], dst->nb[1], dst->nb[3] }; acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, dst_ne, dst_nb, GGML_MAX_DIMS - 1); int64_t permute_dim[] = { 0, 2, 1 }; if (src1->type != dst->type) { aclnn_permute(ctx, tmp_cast_tensor, acl_dst.get(), permute_dim, 3); } else { aclnn_permute(ctx, tmp_im2col_tensor, acl_dst.get(), permute_dim, 3); } } static void ggml_cann_im2col_1d_post_process(ggml_backend_cann_context & ctx, ggml_tensor * dst, ggml_tensor * src1, aclTensor * tmp_cast_tensor, aclTensor * tmp_im2col_tensor, const std::vector & im2col_op_params) { // get params const int64_t KH = im2col_op_params[0]; const int64_t KW = im2col_op_params[1]; const int64_t IW = im2col_op_params[2]; const int64_t IC = im2col_op_params[3]; const int64_t N = im2col_op_params[4]; const int64_t OH = im2col_op_params[5]; const int64_t OW = im2col_op_params[6]; const int64_t s0 = im2col_op_params[7]; const int64_t p0 = im2col_op_params[8]; const int64_t d0 = im2col_op_params[9]; const int64_t n_bytes_factor = im2col_op_params[10]; // Permute: [N, IC * KH * KW, OW * OH] -> // [N, OW * OH * n_bytes_factor, IC * KH * KW] ggml_cann_pool_alloc tmp_permute_allocator(ctx.pool()); tmp_permute_allocator.alloc(ggml_nbytes(dst) * n_bytes_factor); void * tmp_permute_buffer = tmp_permute_allocator.get(); int64_t tmp_permute_ne[] = { IC * KH * KW, OW * OH * n_bytes_factor, N }; size_t tmp_permute_nb[GGML_MAX_DIMS - 1]; tmp_permute_nb[0] = ggml_type_size(dst->type); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { tmp_permute_nb[i] = tmp_permute_nb[i - 1] * tmp_permute_ne[i - 1]; } acl_tensor_ptr tmp_permute_tensor = ggml_cann_create_tensor(tmp_permute_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), tmp_permute_ne, tmp_permute_nb, GGML_MAX_DIMS - 1, ACL_FORMAT_ND); int64_t permute_dim[] = { 0, 2, 1 }; if (src1->type != dst->type) { aclnn_permute(ctx, tmp_cast_tensor, tmp_permute_tensor.get(), permute_dim, 3); } else { aclnn_permute(ctx, tmp_im2col_tensor, tmp_permute_tensor.get(), permute_dim, 3); } // number of times the kernel moves in W dimension const int n_step_w = (IW + 2 * p0 - d0 * (KW - 1) - 1) / s0 + 1; size_t offset; void * cur_dst_buffer = dst->data, *cur_permute_buffer = tmp_permute_buffer; // memory copy with offset to restore 1D im2col from 2d if (IC > 1) { offset = IC * KH * KW * n_step_w * ggml_type_size(dst->type); size_t cpy_size = KH * KW * ggml_type_size(dst->type); for (int c = 0; c < IC; c++) { cur_permute_buffer = (char *) tmp_permute_buffer + offset + KH * KW * c * ggml_type_size(dst->type); cur_dst_buffer = (char *) 
dst->data + c * KH * KW * n_step_w * ggml_type_size(dst->type); for (int i = 0; i < n_step_w; i++) { ACL_CHECK(aclrtMemcpyAsync(cur_dst_buffer, cpy_size, cur_permute_buffer, cpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE, ctx.stream())); cur_dst_buffer = (char *) cur_dst_buffer + KH * KW * ggml_type_size(dst->type); cur_permute_buffer = (char *) cur_permute_buffer + KH * KW * IC * ggml_type_size(dst->type); } } } else { offset = KH * KW * n_step_w * ggml_type_size(dst->type); // equal to ggml_nbytes(dst) ACL_CHECK(aclrtMemcpyAsync(dst->data, offset, (char *) tmp_permute_buffer + offset, offset, ACL_MEMCPY_DEVICE_TO_DEVICE, ctx.stream())); } } void ggml_cann_im2col(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // kernel ggml_tensor * src1 = dst->src[1]; // input GGML_TENSOR_BINARY_OP_LOCALS; // aclnnIm2col only works on 2D. set s1, p1, d1 to 1 to perform 2D // im2col and do post-processing to restore it to 1D. const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; const int32_t s1 = is_2D ? ((const int32_t *) (dst->op_params))[1] : 1; const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; const int32_t p1 = is_2D ? ((const int32_t *) (dst->op_params))[3] : 1; const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; const int32_t d1 = is_2D ? ((const int32_t *) (dst->op_params))[5] : 1; const int64_t N = ne13; const int64_t IC = ne12; const int64_t KH = ne01; const int64_t KW = ne00; const int64_t IW = ne10; const int64_t OH = is_2D ? ne2 : 1; const int64_t OW = ne1; // memory allocated increased to 3x when is_2D == false const int64_t n_bytes_factor = is_2D ? 1 : 3; // im2col: [N,C,H,W] -> [N, IC * KH * KW, OW * OH * n_bytes_factor] acl_tensor_ptr acl_src1 = ggml_cann_create_tensor(src1); int64_t tmp_im2col_ne[] = { OW * OH * n_bytes_factor, IC * KH * KW, N }; size_t tmp_im2col_nb[GGML_MAX_DIMS - 1]; tmp_im2col_nb[0] = ggml_type_size(src1->type); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { tmp_im2col_nb[i] = tmp_im2col_nb[i - 1] * tmp_im2col_ne[i - 1]; } // Calculate im2col. // If dst is f16, tmp_buffer is f32, we need alloc src.typesize * // dst.elemcount. ggml_cann_pool_alloc im2col_allocator(ctx.pool(), ggml_nelements(dst) * ggml_element_size(src1) * n_bytes_factor); void * tmp_im2col_buffer = im2col_allocator.get(); acl_tensor_ptr tmp_im2col_tensor = ggml_cann_create_tensor(tmp_im2col_buffer, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), tmp_im2col_ne, tmp_im2col_nb, GGML_MAX_DIMS - 1, ACL_FORMAT_ND); std::vector kernel_dims = { KH, KW }; std::vector dilation_size = { d1, d0 }; std::vector padding_dims = { p1, p0 }; std::vector stride_dims = { s1, s0 }; acl_int_array_ptr kernel_size = ggml_cann_create_int_array(kernel_dims.data(), 2); acl_int_array_ptr dilations = ggml_cann_create_int_array(dilation_size.data(), 2); acl_int_array_ptr paddings = ggml_cann_create_int_array(padding_dims.data(), 2); acl_int_array_ptr strides = ggml_cann_create_int_array(stride_dims.data(), 2); GGML_CANN_CALL_ACLNN_OP(ctx, Im2col, acl_src1.get(), kernel_size.get(), dilations.get(), paddings.get(), strides.get(), tmp_im2col_tensor.get()); // Cast if dst is f16. 
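// aclnnIm2col writes its result in the input (src1) type; when dst has a different type the result
// is first cast into a temporary buffer and the post-processing below consumes that buffer instead.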
acl_tensor_ptr tmp_cast_tensor; ggml_cann_pool_alloc tmp_cast_allocator(ctx.pool()); void * tmp_cast_buffer = nullptr; if (src1->type != dst->type) { tmp_cast_allocator.alloc(ggml_nbytes(dst) * n_bytes_factor); tmp_cast_buffer = tmp_cast_allocator.get(); size_t temp_cast_nb[GGML_MAX_DIMS - 1]; temp_cast_nb[0] = ggml_type_size(dst->type); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { temp_cast_nb[i] = temp_cast_nb[i - 1] * tmp_im2col_ne[i - 1]; } tmp_cast_tensor = ggml_cann_create_tensor(tmp_cast_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), tmp_im2col_ne, temp_cast_nb, GGML_MAX_DIMS - 1, ACL_FORMAT_ND); aclnn_cast(ctx, tmp_im2col_tensor.get(), tmp_cast_tensor.get(), ggml_cann_type_mapping(dst->type)); } // post-processing if (is_2D) { ggml_cann_im2col_2d_post_process(ctx, dst, src1, tmp_cast_tensor.get(), tmp_im2col_tensor.get()); } else { std::vector im2col_op_params = { KH, KW, IW, IC, N, OH, OW, s0, p0, d0, n_bytes_factor }; ggml_cann_im2col_1d_post_process(ctx, dst, src1, tmp_cast_tensor.get(), tmp_im2col_tensor.get(), im2col_op_params); } } /** * @brief Applies element-wise exponential function to the elements of a tensor. * * This function computes the exponential of each element in the source tensor * `acl_src` and stores the result back into the same tensor. * The operation is defined as: * \f[ * \text {acl_src }_i=e^{acl\_src_i} * \f] * * @param ctx The context for the CANN backend operations. * @param acl_src The tensor on which the exponential function will be applied. */ static void aclnn_exp(ggml_backend_cann_context & ctx, aclTensor * acl_src) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceExp, acl_src); } void aclnn_cos(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { if (acl_dst == nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceCos, acl_src); } else { GGML_CANN_CALL_ACLNN_OP(ctx, Cos, acl_src, acl_dst); } } void aclnn_sin(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { if (acl_dst == nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceSin, acl_src); } else { GGML_CANN_CALL_ACLNN_OP(ctx, Sin, acl_src, acl_dst); } } void ggml_cann_timestep_embedding(ggml_backend_cann_context & ctx, ggml_tensor * dst) { const ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); const int dim = dst->op_params[0]; const int max_period = dst->op_params[1]; int half = dim / 2; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); // arange: [0, ..., half) float start = 0; float stop = half; float step = 1; int64_t n_elements_arange = half; int64_t tmp_arange_ne[] = { half }; size_t tmp_arange_nb[] = { sizeof(dst->type) }; ggml_cann_pool_alloc arange_allocator(ctx.pool(), half * sizeof(dst->type)); void * tmp_arange_buffer = arange_allocator.get(); acl_tensor_ptr tmp_arange_tensor = ggml_cann_create_tensor(tmp_arange_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), tmp_arange_ne, tmp_arange_nb, GGML_MAX_DIMS - 3, ACL_FORMAT_ND); aclnn_arange(ctx, tmp_arange_tensor.get(), start, stop, step, n_elements_arange); // freq float freq_param = -logf(max_period) / half; bool inplace = true; aclnn_muls(ctx, tmp_arange_tensor.get(), freq_param, nullptr, inplace); aclnn_exp(ctx, tmp_arange_tensor.get()); // permute: src [0,1,2,3]->[0,1,3,2] int64_t tmp_permute_ne[] = { src->ne[1], src->ne[0], src->ne[2], src->ne[3] }; size_t tmp_permute_nb[GGML_MAX_DIMS]; tmp_permute_nb[0] = ggml_type_size(src->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { tmp_permute_nb[i] = 
tmp_permute_nb[i - 1] * tmp_permute_ne[i - 1]; } ggml_cann_pool_alloc permute_allocator(ctx.pool(), ggml_nbytes(src)); void * tmp_permute_buffer = permute_allocator.get(); acl_tensor_ptr tmp_permute_tensor = ggml_cann_create_tensor(tmp_permute_buffer, ggml_cann_type_mapping(src->type), ggml_type_size(src->type), tmp_permute_ne, tmp_permute_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); int64_t permute_dim[] = { 0, 1, 3, 2 }; int64_t num_dims = 4; aclnn_permute(ctx, acl_src.get(), tmp_permute_tensor.get(), permute_dim, num_dims); // timestep * freq int64_t tmp_mul_ne[] = { src->ne[1] * half, src->ne[0], src->ne[2], src->ne[3] }; size_t tmp_mul_nb[GGML_MAX_DIMS]; tmp_mul_nb[0] = ggml_type_size(src->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { tmp_mul_nb[i] = tmp_mul_nb[i - 1] * tmp_mul_ne[i - 1]; } int mul_nelements = src->ne[1] * half * src->ne[0] * src->ne[2] * src->ne[3]; ggml_cann_pool_alloc mul_allocator(ctx.pool(), mul_nelements * ggml_type_size(src->type)); void * tmp_mul_buffer = mul_allocator.get(); acl_tensor_ptr tmp_mul_tensor = ggml_cann_create_tensor(tmp_mul_buffer, ggml_cann_type_mapping(src->type), ggml_type_size(src->type), tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_mul(ctx, tmp_permute_tensor.get(), tmp_arange_tensor.get(), tmp_mul_tensor.get()); // cos ggml_cann_pool_alloc cos_allocator(ctx.pool(), mul_nelements * ggml_type_size(src->type)); void * tmp_cos_buffer = cos_allocator.get(); acl_tensor_ptr tmp_cos_tensor = ggml_cann_create_tensor(tmp_cos_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_cos(ctx, tmp_mul_tensor.get(), tmp_cos_tensor.get()); // sin ggml_cann_pool_alloc sin_allocator(ctx.pool(), mul_nelements * ggml_type_size(src->type)); void * tmp_sin_buffer = sin_allocator.get(); acl_tensor_ptr tmp_sin_tensor = ggml_cann_create_tensor(tmp_sin_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_sin(ctx, tmp_mul_tensor.get(), tmp_sin_tensor.get()); // concat int64_t concat_dim = 3; acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); acl_tensor_list_ptr tensor_list = ggml_cann_create_tensor_list(tmp_cos_tensor, tmp_sin_tensor); aclnn_concat(ctx, tensor_list.get(), acl_dst.get(), concat_dim); } /** * @brief Raises each element of a tensor to the power of the corresponding * element in another tensor. * * This function computes the element-wise power of the destination tensor * `acl_dst` raised to the power of the exponent tensor `acl_exp`. * The operation is defined as: * \f[ * \text {acl_dst }_i=acl\_dst_i^{\text {acl_exp }_i} * \f] * * @param ctx The context for the CANN backend operations. * @param acl_dst The destination tensor, which also serves as the base tensor. * @param acl_exp The exponent tensor, each element of which is used to raise * the corresponding element in the destination tensor. */ static void aclnn_pow_tensor_tensor(ggml_backend_cann_context & ctx, aclTensor * acl_dst, aclTensor * acl_exp) { GGML_CANN_CALL_ACLNN_OP(ctx, InplacePowTensorTensor, acl_dst, acl_exp); } /** * @brief Generate a range of values and apply a scalar base exponentiation. * * This function creates an evenly spaced sequence from `start` to `stop` (exclusive), * with step size `step`, stores it in a temporary buffer, and then computes: * * @f[ * slope[i] = m^{\left( start + i \cdot step \right)}, \quad 0 \le i < size * @f] * * The results are written to the provided @p slope_buffer. 
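 * For example, with m = 0.5, size = 3, start = 1, stop = 4 and step = 1, the generated exponents
 * are { 1, 2, 3 } and slope_buffer receives { 0.5, 0.25, 0.125 }.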
* * @param ctx CANN backend context for memory allocation and operator execution. * @param slope_buffer Pointer to the output buffer (float array) for the computed slope values. * @param m Scalar base for the exponentiation. * @param size Number of elements in the generated sequence. * @param start Starting exponent offset. * @param stop Stopping exponent offset (exclusive). * @param step Step size for the exponent increment. * @param dtype Data type for slope tensor. */ static void aclnn_get_slope_inner(ggml_backend_cann_context & ctx, void * slope_buffer, float m, int64_t size, float start, float stop, float step, ggml_type dtype) { aclDataType acl_type = ggml_cann_type_mapping(dtype); size_t type_size = ggml_type_size(dtype); int64_t ne[] = { size }; size_t nb[] = { type_size }; ggml_cann_pool_alloc arange_allocator(ctx.pool(), size * type_size); void * arange_buffer = arange_allocator.get(); acl_tensor_ptr arange_tensor = ggml_cann_create_tensor(arange_buffer, acl_type, type_size, ne, nb, 1); aclnn_arange(ctx, arange_tensor.get(), start, stop, step, size); acl_tensor_ptr slope_tensor = ggml_cann_create_tensor(slope_buffer, acl_type, type_size, ne, nb, 1); acl_scalar_ptr sc = ggml_cann_create_scalar(&m, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, PowScalarTensor, sc.get(), arange_tensor.get(), slope_tensor.get()); } /** * @brief Compute slope values for multiple attention heads based on ALiBi bias parameters. * * This function generates slope values for each attention head according to the ALiBi * (Attention with Linear Biases) method. It splits the computation into two ranges depending * on whether the head index is less than @p n_head_log2 or not, and uses different base values * (`m0` and `m1`) for the exponentiation. * * @f[ * slope[h] = * \begin{cases} * m_0^{(h + 1)}, & h < n\_head\_log2 \\ * m_1^{\left( 2 \cdot (h - n\_head\_log2) + 1 \right)}, & h \geq n\_head\_log2 * \end{cases} * \quad , \quad \text{if } max\_bias > 0 * @f] * * If @p max_bias <= 0, all slope values are set to 1.0. * * @param ctx CANN backend context for memory allocation and operator execution. * @param n_head Total number of attention heads. * @param slope_buffer Pointer to the output buffer (float array) for storing slopes. * @param max_bias Maximum bias value for slope computation. * @param dtype Data type for slope tensor. * */ static void aclnn_get_slope(ggml_backend_cann_context & ctx, int64_t n_head, void * slope_buffer, float max_bias, ggml_type dtype) { const int n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); float m0 = powf(2.0f, -(max_bias) / n_head_log2); float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); // const float slope = (max_bias > 0.0f) ? // h < n_head_log2 ? // powf(m0, h + 1) : // powf(m1, 2*(h - n_head_log2) + 1) : // 1.0f; // arange1 float start = 0 + 1; float end = (n_head_log2 - 1) + 1; float step = 1; float count = n_head_log2; // end needs to be +1 because aclnn uses a left-closed, right-open interval. aclnn_get_slope_inner(ctx, slope_buffer, m0, count, start, end + 1, step, dtype); if (n_head_log2 < n_head) { // arange2 start = 2 * (n_head_log2 - n_head_log2) + 1; end = 2 * ((n_head - 1) - n_head_log2) + 1; step = 2; count = n_head - n_head_log2; aclnn_get_slope_inner(ctx, (char *) slope_buffer + n_head_log2 * sizeof(float), m1, count, start, end + 1, step, dtype); } } /** * @brief Add ALiBi (Attention with Linear Biases) positional biases to the attention mask. 
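 * (For example, with n_head = 8 and max_bias = 8.0 the helper above yields n_head_log2 = 8,
 * m0 = 2^{-1}, and head h receives slope 2^{-(h + 1)}.)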
* * This function computes the ALiBi slopes for each attention head (if max_bias > 0), * multiplies them with the attention mask to produce bias tensors, and adds these biases * to the destination tensor (@p dst). * * The function performs necessary broadcasting of the mask and slope tensors to match * the shape of the destination tensor, then applies element-wise multiplication and addition * using CANN operators. * * @param ctx CANN backend context for memory management and operator execution. * @param mask Input attention mask tensor, assumed to be contiguous. * @param dst Destination tensor to which ALiBi biases will be added. * @param dst_ptr Pointer to the memory of the destination tensor. * @param max_bias Maximum bias value controlling the slope scaling. * * @note * - Write data into dst_ptr using only the shape information of the dst tensor. * - `GGML_MAX_DIMS + 2` is used to extend tensor dimensions for broadcasting. */ static void aclnn_add_alibi(ggml_backend_cann_context & ctx, ggml_tensor * mask, ggml_tensor * dst, void * dst_ptr, float max_bias) { void * slope_buffer = nullptr; void * bias_buffer = nullptr; if (max_bias > 0.0f) { int64_t n_heads = dst->ne[2]; ggml_cann_pool_alloc slope_allocator(ctx.pool(), n_heads * sizeof(float)); slope_buffer = slope_allocator.get(); ggml_cann_pool_alloc bias_allocator(ctx.pool(), ggml_nelements(dst) * ggml_element_size(dst)); bias_buffer = bias_allocator.get(); aclnn_get_slope(ctx, n_heads, slope_buffer, max_bias, GGML_TYPE_F32); } // broadcast for mask, slope and dst int64_t nr2 = dst->ne[2] / mask->ne[2]; int64_t nr3 = dst->ne[3] / mask->ne[3]; // broadcast the mask across rows int64_t mask_ne[] = { mask->ne[0], dst->ne[1], mask->ne[2], 1, mask->ne[3], 1 }; size_t mask_nb[] = { mask_nb[0] = mask->nb[0], mask_nb[1] = mask->nb[1], mask_nb[2] = mask->nb[2], mask_nb[3] = mask->nb[2], mask_nb[4] = mask->nb[3], mask_nb[5] = mask->nb[3] }; int64_t dst_ne[] = { dst->ne[0], dst->ne[1], mask->ne[2], nr2, mask->ne[3], nr3 }; size_t dst_nb[] = { dst_nb[0] = dst->nb[0], dst_nb[1] = dst->nb[1], dst_nb[2] = dst->nb[2], dst_nb[3] = dst->nb[2], dst_nb[4] = dst->nb[3], dst_nb[5] = dst->nb[3] }; // slope is a 1 dim tensor, slope.ne2 == dst.ne2 int64_t slope_ne[] = { 1, 1, mask->ne[2], nr2, 1, 1 }; size_t slope_nb[GGML_MAX_DIMS + 2]; slope_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS + 2; i++) { slope_nb[i] = slope_nb[i - 1] * slope_ne[i - 1]; } acl_tensor_ptr acl_slope = ggml_cann_create_tensor(slope_buffer, ACL_FLOAT, sizeof(float), slope_ne, slope_nb, GGML_MAX_DIMS + 2); acl_tensor_ptr acl_mask = ggml_cann_create_tensor(mask, mask_ne, mask_nb, GGML_MAX_DIMS + 2); // write data into dst_ptr using only the shape information of the dst tensor.
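// The 6-dim (GGML_MAX_DIMS + 2) views split dst->ne[2] into (mask->ne[2], nr2) and dst->ne[3] into
// (mask->ne[3], nr3), so the mask and the per-head slopes broadcast over the repeated groups in the
// multiply/add that follows.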
acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst_ptr, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), dst_ne, dst_nb, GGML_MAX_DIMS + 2); if (max_bias > 0.0f) { int64_t bias_ne[] = { mask->ne[0], dst->ne[1], mask->ne[2], nr2, mask->ne[3], 1 }; size_t bias_nb[GGML_MAX_DIMS + 2]; bias_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS + 2; i++) { bias_nb[i] = bias_nb[i - 1] * bias_ne[i - 1]; } acl_tensor_ptr bias_tensor = ggml_cann_create_tensor(bias_buffer, ACL_FLOAT, sizeof(float), bias_ne, bias_nb, GGML_MAX_DIMS + 2); aclnn_mul(ctx, acl_slope.get(), acl_mask.get(), bias_tensor.get()); aclnn_add(ctx, acl_dst.get(), bias_tensor.get()); } else { aclnn_add(ctx, acl_dst.get(), acl_mask.get()); } } void ggml_cann_cpy(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_cann_dup(ctx, dst); } /** * @brief Applies the softmax function to a tensor along a specified dimension. * * This function computes the softmax of the source tensor `acl_src` along the * specified dimension `dim` and stores the result in the destination tensor * `acl_dst`. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor on which the softmax function will be * applied. * @param dim The dimension along which the softmax function will be computed. * @param acl_dst The destination tensor where the softmax results will be * stored. */ static void aclnn_softmax(ggml_backend_cann_context & ctx, aclTensor * acl_src, int64_t dim, aclTensor * acl_dst) { GGML_CANN_CALL_ACLNN_OP(ctx, Softmax, acl_src, dim, acl_dst); } void ggml_cann_softmax(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; // mask acl_tensor_ptr acl_src0 = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); float scale = 1.0f; float max_bias = 0.0f; memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); // input mul scale acl_scalar_ptr acl_scale = ggml_cann_create_scalar(&scale, aclDataType::ACL_FLOAT); ggml_cann_pool_alloc src_tensor_allocator(ctx.pool(), ggml_nbytes(src0)); void * src_tensor_buffer = src_tensor_allocator.get(); acl_tensor_ptr softmax_tensor = ggml_cann_create_tensor(src_tensor_buffer, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), src0->ne, src0->nb, GGML_MAX_DIMS); aclnn_muls(ctx, acl_src0.get(), scale, softmax_tensor.get(), false); // mask if (src1) { aclnn_add_alibi(ctx, src1, src0, src_tensor_buffer, max_bias); } // softmax aclnn_softmax(ctx, softmax_tensor.get(), 3, acl_dst.get()); } /** * @brief Performs index select operation on a 4D tensor using the CANN backend. * * This function applies the `IndexSelect` operation along a specific dimension * of the source tensor (`src_buffer`) using the indices from the index tensor (`index`). * It iterates over the last two dimensions of the source tensor, creates the corresponding * CANN tensors for the source, index, and output slices, and executes the `IndexSelect` * operation for each slice. * * @param ctx The context for CANN backend operations. * @param src_buffer The source buffer containing the 4D input tensor data. * @param src_ne The dimensions of the source tensor. * @param src_nb The strides (byte offsets) of the source tensor. * @param dst_buffer The destination buffer where the output tensor data will be written. * @param dst_ne The dimensions of the destination tensor. 
* @param dst_nb The strides (byte offsets) of the destination tensor. * @param index The index tensor specifying the indices to select from the source tensor. * @param type The data type of the source and destination tensors. */ static void aclnn_index_select_4d(ggml_backend_cann_context & ctx, void * src_buffer, int64_t * src_ne, size_t * src_nb, void * dst_buffer, int64_t * dst_ne, size_t * dst_nb, ggml_tensor * index, ggml_type type) { for (int64_t i = 0; i < src_ne[3]; i++) { for (int64_t j = 0; j < src_ne[2]; j++) { // src acl_tensor_ptr acl_src_tensor = ggml_cann_create_tensor((char *) src_buffer + i * src_nb[3] + j * src_nb[2], ggml_cann_type_mapping(type), ggml_type_size(type), src_ne, src_nb, 2); // index acl_tensor_ptr acl_index = ggml_cann_create_tensor( (char *) index->data + (i % index->ne[2]) * index->nb[2] + (j % index->ne[1]) * index->nb[1], ggml_cann_type_mapping(index->type), ggml_element_size(index), index->ne, index->nb, 1); // out acl_tensor_ptr acl_out = ggml_cann_create_tensor((char *) dst_buffer + i * dst_nb[3] + j * dst_nb[2], ggml_cann_type_mapping(type), ggml_type_size(type), dst_ne, dst_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, IndexSelect, acl_src_tensor.get(), 0, acl_index.get(), acl_out.get()); } } } /** * @brief Performs inplace index copy operation on a 4D tensor using the CANN backend. * * This function applies the `IndexCopy` operation along a specific dimension of the * destination tensor (`dst_buffer`) by copying elements from the source tensor (`src_buffer`) * to positions specified by the index tensor (`index`). * It iterates over the last two dimensions of the tensors, creates the corresponding * CANN tensors for source, index, and destination slices, and performs the index copy * operation for each slice. * * @param ctx The context for CANN backend operations. * @param src_buffer The source buffer containing the 4D input tensor data to be copied. * @param src_ne The dimensions of the source tensor. * @param src_nb The strides (byte offsets) of the source tensor. * @param dst_buffer The destination buffer where values will be copied to. * @param dst_ne The dimensions of the destination tensor. * @param dst_nb The strides (byte offsets) of the destination tensor. * @param index The index tensor specifying target positions in the destination tensor. * @param type The data type of the source and destination tensors. 
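 * For example, assuming the usual index_copy semantics, an index tensor { 3, 1 } writes source
 * row 0 to destination row 3 and source row 1 to destination row 1.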
*/ static void aclnn_index_copy_4d(ggml_backend_cann_context & ctx, void * src_buffer, int64_t * src_ne, size_t * src_nb, void * dst_buffer, int64_t * dst_ne, size_t * dst_nb, ggml_tensor * index, ggml_type type) { for (int64_t i = 0; i < src_ne[3]; i++) { for (int64_t j = 0; j < src_ne[2]; j++) { // src acl_tensor_ptr acl_src_tensor = ggml_cann_create_tensor((char *) src_buffer + i * src_nb[3] + j * src_nb[2], ggml_cann_type_mapping(type), ggml_type_size(type), src_ne, src_nb, 2); // index acl_tensor_ptr acl_index = ggml_cann_create_tensor( (char *) index->data + (i % index->ne[2]) * index->nb[2] + (j % index->ne[1]) * index->nb[1], ggml_cann_type_mapping(index->type), ggml_element_size(index), index->ne, index->nb, 1); // out acl_tensor_ptr acl_out = ggml_cann_create_tensor((char *) dst_buffer + i * dst_nb[3] + j * dst_nb[2], ggml_cann_type_mapping(type), ggml_type_size(type), dst_ne, dst_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceIndexCopy, acl_out.get(), 0, acl_index.get(), acl_src_tensor.get()); } } } void ggml_cann_get_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // src ggml_tensor * src1 = dst->src[1]; // index GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); switch (src0->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: if (src0->type == dst->type) { aclnn_index_select_4d(ctx, src0->data, src0->ne, src0->nb, dst->data, dst->ne, dst->nb, src1, dst->type); } else { acl_tensor_ptr acl_src0 = ggml_cann_create_tensor(src0); ggml_cann_pool_alloc src_buffer_allocator(ctx.pool(), ggml_nelements(src0) * ggml_element_size(dst)); void * src_trans_buffer = src_buffer_allocator.get(); size_t src_trans_nb[GGML_MAX_DIMS]; src_trans_nb[0] = dst->nb[0]; for (int i = 1; i < GGML_MAX_DIMS; i++) { src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; } acl_tensor_ptr src_trans_tensor = ggml_cann_create_tensor(src_trans_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), src0->ne, src_trans_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src0.get(), src_trans_tensor.get(), ggml_cann_type_mapping(dst->type)); aclnn_index_select_4d(ctx, src_trans_buffer, src0->ne, src_trans_nb, dst->data, dst->ne, dst->nb, src1, dst->type); } break; case GGML_TYPE_Q8_0: { // add 1 dim for bcast mul. 
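// The Q8_0 buffer is laid out as all int8 quantized values followed by all f16 scales
// (scale_offset below points at the scales). Viewing them as [..., nblocks, QK8_0] weights and
// [..., nblocks, 1] scales lets a broadcast multiply dequantize to dst->type before the index select.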
size_t weight_nb[GGML_MAX_DIMS + 1], scale_nb[GGML_MAX_DIMS + 1], dequant_nb[GGML_MAX_DIMS + 1]; int64_t weight_ne[GGML_MAX_DIMS + 1], scale_ne[GGML_MAX_DIMS + 1], *dequant_ne; int64_t scale_offset = 0; // [3,4,5,64] -> [3,4,5,2,32] weight_ne[0] = QK8_0; weight_ne[1] = src0->ne[0] / QK8_0; weight_nb[0] = sizeof(int8_t); weight_nb[1] = weight_nb[0] * weight_ne[0]; for (int i = 2; i < GGML_MAX_DIMS + 1; i++) { weight_ne[i] = src0->ne[i - 1]; weight_nb[i] = weight_nb[i - 1] * weight_ne[i - 1]; } // [3,4,5,64] -> [3,4,5,2,1] scale_ne[0] = 1; scale_ne[1] = src0->ne[0] / QK8_0; scale_nb[0] = sizeof(uint16_t); scale_nb[1] = scale_nb[0] * scale_ne[0]; for (int i = 2; i < GGML_MAX_DIMS + 1; i++) { scale_ne[i] = src0->ne[i - 1]; scale_nb[i] = scale_nb[i - 1] * scale_ne[i - 1]; } // [3,4,5,64] -> [3,4,5,2,32] dequant_ne = weight_ne; dequant_nb[0] = ggml_type_size(dst->type); for (int i = 1; i < GGML_MAX_DIMS + 1; i++) { dequant_nb[i] = dequant_nb[i - 1] * dequant_ne[i - 1]; } scale_offset = ggml_nelements(src0) * sizeof(int8_t); ggml_cann_pool_alloc dequant_buffer_allocator(ctx.pool(), ggml_nelements(src0) * ggml_type_size(dst->type)); acl_tensor_ptr acl_weight_tensor = ggml_cann_create_tensor(src0->data, ACL_INT8, sizeof(int8_t), weight_ne, weight_nb, GGML_MAX_DIMS + 1); acl_tensor_ptr acl_scale_tensor = ggml_cann_create_tensor(src0->data, ACL_FLOAT16, sizeof(uint16_t), scale_ne, scale_nb, GGML_MAX_DIMS + 1, ACL_FORMAT_ND, scale_offset); acl_tensor_ptr dequant_tensor = ggml_cann_create_tensor(dequant_buffer_allocator.get(), ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), dequant_ne, dequant_nb, GGML_MAX_DIMS + 1); aclnn_mul(ctx, acl_weight_tensor.get(), acl_scale_tensor.get(), dequant_tensor.get()); dequant_nb[0] = ggml_type_size(dst->type); dequant_ne = src0->ne; for (int i = 1; i < GGML_MAX_DIMS; i++) { dequant_nb[i] = dequant_nb[i - 1] * src0->ne[i - 1]; } aclnn_index_select_4d(ctx, dequant_buffer_allocator.get(), dequant_ne, dequant_nb, dst->data, dst->ne, dst->nb, src1, dst->type); break; } default: GGML_ABORT("Unsupported tensor type for GGML_OP_GET_ROWS"); break; } } void ggml_cann_set_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // src ggml_tensor * src1 = dst->src[1]; // index switch (dst->type) { case GGML_TYPE_F32: { aclnn_index_copy_4d(ctx, src0->data, src0->ne, src0->nb, dst->data, dst->ne, dst->nb, src1, dst->type); break; } case GGML_TYPE_F16: { acl_tensor_ptr acl_src0 = ggml_cann_create_tensor(src0); ggml_cann_pool_alloc src_buffer_allocator(ctx.pool(), ggml_nelements(src0) * sizeof(uint16_t)); void * src_trans_buffer = src_buffer_allocator.get(); size_t src_trans_nb[GGML_MAX_DIMS]; src_trans_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; } acl_tensor_ptr src_trans_tensor = ggml_cann_create_tensor( src_trans_buffer, ACL_FLOAT16, ggml_type_size(dst->type), src0->ne, src_trans_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src0.get(), src_trans_tensor.get(), ggml_cann_type_mapping(dst->type)); aclnn_index_copy_4d(ctx, src_trans_buffer, src0->ne, src_trans_nb, dst->data, dst->ne, dst->nb, src1, dst->type); break; } default: GGML_ABORT("Unsupported tensor type for GGML_OP_SET_ROWS"); break; } } /** * @brief Repeats elements of a tensor along a specified dimension. 
* * This function repeats each element of the source tensor `acl_src` a specified * number of times (`repeats`) along the specified dimension `dim` and stores * the result in the destination tensor `acl_dst`. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor whose elements will be repeated. * @param acl_dst The destination tensor where the repeated elements will be * stored. * @param dim The dimension along which the elements will be repeated. * @param repeats The number of times each element will be repeated. * @param output_size The size of the output tensor. */ static void aclnn_repeat_interleave(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst, int64_t dim, int64_t repeats, int64_t output_size) { GGML_CANN_CALL_ACLNN_OP(ctx, RepeatInterleaveIntWithDim, acl_src, repeats, dim, output_size, acl_dst); } /** * @brief Performs matrix multiplication with floating-point precision on * tensors using the CANN backend. * * This function performs matrix multiplication of the input tensor and the * weight tensor, handling broadcasting and transposing as needed, and stores * the result in the destination tensor `dst`. * * @param ctx The context for the CANN backend operations. * @param dst The destination tensor where the result of the matrix * multiplication will be stored. */ static void ggml_cann_mat_mul_fp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * weight = dst->src[0]; // weight ggml_tensor * input = dst->src[1]; // input // when weight ne2 or ne3 is 1, aclnnMatmulGetWorkspaceSize will auto // broadcast, when weight ne2 or ne3 is not 1, weight need repeat. BCAST_MUL_MAT_SHAPE(input, weight, dst); int64_t n_dims = bcast_dims; if (bcast_input_ne[3] == bcast_weight_ne[3] && bcast_input_ne[3] == 1) { if (bcast_input_ne[2] == 1 && bcast_weight_ne[2] == 1) { n_dims = 2; } else if (bcast_input_ne[2] == 1) { n_dims = 3; } } acl_tensor_ptr acl_input_tensor = ggml_cann_create_tensor(input, bcast_input_ne, bcast_input_nb, n_dims); int64_t transpose_ne[] = { bcast_weight_ne[1], bcast_weight_ne[0], bcast_weight_ne[2], bcast_weight_ne[3], bcast_weight_ne[4], bcast_weight_ne[5] }; size_t transpose_nb[] = { bcast_weight_nb[1], bcast_weight_nb[0], bcast_weight_nb[2], bcast_weight_nb[3], bcast_weight_nb[4], bcast_weight_nb[5] }; acl_tensor_ptr acl_weight_tensor; // Only check env once. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on")); if (weight_to_nz && is_matmul_weight(weight)) { acl_weight_tensor = ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_FRACTAL_NZ); } else { acl_weight_tensor = ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_ND); } acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, bcast_dst_ne, bcast_dst_nb, n_dims); switch (n_dims) { case 2: GGML_CANN_CALL_ACLNN_OP(ctx, Mm, acl_input_tensor.get(), acl_weight_tensor.get(), acl_dst.get(), 2); break; case 3: GGML_CANN_CALL_ACLNN_OP(ctx, BatchMatMul, acl_input_tensor.get(), acl_weight_tensor.get(), acl_dst.get(), 2); break; default: // ALLOW_FP32_DOWN_PRECISION, when input is // fp32, atlas a2 will transpose it to HFLOAT32. GGML_CANN_CALL_ACLNN_OP(ctx, Matmul, acl_input_tensor.get(), acl_weight_tensor.get(), acl_dst.get(), 1); break; } } /** * @brief Performs matrix multiplication with quantized weights and * floating-point inputs using the CANN backend. 
* * This function performs matrix multiplication of the input tensor `src1` and * the weight tensor `src0`, handling broadcasting, transposing, and * quantization as needed, and stores the result in the destination tensor * `dst`. * * @param ctx The context for the CANN backend operations. * @param dst The destination tensor where the result of the matrix * multiplication will be stored. */ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context & ctx, ggml_tensor * dst, const enum ggml_type type) { ggml_tensor * src0 = dst->src[0]; // weight ggml_tensor * src1 = dst->src[1]; // input // The shape of the weight is NCHW. // Matrix multiplication uses HW dims. // HC is regarded as batch. // weight need transpose. float weight_elem_size; if (type == GGML_TYPE_Q4_0) { weight_elem_size = float(sizeof(uint8_t)) / 2; } else if (type == GGML_TYPE_Q8_0) { weight_elem_size = float(sizeof(uint8_t)); } else { GGML_ABORT("Only support Q4_0 and Q8_0 MUL_MAT"); } float weight_nb[] = { src0->ne[0] * weight_elem_size, weight_elem_size }; size_t weight_stride = src0->ne[1] * src0->ne[0] * weight_elem_size; size_t weight_size = weight_stride * src0->ne[2] * src0->ne[3]; // scale stored at the end of weight. Also need transpose. size_t scale_elem_size = sizeof(uint16_t); size_t scale_nb[] = { src0->ne[0] / QK8_0 * scale_elem_size, scale_elem_size }; size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; char * scale_offset = (char *) src0->data + weight_size; // input size_t input_elem_size = sizeof(uint16_t); int64_t input_ne[] = { src1->ne[0], src1->ne[1] }; size_t input_nb[] = { input_elem_size, input_ne[0] * input_elem_size }; size_t input_stride = input_ne[0] * input_ne[1] * input_elem_size; ggml_cann_pool_alloc input_alloctor(ctx.pool()); void * input_buffer = src1->data; // case in if (src1->type != GGML_TYPE_F16) { acl_tensor_ptr acl_src1_tensor = ggml_cann_create_tensor(src1); input_buffer = input_alloctor.alloc(ggml_nelements(src1) * input_elem_size); int64_t * input_cast_ne = src1->ne; size_t input_cast_nb[GGML_MAX_DIMS]; input_cast_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_cast_nb[i] = input_cast_nb[i - 1] * input_cast_ne[i - 1]; } acl_tensor_ptr acl_input_tensor = ggml_cann_create_tensor(input_buffer, ACL_FLOAT16, input_elem_size, input_cast_ne, input_cast_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src1_tensor.get(), acl_input_tensor.get(), ACL_FLOAT16); } // output size_t output_elem_size = sizeof(uint16_t); size_t output_nb[] = { output_elem_size, dst->ne[0] * output_elem_size }; ggml_cann_pool_alloc output_allocator(ctx.pool()); void * output_buffer = output_allocator.alloc(ggml_nelements(dst) * output_elem_size); size_t output_stride = dst->ne[0] * dst->ne[1] * output_elem_size; // aclnn int64_t max_elem_size = 65535; int64_t split_size = (src0->ne[1] / max_elem_size) + 1; ggml_cann_pool_alloc workspace_allocator(ctx.pool()); for (int64_t n1 = 0; n1 < src1->ne[3]; n1++) { for (int64_t c1 = 0; c1 < src1->ne[2]; c1++) { int64_t n0 = n1 / (src1->ne[3] / src0->ne[3]); int64_t c0 = c1 / (src1->ne[2] / src0->ne[2]); int64_t batch1 = (n1 * src1->ne[2]) + c1; int64_t batch0 = (n0 * src0->ne[2]) + c0; acl_tensor_ptr acl_input_tensor = ggml_cann_create_tensor( (char *) input_buffer + batch1 * input_stride, ACL_FLOAT16, input_elem_size, input_ne, input_nb, 2); // first split int64_t weight_ne_offset = 0; int64_t weight_ne[2] = { max_elem_size > src0->ne[1] ? 
src0->ne[1] : max_elem_size, src0->ne[0] }; int64_t scale_ne_offset = 0; int64_t scale_ne[2] = { weight_ne[0], weight_ne[1] / QK8_0 }; int64_t output_ne_offset = 0; int64_t output_ne[2] = { weight_ne[0], dst->ne[1] }; acl_tensor_ptr acl_weight_tensor = ggml_cann_create_tensor((char *) src0->data + batch0 * weight_stride, ggml_cann_type_mapping(type), weight_elem_size, weight_ne, weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); acl_tensor_ptr acl_scale_tensor = ggml_cann_create_tensor(scale_offset + batch0 * scale_stride, ACL_FLOAT16, scale_elem_size, scale_ne, scale_nb, 2, ACL_FORMAT_ND, scale_ne_offset); acl_tensor_ptr acl_output_tensor = ggml_cann_create_tensor((char *) output_buffer + batch1 * output_stride, ACL_FLOAT16, output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, output_ne_offset); int64_t antiquantGroupSize = 0; if (src0->ne[0] > QK8_0) { antiquantGroupSize = QK8_0; } GGML_CANN_CALL_ACLNN_OP(ctx, WeightQuantBatchMatmulV2, acl_input_tensor.get(), acl_weight_tensor.get(), acl_scale_tensor.get(), nullptr, nullptr, nullptr, nullptr, antiquantGroupSize, acl_output_tensor.get()); // other splits for (int64_t split = 1; split < split_size; split++) { weight_ne_offset += weight_elem_size * weight_ne[0] * weight_ne[1]; weight_ne[0] = max_elem_size * (split + 1) > src0->ne[1] ? src0->ne[1] - (max_elem_size * split) : max_elem_size; scale_ne_offset += scale_elem_size * scale_ne[0] * scale_ne[1]; scale_ne[0] = weight_ne[0]; output_ne_offset += output_elem_size * output_ne[0] * output_ne[1]; output_ne[0] = weight_ne[0]; acl_weight_tensor = ggml_cann_create_tensor((char *) src0->data + batch0 * weight_stride, ggml_cann_type_mapping(type), weight_elem_size, weight_ne, weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); acl_scale_tensor = ggml_cann_create_tensor(scale_offset + batch0 * scale_stride, ACL_FLOAT16, scale_elem_size, scale_ne, scale_nb, 2, ACL_FORMAT_ND, scale_ne_offset); acl_output_tensor = ggml_cann_create_tensor((char *) output_buffer + batch1 * output_stride, ACL_FLOAT16, output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, output_ne_offset); GGML_CANN_CALL_ACLNN_OP(ctx, WeightQuantBatchMatmulV2, acl_input_tensor.get(), acl_weight_tensor.get(), acl_scale_tensor.get(), nullptr, nullptr, nullptr, nullptr, antiquantGroupSize, acl_output_tensor.get()); } } } // cast out if (dst->type != GGML_TYPE_F16) { int64_t * output_cast_ne = dst->ne; size_t output_cast_nb[GGML_MAX_DIMS]; output_cast_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { output_cast_nb[i] = output_cast_nb[i - 1] * output_cast_ne[i - 1]; } acl_tensor_ptr acl_output_tensor = ggml_cann_create_tensor(output_buffer, ACL_FLOAT16, output_elem_size, output_cast_ne, output_cast_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_dst_tensor = ggml_cann_create_tensor(dst); aclnn_cast(ctx, acl_output_tensor.get(), acl_dst_tensor.get(), ggml_cann_type_mapping(dst->type)); } } void ggml_cann_mul_mat(ggml_backend_cann_context & ctx, ggml_tensor * dst) { const enum ggml_type type = dst->src[0]->type; switch (type) { case GGML_TYPE_F32: case GGML_TYPE_F16: ggml_cann_mat_mul_fp(ctx, dst); break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q8_0: ggml_cann_mul_mat_quant(ctx, dst, type); break; default: GGML_ABORT("Unsupported type for mul_mat"); break; } } /** * @brief Rolls the elements of a tensor along a specified dimension. * * This function rolls the elements of the source tensor `acl_src` by the * specified shifts `shifts` along the specified dimensions `dims`, and stores * the result in the destination tensor `acl_dst`. 
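 *
 * Illustrative example (assuming torch.roll-style wrap-around semantics for
 * aclnnRoll): rolling [q0, q1, q2, q3] by a shift of 1 along its only
 * dimension yields [q3, q0, q1, q2]; a shift of 2 yields [q2, q3, q0, q1].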
* * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor whose elements will be rolled. * @param acl_dst The destination tensor where the rolled elements will be * stored. * @param shifts An array specifying the number of positions by which elements * are shifted. * @param dims An array specifying the dimensions along which elements are * shifted. */ static void aclnn_roll(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst, int64_t * shifts, int64_t * dims) { acl_int_array_ptr acl_shifts = ggml_cann_create_int_array(shifts, 1); acl_int_array_ptr acl_dims = ggml_cann_create_int_array(dims, 1); GGML_CANN_CALL_ACLNN_OP(ctx, Roll, acl_src, acl_shifts.get(), acl_dims.get(), acl_dst); } /** * @brief Fills specified positions of a tensor with a scalar value. * * This function fills the positions in the source tensor `acl_src` specified by * `index` along the dimension `dim` with the scalar value `value`. * * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor where the positions will be filled. * @param dim The dimension along which the positions are specified. * @param index An array specifying the positions to be filled. * @param index_num The number of positions specified in the index array. * @param value The scalar value used to fill the specified positions. */ static void aclnn_index_fill_tensor(ggml_backend_cann_context & ctx, aclTensor * acl_src, int64_t dim, int64_t * index, int64_t index_num, float value) { acl_int_array_ptr acl_index = ggml_cann_create_int_array(index, index_num); acl_scalar_ptr acl_value = ggml_cann_create_scalar(&value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceIndexFillTensor, acl_src, dim, acl_index.get(), acl_value.get()); } /** * @brief Initializes and caches all intermediate tensors required for RoPE * (Rotary Position Embedding), including support for Yarn, mRoPE, * i-mRoPE, Neox repeat strategy, independent sectors, frequency factors, * and multi-section rotary groups. * * This function computes and caches the per-dimension θ coefficients used for * Q/K rotary embedding. The cache is shared across layers, and recomputed only * when any dependent parameter changes. * * The function now supports: * - Yarn RoPE extrapolation (via @param corr_dims and @param ext_factor) * - Per-dimension independent sector exponent rules (indep_sects + sections[]) * - Multi-section RoPE (mRoPE) index mapping (mrope_used + is_imrope) * - Frequency factor division (src2) * - Neox / normal repeat expansion modes * * @param ctx CANN backend context, containing memory pool, * cached buffers, and runtime stream. * @param dst Destination ggml_tensor whose computation * depends on RoPE (typically Qcur or Kcur). * @param corr_dims [low, high] Yarn correction range. * @param ext_factor Yarn extrapolation strength. 0 = disabled. * @param theta_scale Base multiplier for per-dimension θ exponent. * @param freq_scale Global frequency scaling factor. * @param attn_factor Optional scaling applied to sin/cos (if needed). * @param is_neox Whether to use Neox-style dimension interleave. * @param sections 4-way sector sizes for independent-section RoPE * and multi-section mRoPE (t/h/w/e). * @param mrope_used Whether to enable multi-section rotary embedding. * @param is_imrope Whether to apply interleaved mRoPE rules. * @param indep_sects Whether each dimension runs independent exponent * resets based on @p sections. 
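 *
 * As a simplified sketch of what ends up cached (assuming no Yarn, no
 * frequency factors, no mRoPE and freq_scale == 1): for every position p and
 * every rotary pair index i in [0, rope_dims/2), the cache holds
 * sin(p * theta_scale^i) and cos(p * theta_scale^i), expanded to rope_dims
 * values per position according to the Neox/normal repeat strategy.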
*/ static void aclnn_rope_cache_init(ggml_backend_cann_context & ctx, ggml_tensor * dst, float * corr_dims, float ext_factor, float theta_scale, float freq_scale, float attn_factor, bool is_neox, int sections[4], bool mrope_used, bool is_imrope, bool indep_sects, int64_t rope_dims) { ggml_tensor * src1 = dst->src[1]; // position ggml_tensor * src2 = dst->src[2]; // freq_factors int64_t theta_scale_length = rope_dims / 2; int64_t position_length = dst->ne[2]; // TODO: check theta_scale_length and position_length. if (src2 == nullptr && ctx.rope_cache.cached && ctx.rope_cache.equal(theta_scale_length, position_length, ext_factor, theta_scale, freq_scale, attn_factor, is_neox, indep_sects, mrope_used, is_imrope, sections)) { // use cache. return; } // Step0: calculate tensor shape. int64_t theta_scale_ne[] = { theta_scale_length, 1, 1, 1 }; size_t theta_scale_nb[] = { sizeof(float), theta_scale_length * sizeof(float), theta_scale_length * sizeof(float), theta_scale_length * sizeof(float) }; GGML_ASSERT(src1->type == GGML_TYPE_I32); int64_t position_ne[] = { 1, 1, position_length, 1 }; size_t position_nb[] = { sizeof(int32_t), sizeof(int32_t), sizeof(int32_t), sizeof(int32_t) * position_length }; int64_t cache_ne[] = { theta_scale_length, 1, position_length, 1 }; size_t cache_nb[GGML_MAX_DIMS]; cache_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { cache_nb[i] = cache_nb[i - 1] * cache_ne[i - 1]; } // Step1: Compute the coefficient of theta. During the cache_init process, aside from // (1) multiplying by the position, // (2) dividing by freq_factors, // (3) computing the sine and cosine, // the other parameters used in the computation generally do not change in most scenarios. // Therefore, we can first compute this part of the result and then cache it. // Step1.1: prepare theta_scale exponent. if this exponent updated, should update theta_scale_tensor. 
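    // Illustrative contents of the exponent table built below (non-independent
    // sectors): { 1, s, s^2, ..., s^(rope_dims/2 - 1) } with s = theta_scale.
    // For example, freq_base = 10000 and n_dims = 128 give s = 10000^(-2/128),
    // roughly 0.866 (numbers are illustrative only).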
acl_tensor_ptr acl_theta_scale_tensor; bool theta_scale_updated = false; if (ctx.rope_cache.theta_scale_length != theta_scale_length || ctx.rope_cache.theta_scale != theta_scale || ctx.rope_cache.indep_sects != indep_sects) { theta_scale_updated = true; if (ctx.rope_cache.theta_scale_exp_host != nullptr) { free(ctx.rope_cache.theta_scale_exp_host); } ctx.rope_cache.theta_scale_exp_host = (float *) malloc(theta_scale_length * sizeof(float)); GGML_ASSERT(ctx.rope_cache.theta_scale_exp_host != nullptr); if (!indep_sects) { ctx.rope_cache.theta_scale_exp_host[0] = 1; for (int i = 1; i < theta_scale_length; i++) { ctx.rope_cache.theta_scale_exp_host[i] = ctx.rope_cache.theta_scale_exp_host[i - 1] * theta_scale; } } else { int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; int sec_w = sections[1] + sections[0]; int sec_e = sections[2] + sec_w; ctx.rope_cache.theta_scale_exp_host[0] = 1; for (int i = 1; i < theta_scale_length; i++) { int sector = i % sect_dims; if (sector == 0 || sector == sections[0] || sector == sec_w || sector == sec_e) { ctx.rope_cache.theta_scale_exp_host[i] = 1; continue; } ctx.rope_cache.theta_scale_exp_host[i] = ctx.rope_cache.theta_scale_exp_host[i - 1] * theta_scale; } } if (ctx.rope_cache.theta_scale_cache != nullptr) { ACL_CHECK(aclrtFree(ctx.rope_cache.theta_scale_cache)); } ACL_CHECK(aclrtMalloc(&ctx.rope_cache.theta_scale_cache, theta_scale_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); ACL_CHECK(aclrtMemcpyAsync(ctx.rope_cache.theta_scale_cache, theta_scale_length * sizeof(float), ctx.rope_cache.theta_scale_exp_host, theta_scale_length * sizeof(float), ACL_MEMCPY_HOST_TO_DEVICE, ctx.stream())); } acl_theta_scale_tensor = ggml_cann_create_tensor(ctx.rope_cache.theta_scale_cache, ACL_FLOAT, sizeof(float), theta_scale_ne, theta_scale_nb, 1); // Step1.2: prepare rope_yarn_ramp, if this part updated, should update theta_scale_tensor. // TODO: acl_yarn_ramp_tensor use rope cache. 
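    // Worked reading of the Yarn ramp math below (assuming ext_factor == 1):
    // the cached per-index multiplier ends up being 1 for i <= corr_dims[0]
    // (pure extrapolation, no frequency scaling) and freq_scale for
    // i >= corr_dims[1] (pure interpolation), ramping linearly in between.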
bool yarn_ramp_tensor_updated = false; acl_tensor_ptr acl_yarn_ramp_tensor; if (ext_factor != 0 && (theta_scale_updated || ctx.rope_cache.theta_scale_length != theta_scale_length || ctx.rope_cache.freq_scale != freq_scale)) { yarn_ramp_tensor_updated = true; if (ctx.rope_cache.yarn_ramp_cache != nullptr) { ACL_CHECK(aclrtFree(ctx.rope_cache.yarn_ramp_cache)); } ACL_CHECK(aclrtMalloc(&ctx.rope_cache.yarn_ramp_cache, theta_scale_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); // -rope_yarn_ramp // const float y = (i0 / 2 - low) / MAX(0.001f, high - low); // return MIN(1, MAX(0, y)) - 1; acl_yarn_ramp_tensor = ggml_cann_create_tensor(ctx.rope_cache.yarn_ramp_cache, ACL_FLOAT, sizeof(float), theta_scale_ne, theta_scale_nb, 1); float zero_value = 0, one_value = 1; float denom_safe_value = MAX(0.001f, corr_dims[1] - corr_dims[0]); acl_scalar_ptr low = ggml_cann_create_scalar(&corr_dims[0], aclDataType::ACL_FLOAT); acl_scalar_ptr zero = ggml_cann_create_scalar(&zero_value, aclDataType::ACL_FLOAT); acl_scalar_ptr one = ggml_cann_create_scalar(&one_value, aclDataType::ACL_FLOAT); acl_scalar_ptr denom_safe = ggml_cann_create_scalar(&denom_safe_value, aclDataType::ACL_FLOAT); acl_scalar_ptr ext_factor_sc = ggml_cann_create_scalar(&ext_factor, aclDataType::ACL_FLOAT); aclnn_arange(ctx, acl_yarn_ramp_tensor.get(), 0, theta_scale_length, 1, theta_scale_length); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceSubs, acl_yarn_ramp_tensor.get(), low.get(), one.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceDivs, acl_yarn_ramp_tensor.get(), denom_safe.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceThreshold, acl_yarn_ramp_tensor.get(), zero.get(), zero.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceClampMax, acl_yarn_ramp_tensor.get(), one.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceSubs, acl_yarn_ramp_tensor.get(), one.get(), one.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMuls, acl_yarn_ramp_tensor.get(), ext_factor_sc.get()); // theta_interp = freq_scale * theta_extrap; // theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // theta = freq_scale * theta_extrap * (1 - ramp_mix) + theta_extrap * ramp_mix; // theta = freq_scale * theta_extrap - freq_scale * theta_extrap * ramp_mix + theta_extrap * ramp_mix; // theta = theta_extrap * (freq_scale - freq_scale * ramp_mix + ramp_mix); // // we cache (freq_scale - freq_scale * ramp_mix + ramp_mix), Considering that the rope_yarn_ramp here is the inverse // cache freq_scale + (freq_scale - 1) * ramp_mix float freq_scale_1 = freq_scale - 1; acl_scalar_ptr freq_scale_sc = ggml_cann_create_scalar(&freq_scale, aclDataType::ACL_FLOAT); acl_scalar_ptr freq_scale_1_sc = ggml_cann_create_scalar(&freq_scale_1, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMuls, acl_yarn_ramp_tensor.get(), freq_scale_1_sc.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdds, acl_yarn_ramp_tensor.get(), freq_scale_sc.get(), one.get()); } else { acl_yarn_ramp_tensor = ggml_cann_create_tensor(ctx.rope_cache.yarn_ramp_cache, ACL_FLOAT, sizeof(float), theta_scale_ne, theta_scale_nb, 1); } // Step 1.3: update theta_scale_tensor according to ext_factor or freq_scale. if (ext_factor != 0) { if (theta_scale_updated || yarn_ramp_tensor_updated) { theta_scale_updated = true; aclnn_mul(ctx, acl_theta_scale_tensor.get(), acl_yarn_ramp_tensor.get()); } } else { if (freq_scale != 1 && (ctx.rope_cache.freq_scale != freq_scale || theta_scale_updated)) { theta_scale_updated = true; aclnn_muls(ctx, acl_theta_scale_tensor.get(), freq_scale, nullptr, true); } } // Nothing changed, use cache. 
if (!theta_scale_updated) { acl_theta_scale_tensor = ggml_cann_create_tensor(ctx.rope_cache.theta_scale_cache, ACL_FLOAT, sizeof(float), theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); } // Step 1.4: prepare select index if mrope acl_tensor_ptr position_select_index_tensor; if (mrope_used) { if (ctx.rope_cache.sections[0] != sections[0] || ctx.rope_cache.sections[1] != sections[1] || ctx.rope_cache.sections[2] != sections[2] || ctx.rope_cache.sections[3] != sections[3] || ctx.rope_cache.theta_scale_length != theta_scale_length || ctx.rope_cache.is_imrope != is_imrope) { if (ctx.rope_cache.position_select_index_host != nullptr) { free(ctx.rope_cache.position_select_index_host); } ctx.rope_cache.position_select_index_host = (int *) malloc(theta_scale_length * sizeof(int)); GGML_ASSERT(ctx.rope_cache.position_select_index_host != nullptr); int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; int sec_w = sections[1] + sections[0]; int sec_e = sections[2] + sec_w; // t,h,w,e for (int i = 0; i < theta_scale_length; i++) { int sector = i % sect_dims; if (is_imrope) { // qwen3vl apply interleaved mrope if (sector % 3 == 1 && sector < 3 * sections[1]) { ctx.rope_cache.position_select_index_host[i] = 1; } else if (sector % 3 == 2 && sector < 3 * sections[2]) { ctx.rope_cache.position_select_index_host[i] = 2; } else if (sector % 3 == 0 && sector < 3 * sections[0]) { ctx.rope_cache.position_select_index_host[i] = 0; } else { ctx.rope_cache.position_select_index_host[i] = 3; } } else { if (sector >= sections[0] && sector < sec_w) { ctx.rope_cache.position_select_index_host[i] = 1; } else if (sector >= sec_w && sector < sec_e) { ctx.rope_cache.position_select_index_host[i] = 2; } else if (sector >= sec_e) { ctx.rope_cache.position_select_index_host[i] = 3; } else { ctx.rope_cache.position_select_index_host[i] = 0; } } } if (ctx.rope_cache.position_select_index != nullptr) { ACL_CHECK(aclrtFree(ctx.rope_cache.position_select_index)); } ACL_CHECK(aclrtMalloc(&ctx.rope_cache.position_select_index, theta_scale_length * sizeof(int), ACL_MEM_MALLOC_HUGE_FIRST)); ACL_CHECK(aclrtMemcpyAsync(ctx.rope_cache.position_select_index, theta_scale_length * sizeof(int), ctx.rope_cache.position_select_index_host, theta_scale_length * sizeof(int), ACL_MEMCPY_HOST_TO_DEVICE, ctx.stream())); } position_select_index_tensor = ggml_cann_create_tensor(ctx.rope_cache.position_select_index, ACL_INT32, sizeof(int), theta_scale_ne, theta_scale_nb, 1); } // Step2: divide by freq_factors ggml_cann_pool_alloc freq_fac_res_allocator(ctx.pool()); if (src2) { freq_fac_res_allocator.alloc(theta_scale_length * sizeof(float)); void * freq_fac_res_ptr = freq_fac_res_allocator.get(); acl_tensor_ptr acl_freq_factors_tensor = ggml_cann_create_tensor(src2->data, ggml_cann_type_mapping(src2->type), ggml_type_size(src2->type), theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_freq_fac_res_tensor = ggml_cann_create_tensor(freq_fac_res_ptr, ACL_FLOAT, sizeof(float), theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); aclnn_div(ctx, acl_theta_scale_tensor.get(), acl_freq_factors_tensor.get(), acl_freq_fac_res_tensor.get()); std::swap(acl_theta_scale_tensor, acl_freq_fac_res_tensor); } // Step3: prepare position_tensor acl_tensor_ptr acl_position_tensor; ggml_cann_pool_alloc mrope_position_acllocator(ctx.pool()); if (mrope_used) { // Step3.1: select current position; // position : // pos1: [[0, 1 ,2 ,3 ], // pos2: [4, 5 ,6 ,7 ], // pos3: [8, 9 ,10,11], // pos4: [12,13,14,15] ] // // select index = [0, 1, 2, 2, 1, 0] // 
// selected_tensor: // [[0, 1 ,2 ,3 ], // [4, 5 ,6 ,7 ], // [8, 9 ,10,11], // [8, 9 ,10,11], // [4, 5 ,6 ,7 ], // [0, 1 ,2 ,3 ]] // // transpose, from [seq_len:dims] to [dims:seq_len] // [0, 4, 8 ,8 ,4, 0], // [1, 5, 9, 9, 5, 1], // [2, 6, 10,10,6 ,2], // [3, 7, 11,11,7, 3 ]] // // multiply by theta_scale_tensor // [theta_scale^0, theta_scale^1, ..., theta_scale ^ n] int64_t mrope_position_ne[] = { position_length, 4 }; size_t mrope_position_nb[] = { sizeof(int), position_length * sizeof(int) }; acl_tensor_ptr mrope_position = ggml_cann_create_tensor(src1->data, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), mrope_position_ne, mrope_position_nb, 2); // selected position tensor's shape is a transpose of cache tensor. int64_t selected_position_ne[] = { position_length, theta_scale_length }; size_t selected_position_nb[] = { sizeof(float), position_length * sizeof(float) }; mrope_position_acllocator.alloc(theta_scale_length * position_length * sizeof(float)); void * mrope_position_buffer = mrope_position_acllocator.get(); acl_position_tensor = ggml_cann_create_tensor(mrope_position_buffer, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), selected_position_ne, selected_position_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, IndexSelect, mrope_position.get(), 0, position_select_index_tensor.get(), acl_position_tensor.get()); // transpose int64_t transposed_ne[] = { position_length, 1, theta_scale_length, 1 }; size_t transposed_nb[GGML_MAX_DIMS]; transposed_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { transposed_nb[i] = transposed_nb[i - 1] * transposed_ne[i - 1]; } std::swap(transposed_ne[0], transposed_ne[2]); std::swap(transposed_nb[0], transposed_nb[2]); acl_position_tensor = ggml_cann_create_tensor(mrope_position_buffer, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), transposed_ne, transposed_nb, GGML_MAX_DIMS); } else { // auto bcast. acl_position_tensor = ggml_cann_create_tensor(src1->data, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), position_ne, position_nb, GGML_MAX_DIMS); } // Step4: multiply by the position int64_t theta_length = theta_scale_length * position_length; ggml_cann_pool_alloc theta_allocator(ctx.pool(), theta_length * sizeof(float)); void * theta_buffer = theta_allocator.get(); acl_tensor_ptr acl_theta_tensor = ggml_cann_create_tensor(theta_buffer, ACL_FLOAT, sizeof(float), cache_ne, cache_nb, GGML_MAX_DIMS); aclnn_mul(ctx, acl_position_tensor.get(), acl_theta_scale_tensor.get(), acl_theta_tensor.get()); // Step5: calculate sin cos.
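    // Illustrative: at this point acl_theta_tensor holds theta(i, p) = pos(p) * coeff(i)
    // over a (rope_dims/2 x n_positions) grid; Step5 takes elementwise sin/cos of this
    // grid once, and the results are reused by every layer that shares the same RoPE
    // parameters (hence the cache).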
// init sin_repeat && cos_repeat, only to accelerate first layer on each device if (position_length > ctx.rope_cache.position_length) { ctx.rope_cache.position_length = position_length; if (ctx.rope_cache.sin_cache != nullptr) { ACL_CHECK(aclrtFree(ctx.rope_cache.sin_cache)); } if (ctx.rope_cache.cos_cache != nullptr) { ACL_CHECK(aclrtFree(ctx.rope_cache.cos_cache)); } int64_t repeat_theta_length = theta_scale_length * position_length * 2; ACL_CHECK( aclrtMalloc(&ctx.rope_cache.sin_cache, repeat_theta_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); ACL_CHECK( aclrtMalloc(&ctx.rope_cache.cos_cache, repeat_theta_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); } // sin/cos ggml_cann_pool_alloc sin_allocator(ctx.pool(), theta_length * sizeof(float)); void * sin_buffer = sin_allocator.get(); acl_tensor_ptr acl_sin_tensor = ggml_cann_create_tensor(sin_buffer, ACL_FLOAT, sizeof(float), cache_ne, cache_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_sin(ctx, acl_theta_tensor.get(), acl_sin_tensor.get()); ggml_cann_pool_alloc cos_allocator(ctx.pool(), theta_length * sizeof(float)); void * cos_buffer = cos_allocator.get(); acl_tensor_ptr acl_cos_tensor = ggml_cann_create_tensor(cos_buffer, ACL_FLOAT, sizeof(float), cache_ne, cache_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_cos(ctx, acl_theta_tensor.get(), acl_cos_tensor.get()); if (ext_factor != 0) { attn_factor *= 1.0f + 0.1f * logf(1.0f / freq_scale); } // Step 5: multiply by attn_factor if (attn_factor != 1) { aclnn_muls(ctx, acl_sin_tensor.get(), attn_factor, nullptr, true); aclnn_muls(ctx, acl_cos_tensor.get(), attn_factor, nullptr, true); } int64_t sin_reshape_ne[4] = { rope_dims, 1, dst->ne[2], 1 }; size_t sin_reshape_nb[GGML_MAX_DIMS]; sin_reshape_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { sin_reshape_nb[i] = sin_reshape_nb[i - 1] * sin_reshape_ne[i - 1]; } acl_tensor_ptr acl_sin_repeat_tensor = ggml_cann_create_tensor(ctx.rope_cache.sin_cache, ACL_FLOAT, sizeof(float), sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_cos_repeat_tensor = ggml_cann_create_tensor(ctx.rope_cache.cos_cache, ACL_FLOAT, sizeof(float), sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); // Step 6: repeat if (is_neox) { // [sinθ1, sinθ1, sinθ2, sinθ2, ..., sinθn, sinθn] int64_t repeatsArray[] = { 1, 1, 1, 2 }; aclnn_repeat(ctx, acl_sin_tensor.get(), acl_sin_repeat_tensor.get(), repeatsArray); aclnn_repeat(ctx, acl_cos_tensor.get(), acl_cos_repeat_tensor.get(), repeatsArray); } else { int64_t num_repeats = 2; int64_t dim = 3; int64_t output_size = theta_scale_length * num_repeats; // [sinθ1, sinθ2, ..., sinθn, sinθ1, sinθ2, ..., sinθn] aclnn_repeat_interleave(ctx, acl_sin_tensor.get(), acl_sin_repeat_tensor.get(), dim, num_repeats, output_size); aclnn_repeat_interleave(ctx, acl_cos_tensor.get(), acl_cos_repeat_tensor.get(), dim, num_repeats, output_size); } // Update cached value. 
ctx.rope_cache.cached = true; ctx.rope_cache.set(theta_scale_length, position_length, ext_factor, theta_scale, freq_scale, attn_factor, is_neox, indep_sects, mrope_used, is_imrope, sections); } #ifdef __cplusplus extern "C" { #endif aclnnStatus aclnnRotaryPositionEmbeddingGetWorkspaceSize(const aclTensor * x, const aclTensor * cos, const aclTensor * sin, int64_t mode, const aclTensor * yOut, uint64_t * workspaceSize, aclOpExecutor ** executor); aclnnStatus aclnnRotaryPositionEmbedding(void * workspace, uint64_t workspaceSize, aclOpExecutor * executor, aclrtStream stream); #ifdef __cplusplus } #endif void ggml_cann_rope(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // input // param float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; int sections[4]; // const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; // const int n_ctx = ((int32_t *) dst->op_params)[3]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; GGML_TENSOR_UNARY_OP_LOCALS memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); memcpy(&sections, (int32_t *) dst->op_params + 11, sizeof(int) * 4); GGML_ASSERT(n_dims % 2 == 0); GGML_ASSERT(n_dims <= ne00); const float theta_scale = powf(freq_base, -2.0f / n_dims); float corr_dims[2]; ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); bool is_neox = mode & GGML_ROPE_TYPE_NEOX; const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope // mrope_used means the GGML_ROPE_TYPE_MROPE bit is set. // Note: this bit is also set for imrope and some vision modes, // so mrope_used does NOT exclusively indicate pure mrope. const bool mrope_used = mode & GGML_ROPE_TYPE_MROPE; const bool is_vision = mode == GGML_ROPE_TYPE_VISION; if (mrope_used) { GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); } if (is_vision) { GGML_ASSERT(n_dims == ne0 / 2); } if (is_imrope || mrope_used) { is_neox = true; } int64_t rope_dims = n_dims; // Our current RotaryPositionEmbedding does not support the VISION mode, // but essentially it only modifies theta_base in mrope, // then repeats it at the end in the same way as is_neox. // In fact, RoPE is still applied across all dimensions.
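    // Illustrative: with ne00 = 128 and n_dims = 96 in a non-vision mode,
    // rope_dims stays 96 and the remaining 32 values of each row form the
    // unrotated "tail" handled further below; in vision mode rope_dims is
    // widened to ne00, so no tail remains.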
if (is_vision) { rope_dims = src0->ne[0]; } int64_t tail_dims = ne00 - rope_dims; bool has_tail = tail_dims > 0; // init ctx.rope_cos/rope_sin cache aclnn_rope_cache_init(ctx, dst, corr_dims, ext_factor, theta_scale, freq_scale, attn_factor, is_neox, sections, mrope_used, is_imrope, is_vision, rope_dims); // Cache is generated with ne00 dimensions, so we use ne00 for reshape int64_t sin_reshape_ne[4] = { rope_dims, 1, ne02, 1 }; size_t sin_reshape_nb[GGML_MAX_DIMS]; sin_reshape_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { sin_reshape_nb[i] = sin_reshape_nb[i - 1] * sin_reshape_ne[i - 1]; } acl_tensor_ptr acl_sin_reshape_tensor = ggml_cann_create_tensor(ctx.rope_cache.sin_cache, ACL_FLOAT, sizeof(float), sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_cos_reshape_tensor = ggml_cann_create_tensor(ctx.rope_cache.cos_cache, ACL_FLOAT, sizeof(float), sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_src = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); #ifdef ASCEND_310P // Special ROPE operation for 310P // roll input void * input_roll_buffer; acl_tensor_ptr acl_minus_one_tensor; void * minus_one_scale_buffer = nullptr; ggml_cann_pool_alloc roll_allocator(ctx.pool(), ggml_nbytes(src0)); ggml_cann_pool_alloc minus_one_scale_allocator(ctx.pool(), sizeof(float) * src0->ne[0]); if (!is_neox) { // roll input: [q0,q1,q2,q3,...] -> [q1,q0,q3,q2,...] input_roll_buffer = roll_allocator.get(); int64_t input_roll_ne[4] = { 2, src0->ne[1] * (src0->ne[0] / 2), src0->ne[2], src0->ne[3] }; size_t input_roll_nb[GGML_MAX_DIMS]; input_roll_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_roll_nb[i] = input_roll_nb[i - 1] * input_roll_ne[i - 1]; } acl_tensor_ptr acl_input_roll_tensor = ggml_cann_create_tensor(input_roll_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), input_roll_ne, input_roll_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_input_tensor = ggml_cann_create_tensor(src0->data, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), input_roll_ne, input_roll_nb, GGML_MAX_DIMS); int64_t shifts[] = { 1 }; int64_t dims[] = { 3 }; aclnn_roll(ctx, acl_input_tensor.get(), acl_input_roll_tensor.get(), shifts, dims); // init [-1, 1, -1, 1, ...] minus_one_scale_buffer = minus_one_scale_allocator.get(); int64_t minus_one_ne[4] = { src0->ne[0], 1, 1, 1 }; size_t minus_one_nb[GGML_MAX_DIMS]; minus_one_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { minus_one_nb[i] = minus_one_nb[i - 1] * minus_one_ne[i - 1]; } acl_minus_one_tensor = aclnn_values(ctx, minus_one_scale_buffer, sizeof(float) * src0->ne[0], minus_one_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float), 1); int64_t dim = 3; int64_t * index = new int64_t[src0->ne[0]]; for (int i = 0; i < src0->ne[0]; i++) { index[i] = i / 2 * 2; } int64_t index_num = src0->ne[0]; float value = -1; aclnn_index_fill_tensor(ctx, acl_minus_one_tensor.get(), dim, index, index_num, value); } else { // roll input: [q0,q1,q2,...] 
-> // [q_half,q_half+1,...,q_end,q0,q1,...q_half-1] input_roll_buffer = roll_allocator.get(); acl_tensor_ptr acl_input_roll_tensor = ggml_cann_create_tensor(input_roll_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), src0->ne, src0->nb, GGML_MAX_DIMS); acl_tensor_ptr acl_input_tensor = ggml_cann_create_tensor(src0); int64_t shifts[] = { src0->ne[0] / 2 }; int64_t dims[] = { 3 }; aclnn_roll(ctx, acl_input_tensor.get(), acl_input_roll_tensor.get(), shifts, dims); // init [-1, -1, -1, 1, 1,1,...] minus_one_scale_buffer = minus_one_scale_allocator.get(); int64_t minus_one_ne[4] = { src0->ne[0], 1, 1, 1 }; size_t minus_one_nb[GGML_MAX_DIMS]; minus_one_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { minus_one_nb[i] = minus_one_nb[i - 1] * minus_one_ne[i - 1]; } acl_minus_one_tensor = aclnn_values(ctx, minus_one_scale_buffer, sizeof(float) * src0->ne[0], minus_one_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float), 1); // -1 * first half int64_t first_half_ne[4] = { src0->ne[0] / 2, 1, 1, 1 }; size_t first_half_nb[GGML_MAX_DIMS]; first_half_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { first_half_nb[i] = first_half_nb[i - 1] * first_half_ne[i - 1]; } acl_tensor_ptr acl_first_half_tensor = ggml_cann_create_tensor(minus_one_scale_buffer, ACL_FLOAT, sizeof(float), first_half_ne, first_half_nb, GGML_MAX_DIMS); bool inplace = true; float scale = -1; aclnn_muls(ctx, acl_first_half_tensor.get(), scale, nullptr, inplace); } // TODO: n_dims < ne0 GGML_ASSERT(n_dims == src0->ne[0]); // input * scale ggml_cann_pool_alloc roll_mul_scale_allocator(ctx.pool(), ggml_nbytes(src0)); void * input_roll_mul_scale_buffer = roll_mul_scale_allocator.get(); size_t input_nb[GGML_MAX_DIMS]; input_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_nb[i] = input_nb[i - 1] * src0->ne[i - 1]; } acl_tensor_ptr acl_input_roll_mul_scale_tensor = ggml_cann_create_tensor(input_roll_mul_scale_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), src0->ne, input_nb, GGML_MAX_DIMS); acl_tensor_ptr acl_input_roll_reshape_tensor = ggml_cann_create_tensor(input_roll_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), src0->ne, input_nb, GGML_MAX_DIMS); aclnn_mul(ctx, acl_input_roll_reshape_tensor.get(), acl_minus_one_tensor.get(), acl_input_roll_mul_scale_tensor.get()); // output void * output_fp32_buffer; if (src0->type == GGML_TYPE_F32) { aclnn_mul(ctx, acl_src.get(), acl_cos_reshape_tensor.get()); aclnn_mul(ctx, acl_input_roll_mul_scale_tensor.get(), acl_sin_reshape_tensor.get()); aclnn_add(ctx, acl_src.get(), acl_input_roll_mul_scale_tensor.get(), acl_dst.get()); // TODO: ne0 != n_dims in mode2 } else if (src0->type == GGML_TYPE_F16) { size_t input_fp32_nb[GGML_MAX_DIMS]; input_fp32_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_fp32_nb[i] = input_fp32_nb[i - 1] * dst->ne[i - 1]; } ggml_cann_pool_alloc fp32_allocator1(ctx.pool(), ggml_nelements(dst) * sizeof(float)); void * input_fp32_buffer1 = fp32_allocator1.get(); acl_tensor_ptr input_fp32_tensor1 = ggml_cann_create_tensor(input_fp32_buffer1, ACL_FLOAT, sizeof(float), dst->ne, input_fp32_nb, GGML_MAX_DIMS); ggml_cann_pool_alloc fp32_allocator2(ctx.pool(), ggml_nelements(dst) * sizeof(float)); void * input_fp32_buffer2 = fp32_allocator2.get(); acl_tensor_ptr input_fp32_tensor2 = ggml_cann_create_tensor(input_fp32_buffer2, ACL_FLOAT, sizeof(float), dst->ne, input_fp32_nb, GGML_MAX_DIMS); ggml_cann_pool_alloc fp32_allocator(ctx.pool(), 
ggml_nelements(dst) * sizeof(float)); output_fp32_buffer = fp32_allocator.get(); acl_tensor_ptr output_fp32_tensor = ggml_cann_create_tensor(output_fp32_buffer, ACL_FLOAT, sizeof(float), dst->ne, input_fp32_nb, GGML_MAX_DIMS); aclnn_mul(ctx, acl_src.get(), acl_cos_reshape_tensor.get(), input_fp32_tensor1.get()); aclnn_mul(ctx, acl_input_roll_mul_scale_tensor.get(), acl_sin_reshape_tensor.get(), input_fp32_tensor2.get()); aclnn_add(ctx, input_fp32_tensor1.get(), input_fp32_tensor2.get(), output_fp32_tensor.get()); aclnn_cast(ctx, output_fp32_tensor.get(), acl_dst.get(), ACL_FLOAT16); } return; #endif int64_t acl_mode = is_neox ? 0 : 1; // Pre-define head and tail dimensions for reuse int64_t head_ne[GGML_MAX_DIMS] = { rope_dims, ne01, ne02, ne03 }; int64_t tail_ne[GGML_MAX_DIMS] = { tail_dims, ne01, ne02, ne03 }; // Step 1: Prepare trans tensors for F16 type conversion to F32 if needed bool src_dst_need_trans = false; ggml_cann_pool_alloc src_trans_allocator(ctx.pool()); ggml_cann_pool_alloc dst_trans_allocator(ctx.pool()); acl_tensor_ptr acl_src_trans_tensor; acl_tensor_ptr acl_dst_trans_tensor; void * src_trans_buffer = nullptr; void * dst_trans_buffer = nullptr; size_t src_dst_trans_nb[GGML_MAX_DIMS]; if (src0->type == GGML_TYPE_F16) { src_dst_need_trans = true; src_trans_buffer = src_trans_allocator.alloc(ggml_nelements(src0) * sizeof(float)); dst_trans_buffer = dst_trans_allocator.alloc(ggml_nelements(dst) * sizeof(float)); src_dst_trans_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { src_dst_trans_nb[i] = src_dst_trans_nb[i - 1] * src0->ne[i - 1]; } acl_src_trans_tensor = ggml_cann_create_tensor(src_trans_buffer, ACL_FLOAT, sizeof(float), src0->ne, src_dst_trans_nb, GGML_MAX_DIMS); acl_dst_trans_tensor = ggml_cann_create_tensor(dst_trans_buffer, ACL_FLOAT, sizeof(float), dst->ne, src_dst_trans_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src.get(), acl_src_trans_tensor.get(), ACL_FLOAT); } // Step 2: Prepare head tensors for tail splitting if needed acl_tensor_ptr acl_src_head; acl_tensor_ptr acl_dst_head; if (has_tail) { // Create head views for RotaryPositionEmbedding (only first rope_dims dimensions) // RotaryPositionEmbedding requires contiguous dst tensor, so we use a temporary buffer if (src_dst_need_trans) { // Use F32 trans tensor strides acl_src_head = ggml_cann_create_tensor((char *) src_trans_buffer, ACL_FLOAT, sizeof(float), head_ne, src_dst_trans_nb, GGML_MAX_DIMS); } else { // Use original F32 tensor strides acl_src_head = ggml_cann_create_tensor((char *) src0->data, ACL_FLOAT, sizeof(float), head_ne, src0->nb, GGML_MAX_DIMS); } int64_t head_elements = rope_dims * ne01 * ne02 * ne03; ggml_cann_pool_alloc dst_head_contiguous_allocator(ctx.pool(), head_elements * sizeof(float)); void * dst_head_contiguous_buffer = dst_head_contiguous_allocator.get(); size_t head_contiguous_nb[GGML_MAX_DIMS]; head_contiguous_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { head_contiguous_nb[i] = head_contiguous_nb[i - 1] * head_ne[i - 1]; } acl_dst_head = ggml_cann_create_tensor(dst_head_contiguous_buffer, ACL_FLOAT, sizeof(float), head_ne, head_contiguous_nb, GGML_MAX_DIMS); } // Step 3: Execute RotaryPositionEmbedding if (has_tail) { // Rotate only the head portion (first rope_dims dimensions) GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src_head.get(), acl_cos_reshape_tensor.get(), acl_sin_reshape_tensor.get(), acl_mode, acl_dst_head.get()); // Copy head result from contiguous buffer back to destination tensor if (src_dst_need_trans) { 
acl_tensor_ptr acl_dst_head_target = ggml_cann_create_tensor( (char *) dst_trans_buffer, ACL_FLOAT, sizeof(float), head_ne, src_dst_trans_nb, GGML_MAX_DIMS); cann_copy(ctx, acl_dst_head.get(), acl_dst_head_target.get()); } else { acl_tensor_ptr acl_dst_head_target = ggml_cann_create_tensor((char *) dst->data, ACL_FLOAT, sizeof(float), head_ne, dst->nb, GGML_MAX_DIMS); cann_copy(ctx, acl_dst_head.get(), acl_dst_head_target.get()); } } else if (src_dst_need_trans) { // Rotate full tensor (no tail), using trans tensors GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src_trans_tensor.get(), acl_cos_reshape_tensor.get(), acl_sin_reshape_tensor.get(), acl_mode, acl_dst_trans_tensor.get()); } else { // Rotate full tensor (no tail), using original tensors GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src.get(), acl_cos_reshape_tensor.get(), acl_sin_reshape_tensor.get(), acl_mode, acl_dst.get()); } // Step 4: Copy unrotated tail portion from source to destination if (has_tail) { size_t src_tail_offset; size_t dst_tail_offset; auto copy_tail_device = [&](void * src_ptr, void * dst_ptr, aclDataType dtype, size_t elem_size, size_t * nb_src_arr, size_t * nb_dst_arr) { acl_tensor_ptr acl_src_tail = ggml_cann_create_tensor(src_ptr, dtype, elem_size, tail_ne, nb_src_arr, GGML_MAX_DIMS); acl_tensor_ptr acl_dst_tail = ggml_cann_create_tensor(dst_ptr, dtype, elem_size, tail_ne, nb_dst_arr, GGML_MAX_DIMS); cann_copy(ctx, acl_src_tail.get(), acl_dst_tail.get()); }; if (src_dst_need_trans) { // Use F32 trans tensor strides and offsets src_tail_offset = rope_dims * src_dst_trans_nb[0]; dst_tail_offset = rope_dims * src_dst_trans_nb[0]; copy_tail_device((char *) src_trans_buffer + src_tail_offset, (char *) dst_trans_buffer + dst_tail_offset, ACL_FLOAT, sizeof(float), src_dst_trans_nb, src_dst_trans_nb); } else { // Use original tensor strides and offsets src_tail_offset = rope_dims * nb00; dst_tail_offset = rope_dims * nb0; copy_tail_device((char *) src0->data + src_tail_offset, (char *) dst->data + dst_tail_offset, ggml_cann_type_mapping(dst->type), ggml_element_size(dst), src0->nb, dst->nb); } } // Step 5: Cast back to F16 if needed if (src_dst_need_trans) { aclnn_cast(ctx, acl_dst_trans_tensor.get(), acl_dst.get(), ACL_FLOAT16); } } void ggml_cann_argmax(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3); GGML_CANN_CALL_ACLNN_OP(ctx, ArgMax, acl_src.get(), 3, false, acl_dst.get()); } void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){ ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; // stride int64_t s0 = ((const int32_t*)(dst->op_params))[0]; acl_tensor_ptr acl_input = ggml_cann_create_tensor(src1, src1->ne, src1->nb, 3, ACL_FORMAT_NCL); acl_tensor_ptr acl_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3, ACL_FORMAT_NCL); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3, ACL_FORMAT_NCL); // get base information of input and kernel int64_t input_len = *(src1->ne); int64_t dst_len = *(dst->ne); int64_t kernel_size = *(src0->ne); // set the max kernel size for each conv int64_t max_kernel_size = 255; // compute the partition of kernel int64_t part_num = 1; part_num = (kernel_size + max_kernel_size - 1) / max_kernel_size; int64_t strideVal[1]; strideVal[0] = s0; acl_int_array_ptr stride = ggml_cann_create_int_array(strideVal, 1); int64_t 
paddingVal[] = {0}; acl_int_array_ptr padding = ggml_cann_create_int_array(paddingVal, 1); int64_t dilationVal[] = {1}; acl_int_array_ptr dilation = ggml_cann_create_int_array(dilationVal, 1); bool transposed = true; int64_t groups = 1; int8_t cubeMathType = 0; #ifdef ASCEND_310P cubeMathType = 1; #endif auto weight_type = ggml_cann_type_mapping(src0->type); auto dst_type = ggml_cann_type_mapping(dst->type); // slice the kernel to make each conv available int64_t slice_dim = -1; int64_t slice_start = 0; int64_t slice_end = max_kernel_size; int64_t slice_step = 1; int64_t interval = max_kernel_size; int64_t left_pad_len = dilationVal[0] * (max_kernel_size - 1) + 1 - 2 * paddingVal[0]; int64_t right_pad_len = 0; acl_scalar_ptr alpha = nullptr; float alphaValue = 1.0; alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); // set zero to destination GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_dst.get()); for(int k = 0; k < part_num; k++){ // create part kernel tensor and slice from big kernel slice_start = max_kernel_size * k; if(k == part_num - 1){ slice_end = kernel_size; interval = kernel_size - max_kernel_size * k; }else{ slice_end = max_kernel_size * (k+1); } int64_t part_ne[4]; for(int i = 0; i < 4; i++) { part_ne[i] = *(src0->ne + i); } part_ne[0] = interval; size_t part_nb[4]; part_nb[0] = sizeof(weight_type); for (int i = 1; i < 4; i++) { part_nb[i] = part_nb[i - 1] * part_ne[i - 1]; } ggml_cann_pool_alloc part_kernel_allocator; part_kernel_allocator.alloc(ctx.pool(), part_nb[3]); void* part_kernel_buf = part_kernel_allocator.get(); acl_tensor_ptr part_kernel = ggml_cann_create_tensor(part_kernel_buf, weight_type, ggml_element_size(src0), part_ne, part_nb, 3, ACL_FORMAT_NCL); GGML_CANN_CALL_ACLNN_OP(ctx, Slice, acl_weight.get(), slice_dim, slice_start, slice_end, slice_step, part_kernel.get()); // create the part conv result tensor int64_t part_dst_ne[4]; for(int i = 0; i < 4; i++){ part_dst_ne[i] = *(dst->ne + i); } part_dst_ne[0] = (input_len - 1) * strideVal[0] - 2 * paddingVal[0] + dilationVal[0] * (part_ne[0] - 1) + 1; size_t part_dst_nb[4]; part_dst_nb[0] = sizeof(weight_type); for (int i = 1; i < 4; i++) { part_dst_nb[i] = part_dst_nb[i - 1] * part_dst_ne[i - 1]; } ggml_cann_pool_alloc part_dst_allocator; part_dst_allocator.alloc(ctx.pool(), part_dst_nb[3]); void* part_dst_buf = part_dst_allocator.get(); acl_tensor_ptr acl_part_dst = ggml_cann_create_tensor(part_dst_buf, dst_type, ggml_element_size(dst), part_dst_ne, part_dst_nb, 3, ACL_FORMAT_NCL); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_part_dst.get()); // compute part conv transpose 1d GGML_CANN_CALL_ACLNN_OP(ctx, Convolution, acl_input.get(), part_kernel.get(), nullptr, stride.get(), padding.get(), dilation.get(), transposed, padding.get(), groups, acl_part_dst.get(), cubeMathType); // compute the position of part result in final result int64_t global_start = slice_start; int64_t global_end = std::min((input_len - 1) * strideVal[0] + slice_end, dst_len); left_pad_len = global_start; right_pad_len = dst_len - global_end; std::vector padDataVal = {left_pad_len,right_pad_len}; acl_int_array_ptr padData = ggml_cann_create_int_array(padDataVal.data(), 2); acl_scalar_ptr pad_value = nullptr; float pad_valueVal = 0.0; pad_value = ggml_cann_create_scalar(&pad_valueVal, aclDataType::ACL_FLOAT); int64_t conv_result_ne[4]; for(int i = 0; i < 4; i++){ conv_result_ne[i] = *(dst->ne + i); } size_t conv_result_nb[4]; conv_result_nb[0] = sizeof(weight_type); for (int i = 1; i < 4; i++) { conv_result_nb[i] = 
conv_result_nb[i - 1] * conv_result_ne[i - 1]; } ggml_cann_pool_alloc conv_result_allocator; conv_result_allocator.alloc(ctx.pool(), conv_result_nb[3]); void* conv_result_buf = conv_result_allocator.get(); acl_tensor_ptr conv_result = ggml_cann_create_tensor(conv_result_buf, dst_type, ggml_element_size(dst), conv_result_ne, conv_result_nb, 3, ACL_FORMAT_NCL); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, conv_result.get()); GGML_CANN_CALL_ACLNN_OP(ctx, ConstantPadNd, acl_part_dst.get(), padData.get(), pad_value.get(), conv_result.get()); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst.get(), conv_result.get(), alpha.get()); } } void ggml_cann_elu(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; acl_tensor_ptr acl_input = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); float alphaValue = 1.0f; acl_scalar_ptr alpha = nullptr; alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Elu, acl_input.get(), alpha.get(), alpha.get(), alpha.get(), acl_dst.get()); } void ggml_cann_mean(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); int64_t reduceDimValue[] = { 3 }; acl_int_array_ptr reduceDim = ggml_cann_create_int_array(reduceDimValue, 1); bool keepDim = true; GGML_CANN_CALL_ACLNN_OP(ctx, Mean, acl_src.get(), reduceDim.get(), keepDim, ACL_FLOAT, acl_dst.get()); } void ggml_cann_pad_reflect_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; int32_t * opts = (int32_t *) dst->op_params; int64_t paddingsArray[2] = { opts[0], opts[1] }; acl_int_array_ptr paddings = ggml_cann_create_int_array(paddingsArray, 2); for (int64_t i = 0; i < src0->ne[3]; i++) { acl_tensor_ptr acl_src = ggml_cann_create_tensor((char *) src0->data + i * src0->ne[3], ggml_cann_type_mapping(src0->type), ggml_element_size(src0), src0->ne, src0->nb, 3); acl_tensor_ptr acl_dst = ggml_cann_create_tensor((char *) dst->data + i * src0->ne[3], ggml_cann_type_mapping(dst->type), ggml_element_size(dst), dst->ne, dst->nb, 3); GGML_CANN_CALL_ACLNN_OP(ctx, ReflectionPad1d, acl_src.get(), paddings.get(), acl_dst.get()); } } void ggml_cann_count_equal(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; acl_tensor_ptr acl_self = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_other = ggml_cann_create_tensor(src1); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceEqTensor, acl_self.get(), acl_other.get()); ggml_cann_sum(ctx, dst); } void ggml_cann_step(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src0); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); float alphaValue = 0.0f; acl_scalar_ptr alpha = nullptr; alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, GtScalar, acl_src.get(), alpha.get(), acl_dst.get()); } /** * @brief Performs expert-specific matrix multiplication (MoE) with * floating-point precision using the CANN backend. * * This function executes a matrix multiplication operation tailored for * Mixture of Experts (MoE) models, where the input tensor is multiplied * with expert-specific weight matrices. It uses the CANN backend for * efficient computation and stores the result in the destination tensor `dst`. 
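 * As an illustrative sketch of the implementation below: `ids` has shape
 * [K, N] (K selected experts per token batch); for each of the N batches the
 * K expert weight matrices are gathered from `src0` with IndexSelect and then
 * multiplied against the corresponding `src1` rows with a batched matmul.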
* The operation may leverage identity-based optimizations or routing masks * as part of sparse expert selection. * * @param ctx The context for executing CANN backend operations. * @param dst The destination tensor where the MoE multiplication result * will be stored. * * @note This function assumes floating-point data types and is designed for * MoE architectures, possibly involving sparse expert routing. */ static void ggml_cann_mul_mat_id_fp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { //dst [M, K, N, 1] ggml_tensor * src0 = dst->src[0]; //src0 [D, M, A, 1] -> [D, M, K, 1] ggml_tensor * src1 = dst->src[1]; //src1 [D, B, N, 1], B = K or B = 1 -> [D, 1, K, 1] ggml_tensor * ids = dst->src[2]; //ids [K, N] GGML_ASSERT(src0->ne[3] == 1); GGML_ASSERT(src1->ne[3] == 1); GGML_ASSERT(dst->ne[3] == 1); int64_t batch = src1->ne[2]; GGML_ASSERT(batch == ids->ne[1]); ggml_cann_pool_alloc export_allocator(ctx.pool(), src0->ne[0] * src0->ne[1] * ids->ne[0] * ggml_element_size(src0)); void * export_ptr = export_allocator.get(); for (int64_t i = 0; i < batch; i++) { acl_tensor_ptr select_index = ggml_cann_create_tensor(ids, ids->ne, ids->nb, 1, ACL_FORMAT_ND, i * ids->nb[1]); acl_tensor_ptr export_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3); int64_t select_export_ne[] = { src0->ne[0], src0->ne[1], ids->ne[0] }; size_t select_export_nb[3]; select_export_nb[0] = src0->nb[0]; for (int k = 1; k < 3; k++) { select_export_nb[k] = select_export_nb[k - 1] * select_export_ne[k - 1]; } acl_tensor_ptr select_export = ggml_cann_create_tensor(export_ptr, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), select_export_ne, select_export_nb, 3); GGML_CANN_CALL_ACLNN_OP(ctx, IndexSelect, export_weight.get(), 0, select_index.get(), select_export.get()); int64_t select_transpose_ne[] = { select_export_ne[1], select_export_ne[0], select_export_ne[2] }; size_t select_transpose_nb[] = { select_export_nb[1], select_export_nb[0], select_export_nb[2] }; acl_tensor_ptr select_export_transpose = ggml_cann_create_tensor(export_ptr, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), select_transpose_ne, select_transpose_nb, 3); int64_t active_tensor_ne[] = { src1->ne[0], 1, src1->ne[1] }; size_t active_tensor_nb[] = { src1->nb[0], src1->nb[1], src1->nb[1] }; acl_tensor_ptr active_tensor = ggml_cann_create_tensor(src1, active_tensor_ne, active_tensor_nb, 3, ACL_FORMAT_ND, i * src1->nb[2]); int64_t dst_ne[] = { dst->ne[0], 1, dst->ne[1] }; size_t dst_nb[] = { dst->nb[0], dst->nb[1], dst->nb[1] }; acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst, dst_ne, dst_nb, 3, ACL_FORMAT_ND, i * dst->nb[2]); GGML_CANN_CALL_ACLNN_OP(ctx, BatchMatMul, active_tensor.get(), select_export_transpose.get(), acl_dst.get(), 2); } } /** * @brief Performs expert-specific matrix multiplication (MoE) with * quantized precision using the CANN backend. * * This function executes a matrix multiplication operation tailored for * Mixture of Experts (MoE) models, where the input tensor is multiplied * with expert-specific quantized weight matrices. It leverages the CANN * backend to perform efficient low-precision computations and stores the * quantized result in the destination tensor `dst`. * * Quantization techniques reduce memory footprint and improve performance * by using lower-bit representations (e.g., int8) instead of floating-point. * This function is designed to work with such formats and may incorporate * optimizations like identity-based fast paths or routing masks for sparse * expert selection. 
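 *
 * Rough sketch of the data movement in the loop below: for every
 * (token, expert-slot) pair the selected expert's quantized weights and FP16
 * scales are copied into a scratch buffer on the device, and a single-row
 * ggml_cann_mul_mat is dispatched on that copy.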
* * @param ctx The context for executing CANN backend operations. * @param dst The destination tensor where the quantized MoE multiplication result * will be stored. * * @note This function assumes quantized data types and is designed for * MoE architectures with potential sparse expert routing. */ static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context & ctx, ggml_tensor * dst) { // TODO: Use aclnnGroupedMatMul //dst [M, K, N, 1] ggml_tensor * src0 = dst->src[0]; //src0 [D, M, A, 1] ggml_tensor * src1 = dst->src[1]; //src1 [D, B, N, 1], B = K or B = 1 ggml_tensor * ids = dst->src[2]; //ids [K, N] GGML_TENSOR_BINARY_OP_LOCALS // copy index from npu to cpu int64_t n_as = ne02; // A int64_t n_ids = ids->ne[0]; // K std::vector ids_host(ggml_nbytes(ids)); ACL_CHECK(aclrtMemcpyAsync(ids_host.data(), ggml_nbytes(ids), ids->data, ggml_nbytes(ids), ACL_MEMCPY_DEVICE_TO_HOST, ctx.stream())); ACL_CHECK(aclrtSynchronizeStream(ctx.stream())); char * src0_original = (char *) src0->data; char * src1_original = (char *) src1->data; char * dst_original = (char *) dst->data; ggml_tensor src0_row = *src0; ggml_tensor src1_row = *src1; ggml_tensor dst_row = *dst; const enum ggml_type type = dst->src[0]->type; float weight_elem_size; if (type == GGML_TYPE_Q4_0) { weight_elem_size = float(sizeof(uint8_t)) / 2; } else if (type == GGML_TYPE_Q8_0) { weight_elem_size = float(sizeof(uint8_t)); } else { GGML_ABORT("MUL_MAT_ID only support quant type Q4_0 and Q8_0 "); } // src0_row [D, M, 1, 1] weight without permute src0_row.ne[2] = 1; src0_row.ne[3] = 1; src0_row.nb[0] = weight_elem_size; src0_row.nb[1] = weight_elem_size * ne00; src0_row.nb[2] = weight_elem_size * ne00; src0_row.nb[3] = weight_elem_size * ne00; size_t weight_stride = ne00 * ne01 * weight_elem_size; size_t weight_size = weight_stride * ne02 * ne03; // scale [D, M, 1, 1] -> scale && permute size_t scale_elem_size = sizeof(uint16_t); size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; // src1_row [D, 1, 1, 1] -> input src1_row.ne[1] = 1; src1_row.ne[2] = 1; src1_row.ne[3] = 1; src1_row.nb[2] = nb11; src1_row.nb[3] = nb11; // dst_row [M, 1, 1, 1] -> out dst_row.ne[1] = 1; dst_row.ne[2] = 1; dst_row.ne[3] = 1; dst_row.nb[2] = nb1; dst_row.nb[3] = nb1; //create weight for one row ggml_cann_pool_alloc weight_allocator(ctx.pool()); void * weight_buffer = weight_allocator.alloc(nb02); for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) { for (int64_t id = 0; id < n_ids; id++) { // expert index int32_t i02 = *(int32_t *) (ids_host.data() + iid1 * ids->nb[1] + id * ids->nb[0]); GGML_ASSERT(i02 >= 0 && i02 < n_as); // If B = 1 (broadcast), always use 0; otherwise, use id. int64_t i11 = (ne11 == 1 ? 
0 : id); int64_t i12 = iid1; int64_t i1 = id; int64_t i2 = i12; void * src0_tmp_ptr = src0_original + i02 * weight_stride; void * scale_tmp_ptr = src0_original + weight_size + i02 * scale_stride; void * src1_tmp_ptr = src1_original + i11 * nb11 + i12 * nb12; void * dst_tmp_ptr = dst_original + i1 * nb1 + i2 * nb2; // mem cpy ACL_CHECK(aclrtMemcpyAsync(weight_buffer, weight_stride, src0_tmp_ptr, weight_stride, ACL_MEMCPY_DEVICE_TO_DEVICE, ctx.stream())); void * scale_buffer = (char *) weight_buffer + weight_stride; ACL_CHECK(aclrtMemcpyAsync(scale_buffer, scale_stride, scale_tmp_ptr, scale_stride, ACL_MEMCPY_DEVICE_TO_DEVICE, ctx.stream())); src0_row.data = weight_buffer; src1_row.data = src1_tmp_ptr; dst_row.data = dst_tmp_ptr; dst_row.src[0] = &src0_row; dst_row.src[1] = &src1_row; ggml_cann_mul_mat(ctx, &dst_row); } } return; } void ggml_cann_mul_mat_id(ggml_backend_cann_context & ctx, ggml_tensor * dst) { const enum ggml_type type = dst->src[0]->type; switch (type) { case GGML_TYPE_F32: case GGML_TYPE_F16: ggml_cann_mul_mat_id_fp(ctx, dst); break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q8_0: ggml_cann_mul_mat_id_quant(ctx, dst); break; default: GGML_ABORT("Unsupported type for mul_mat_id"); break; } } void ggml_cann_flash_attn_ext(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // q, fp32 | B, N, S, D (uncont) -> B, S, N, D (cont) ggml_tensor * src1 = dst->src[1]; // k, fp16 | B, N, S, D (uncont) -> B, S, N, D (cont) ggml_tensor * src2 = dst->src[2]; // v, fp16 | B, N, S, D (uncont) -> B, S, N, D (cont) ggml_tensor * src3 = dst->src[3]; // mask, fp16 // B, N, S, D (uncont) -> B, S, N, D (cont) int64_t src0_bsnd_ne[GGML_MAX_DIMS]; memcpy(src0_bsnd_ne, src0->ne, GGML_MAX_DIMS * sizeof(int64_t)); size_t src0_bsnd_nb[GGML_MAX_DIMS]; memcpy(src0_bsnd_nb, src0->nb, GGML_MAX_DIMS * sizeof(size_t)); int64_t src1_bsnd_ne[GGML_MAX_DIMS]; memcpy(src1_bsnd_ne, src1->ne, GGML_MAX_DIMS * sizeof(int64_t)); size_t src1_bsnd_nb[GGML_MAX_DIMS]; memcpy(src1_bsnd_nb, src1->nb, GGML_MAX_DIMS * sizeof(size_t)); int64_t src2_bsnd_ne[GGML_MAX_DIMS]; memcpy(src2_bsnd_ne, src2->ne, GGML_MAX_DIMS * sizeof(int64_t)); size_t src2_bsnd_nb[GGML_MAX_DIMS]; memcpy(src2_bsnd_nb, src2->nb, GGML_MAX_DIMS * sizeof(size_t)); auto transpose12 = [](int64_t * ne, size_t * nb) { int64_t ne_tmp = ne[1]; size_t nb_tmp = nb[1]; ne[1] = ne[2]; nb[1] = nb[2]; ne[2] = ne_tmp; nb[2] = nb_tmp; }; transpose12(src0_bsnd_ne, src0_bsnd_nb); transpose12(src1_bsnd_ne, src1_bsnd_nb); transpose12(src2_bsnd_ne, src2_bsnd_nb); float maxBias = 0.0f; float scaleValue = 1.0f; float logitSoftcap = 0.0f; memcpy(&scaleValue, (float *) dst->op_params + 0, sizeof(float)); memcpy(&maxBias, (float *) dst->op_params + 1, sizeof(float)); memcpy(&logitSoftcap, (float *) dst->op_params + 2, sizeof(float)); if (logitSoftcap == 0.0f) { size_t faElemSize = sizeof(uint16_t); auto faDataType = ACL_FLOAT16; //ACL_BF16; acl_tensor_ptr acl_q_tensor = nullptr; acl_tensor_ptr acl_k_tensor = nullptr; acl_tensor_ptr acl_v_tensor = nullptr; // Step 1: cast the src0 (Query) to fp16 if needed ggml_cann_pool_alloc src0_f16_allocator(ctx.pool()); void * src0_f16_buffer = nullptr; if (ggml_cann_type_mapping(src0->type) != faDataType) { acl_tensor_ptr acl_src0_f32_tensor = ggml_cann_create_tensor(src0, src0_bsnd_ne, src0_bsnd_nb, GGML_MAX_DIMS); src0_f16_buffer = src0_f16_allocator.alloc(ggml_nelements(src0) * faElemSize); int64_t * src0_f16_ne = src0_bsnd_ne; size_t src0_f16_nb[GGML_MAX_DIMS]; src0_f16_nb[0] = sizeof(uint16_t); for (int i = 
1; i < GGML_MAX_DIMS; ++i) { src0_f16_nb[i] = src0_f16_nb[i - 1] * src0_f16_ne[i - 1]; } acl_q_tensor = ggml_cann_create_tensor(src0_f16_buffer, faDataType, faElemSize, src0_f16_ne, src0_f16_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src0_f32_tensor.get(), acl_q_tensor.get(), faDataType); } else { acl_q_tensor = ggml_cann_create_tensor(src0, src0_bsnd_ne, src0_bsnd_nb, GGML_MAX_DIMS); } // Step 2: create the acl tensors for src1 (Key), src2 (Value), // and the direct output from FusedInferAttention acl_k_tensor = ggml_cann_create_tensor(src1, src1_bsnd_ne, src1_bsnd_nb, GGML_MAX_DIMS); acl_v_tensor = ggml_cann_create_tensor(src2, src2_bsnd_ne, src2_bsnd_nb, GGML_MAX_DIMS); // Step 3: create the PSEShift tensor if needed // this tensor is considered as mask (f16) in the llama.cpp acl_tensor_ptr bcast_pse_tensor; ggml_cann_pool_alloc bcast_pse_allocator(ctx.pool()); if (src3 != nullptr) { // Construct the truncated pse tensor (common for prefill/decode) int64_t trunc_pse_ne[GGML_MAX_DIMS] = { src3->ne[0], // D src0->ne[1], // S (number of Q tokens) src3->ne[2], // mask N src3->ne[3] // B }; size_t * trunc_pse_nb = src3->nb; acl_tensor_ptr acl_mask_f16_trunc_tensor = ggml_cann_create_tensor( src3->data, ACL_FLOAT16, sizeof(uint16_t), trunc_pse_ne, trunc_pse_nb, GGML_MAX_DIMS); int64_t bcast_pse_ne[GGML_MAX_DIMS]; size_t bcast_pse_nb[GGML_MAX_DIMS]; bcast_pse_ne[0] = src3->ne[0]; // D bcast_pse_ne[1] = src0->ne[1]; // S bcast_pse_ne[2] = src0->ne[2]; // N (num_heads) bcast_pse_ne[3] = src3->ne[3]; // B if (maxBias == 0.0f) { // When maxBias == 0.0f, use nb = 0 reduce once repeat (Qwen2) // Construct the bcast tensor (simulate repeat on the head dimension using stride=0) bcast_pse_nb[0] = sizeof(uint16_t); bcast_pse_nb[1] = bcast_pse_nb[0] * bcast_pse_ne[0]; bcast_pse_nb[2] = 0; // <---- the head dimension shares the same data bcast_pse_nb[3] = src3->nb[3]; bcast_pse_tensor = ggml_cann_create_tensor(src3->data, ACL_FLOAT16, sizeof(uint16_t), bcast_pse_ne, bcast_pse_nb, GGML_MAX_DIMS); } else { bcast_pse_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { bcast_pse_nb[i] = bcast_pse_nb[i - 1] * bcast_pse_ne[i - 1]; } void * bcast_pse_buffer = bcast_pse_allocator.alloc(ggml_nelements(src3) * src0->ne[2] * sizeof(uint16_t)); bcast_pse_tensor = ggml_cann_create_tensor(bcast_pse_buffer, ACL_FLOAT16, sizeof(uint16_t), bcast_pse_ne, bcast_pse_nb, GGML_MAX_DIMS); int64_t repeats[] = { 1, src0->ne[2], 1, 1 }; aclnn_repeat(ctx, acl_mask_f16_trunc_tensor.get(), bcast_pse_tensor.get(), repeats); // alibi // Compute the slope if needed. Derived from ggml_cann_softmax(). const int64_t n_heads = src0->ne[2]; ggml_cann_pool_alloc slope_allocator(ctx.pool(), n_heads * sizeof(uint16_t)); void * slope_buffer = slope_allocator.get(); aclnn_get_slope(ctx, n_heads, slope_buffer, maxBias, GGML_TYPE_F16); int64_t slope_ne[] = { 1, 1, n_heads, 1 }; size_t slope_nb[GGML_MAX_DIMS]; slope_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { slope_nb[i] = slope_nb[i - 1] * slope_ne[0]; } acl_tensor_ptr slope_tensor = ggml_cann_create_tensor(slope_buffer, ACL_FLOAT16, sizeof(uint16_t), slope_ne, slope_nb, GGML_MAX_DIMS); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMul, bcast_pse_tensor.get(), slope_tensor.get()); } } // Step 4: set the inputs for FusedInferAttention. 
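    // Illustrative reading of the FusedInferAttentionScoreV2 call below: layout
    // "BSND" matches the (B, S, N, D) views built above, numHeads and
    // numKeyValueHeads come from the Q and K head counts, the mask (src3), when
    // present, is fed in as the PSE (bias) tensor while attenMask stays nullptr,
    // and preTokens/nextTokens are simply left at 65535.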
acl_tensor_list_ptr acl_k_tensor_list = ggml_cann_create_tensor_list(acl_k_tensor); acl_tensor_list_ptr acl_v_tensor_list = ggml_cann_create_tensor_list(acl_v_tensor); int64_t numHeads = src0->ne[2]; // N int64_t numKeyValueHeads = src1->ne[2]; // double scaleValue = 1 / sqrt(src0->ne[0]); // 1/sqrt(d) int64_t preTokens = 65535; int64_t nextTokens = 65535; char layout[5] = { 'B', 'S', 'N', 'D', 0 }; int64_t sparseMode = 0; int64_t innerPrecise = (src0->ne[1] == 1) ? 0 : 2; int64_t blockSize = 0; int64_t antiquantMode = 0; bool softmaxLseFlag = false; int64_t keyAntiquantMode = 0; int64_t valueAntiquantMode = 0; GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); acl_tensor_ptr fa_dst_tensor; acl_tensor_ptr acl_dst_tensor; ggml_cann_pool_alloc out_f16_allocator(ctx.pool()); if (dst->type == GGML_TYPE_F32) { void * out_f16_buffer = out_f16_allocator.alloc(ggml_nelements(dst) * faElemSize); int64_t * out_f16_ne = src0_bsnd_ne; size_t out_f16_nb[GGML_MAX_DIMS]; out_f16_nb[0] = faElemSize; for (int i = 1; i < GGML_MAX_DIMS; ++i) { out_f16_nb[i] = out_f16_nb[i - 1] * out_f16_ne[i - 1]; } fa_dst_tensor = ggml_cann_create_tensor(out_f16_buffer, faDataType, faElemSize, out_f16_ne, out_f16_nb, GGML_MAX_DIMS); } else { fa_dst_tensor = ggml_cann_create_tensor(dst); } GGML_CANN_CALL_ACLNN_OP(ctx, FusedInferAttentionScoreV2, acl_q_tensor.get(), acl_k_tensor_list.get(), acl_v_tensor_list.get(), // q, k, v bcast_pse_tensor.get(), nullptr, // pse, mask nullptr, nullptr, // actSeqLen, actSeqLenkv nullptr, nullptr, // deqScale1, quantScale1 nullptr, nullptr, nullptr, // deqScale2, quantScale2, quantOffset2 nullptr, nullptr, // antiquantScale, antiquantOffset nullptr, // blockTable nullptr, nullptr, // qPadSize, kvPadSize nullptr, nullptr, // kAntiquantScale, kAntiQuantOffset nullptr, nullptr, // vAntiquantScale, vAntiQuantOffset nullptr, nullptr, nullptr, // kSharedPrefix, vSharedPrefix, actSharedLen numHeads, scaleValue, // heads, scaleValue preTokens, nextTokens, // preTokens, nextTokens layout, // inputLayout numKeyValueHeads, // numKVHeads sparseMode, innerPrecise, // sparseMode, innerPrecise blockSize, antiquantMode, // blockSize, antiquantMode softmaxLseFlag, // softmaxLseFlag keyAntiquantMode, valueAntiquantMode, // keyAntiqMode, valueAntiqMode fa_dst_tensor.get(), // attentionOut nullptr // softmaxLse ); if (dst->type == GGML_TYPE_F32) { // Step 6: post-processing, permute and cast to f32 acl_tensor_ptr acl_dst_tensor = ggml_cann_create_tensor(dst); aclnn_cast(ctx, fa_dst_tensor.get(), acl_dst_tensor.get(), ggml_cann_type_mapping(dst->type)); } } else { GGML_ABORT("Function is not implemented."); } } static void ggml_cann_out_prod_fp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // weight ggml_tensor * src1 = dst->src[1]; // input GGML_TENSOR_BINARY_OP_LOCALS acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_dst.get()); const int64_t dps2 = ne2 / ne02; const int64_t dps3 = ne3 / ne03; for (int64_t i3 = 0; i3 < ne3; i3++) { for (int64_t i2 = 0; i2 < ne2; i2++) { const int64_t i02 = i2 / dps2; const int64_t i03 = i3 / dps3; const int64_t i12 = i2; const int64_t i13 = i3; acl_tensor_ptr accumulator = ggml_cann_create_tensor((char *) dst->data + i2 * nb2 + i3 * nb3, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), dst->ne, dst->nb, 2); // The outer product needs to be accumulated in this dimension. 
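            // Added note (restating the loop below, not new logic): for each i1,
            // Ger() builds the rank-1 outer product of the columns
            // src1[:, i1, i12, i13] and src0[:, i1, i02, i03] in a scratch
            // buffer, and InplaceAdd() accumulates it into the dst slice, i.e.
            //     dst[i0, j, i2, i3] = sum_{i1} src0[i0, i1, i02, i03] * src1[j, i1, i12, i13]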
for (int64_t i1 = 0; i1 < ne11; i1++) { acl_tensor_ptr acl_input = ggml_cann_create_tensor( (char *) src1->data + i1 * nb11 + i12 * nb12 + i13 * nb13, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), src1->ne, src1->nb, 1); acl_tensor_ptr acl_weight = ggml_cann_create_tensor( (char *) src0->data + i1 * nb01 + i02 * nb02 + i03 * nb03, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), src0->ne, src0->nb, 1); ggml_cann_pool_alloc output_allocator(ctx.pool()); void * output_buffer = output_allocator.alloc(ggml_nbytes(dst)); acl_tensor_ptr acl_out = ggml_cann_create_tensor(output_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), dst->ne, dst->nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, Ger, acl_input.get(), acl_weight.get(), acl_out.get()); float alpha_value = 1.0f; aclScalar * alpha = aclCreateScalar(&alpha_value, ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, accumulator.get(), acl_out.get(), alpha); } } } } void ggml_cann_out_prod(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; const enum ggml_type type = src0->type; switch (type) { case GGML_TYPE_F32: case GGML_TYPE_F16: ggml_cann_out_prod_fp(ctx, dst); break; default: GGML_ABORT("Unsupport type for GGML_OP_OUT_PROD"); break; } } void ggml_cann_ssm_conv(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; // conv_x ggml_tensor * src1 = dst->src[1]; // conv1d.weight // This op is currently defined only for F32 in ggml_cpu GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); // Shapes follow ggml_compute_forward_ssm_conv_f32 const int64_t nc = src1->ne[0]; // d_conv const int64_t ncs = src0->ne[0]; // d_conv - 1 + n_t const int64_t nr = src0->ne[1]; // d_inner const int64_t n_s = src0->ne[2]; // n_seqs const int64_t n_t = dst->ne[1]; // tokens per sequence GGML_ASSERT(dst->ne[0] == nr); // dst: {d_inner, n_t, n_s} GGML_ASSERT(src1->ne[1] == nr); // weight: {d_conv, d_inner} GGML_ASSERT(ncs == nc - 1 + n_t); // conv_x: {d_conv - 1 + n_t, d_inner, n_s} GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); // --- Build CANN tensors --- // 1) Input: conv_x as NCL // // src0->ne = { ncs, nr, n_s, 1 } // {L_in, C, N} // Passing ACL_FORMAT_NCL here means: // reversed dims -> [N, C, L_in] = [n_s, nr, ncs] acl_tensor_ptr acl_x = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3, ACL_FORMAT_NCL); // 2) Weights: depthwise conv kernel, view src1 as {K, 1, C} // // src1 original: ne = { nc, nr, 1, 1 } // [K, C, 1, 1] // we want a view: ne_w = { nc, 1, nr } // [K, 1, C] // so that reversed dims -> [C, 1, K] which matches // [out_channels, in_channels/groups, kernel_size] int64_t w_ne[GGML_MAX_DIMS] = { nc, 1, nr, 1 }; // [K, 1 input ch. per group, C groups] // Layout: src1 data is [K, C] with // offset(k, c) = k*nb0 + c*nb1 // We want offset_w(k, 0, c) = k*nb0 + c*nb1, // so we can reuse nb0 and nb1, and set nb2 = nb1. 
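    // Added illustration (hypothetical sizes): with d_conv = 4 and d_inner = 8
    // in f32, nb0 = 4 and nb1 = 16 bytes, so element (k, c) of the original
    // [K, C] weight and element (k, 0, c) of the [K, 1, C] view share the byte
    // offset k*4 + c*16 -- the view is a pure reinterpretation, no data is copied.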
size_t w_nb[GGML_MAX_DIMS] = { src1->nb[0], src1->nb[1], src1->nb[1], src1->nb[3] }; // same as src1 acl_tensor_ptr acl_w = ggml_cann_create_tensor( src1->data, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), w_ne, w_nb, 3, ACL_FORMAT_NCL); // 3) Output: dst is { d_inner, n_t, n_s } (CLN) // // We need an NCL view of the same buffer: // desired NCL logical shape: { L_out = n_t, C = nr, N = n_s } // // Original CLN layout: // dst->ne = { nr, n_t, n_s } // dst->nb[0] = sizeof(float) // dst->nb[1] = nr * sizeof(float) // dst->nb[2] = nr * n_t * sizeof(float) // // We want offset_new(L, C, N) = offset_orig(C, L, N). // Choose: // nb_y[0] = nr * sizeof(float); // step in L // nb_y[1] = sizeof(float); // step in C // nb_y[2] = nr * n_t * sizeof(float); // step in N int64_t y_ne[GGML_MAX_DIMS] = { n_t, nr, n_s, 1 }; // [L_out, C, N] size_t y_nb[GGML_MAX_DIMS] = { dst->ne[0] * sizeof(float), sizeof(float), dst->ne[0] * dst->ne[1] * sizeof(float), dst->nb[3] }; // [nr, 1, nr * n_t] acl_tensor_ptr acl_y = ggml_cann_create_tensor( dst->data, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), y_ne, y_nb, 3, ACL_FORMAT_NCL); // --- Conv1d parameters: depthwise, stride 1, no padding ("valid") --- int64_t strideVal[1] = { 1 }; int64_t paddingVal[1] = { 0 }; int64_t dilationVal[1] = { 1 }; acl_int_array_ptr stride = ggml_cann_create_int_array(strideVal, 1); acl_int_array_ptr padding = ggml_cann_create_int_array(paddingVal, 1); acl_int_array_ptr dilation = ggml_cann_create_int_array(dilationVal, 1); const bool transposed = false; const int64_t groups = nr; // depthwise: one group per inner dim int8_t cubeMathType = 0; #ifdef ASCEND_310P cubeMathType = 1; #endif GGML_CANN_CALL_ACLNN_OP(ctx, Convolution, acl_x.get(), // input: N, C, L_in = ncs acl_w.get(), // weight: [C, 1, K] with groups=nr nullptr, // bias stride.get(), padding.get(), dilation.get(), transposed, padding.get(), // output padding (unused for non-transposed) groups, acl_y.get(), cubeMathType); } ggml-org-ggml-3678254/src/ggml-cann/aclnn_ops.h000066400000000000000000001452741512524704700211600ustar00rootroot00000000000000/** * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef CANN_ACLNN_OPS #define CANN_ACLNN_OPS #include "acl_tensor.h" #include "common.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * @brief Repeats a ggml tensor along each dimension to match the dimensions * of another tensor. * * @details This function repeats the elements of a source ggml tensor along * each dimension to create a destination tensor with the specified * dimensions. The operation is performed using the ACL backend and * executed asynchronously on the device. * * @param ctx The CANN context used for operations. * @param dst The ggml tensor representing the destination, which op is * GGML_OP_REPEAT and specifies the desired dimensions. */ void ggml_cann_repeat(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies the Leaky ReLU activation function to a tensor using the CANN * backend. * * @details This function computes the Leaky ReLU activation for each element of * the input tensor. The Leaky ReLU function allows a small gradient * when the unit is not active (i.e., when the input is negative). The * Leaky ReLU function is defined as: * \f[ * \text{dst} = \max(0, src) + \text{negativeSlope} \cdot \min(0, * src) * \f] * `negativeSlope` is in dst->params. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the result of the Leaky ReLU * activation is stored, which op is `GGML_OP_LEAKY_RELU` */ void ggml_cann_leaky_relu(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Concatenates multiple tensors along a specified dimension using the * CANN backend. * * @param ctx The CANN context used for operations. * @param tensorList A pointer to the list of tensors to be concatenated. * @param dst The destination tensor where the result of the * concatenation is stored. dst->op is `GGML_OP_CONCAT`. * @param concat_dim The dimension along which the tensors are concatenated. * * @attention tensorList length should be 2 and the dimension using for concat * default to 1. */ void ggml_cann_concat(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Generates a sequence of evenly spaced values within a specified * interval for a ggml tensor using the CANN backend. * * @details This function creates a sequence of numbers over a specified i * nterval, starting from `start`, ending before `stop`, and * incrementing by `step`. The sequence is stored in the destination * tensor `dst`. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the generated sequence will be stored. * `start`, 'stop' and 'step' are in dst->op_params and dst->op is * `GGML_OP_ARANGE`. */ void ggml_cann_arange(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies a clamp operation to the elements of a ggml tensor using the * CANN backend. * * @details This function clamps the elements of the input tensor `src` to a * specified range defined by `min` and `max` values. The result is * stored in the destination tensor `dst`. The operation is defined as: * \f[ * y = \max(\min(x, max\_value), min\_value) * \f] * where `x` is an element of the input tensor, and `y` is the * corresponding element in the output tensor. * @param ctx The CANN context used for operations. * @param dst The destination tensor where the clamped values will be stored. 
* dst->op is `GGML_OP_CLAMP`, `min` and `max` value is in dst->params. */ void ggml_cann_clamp(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Scales the elements of a ggml tensor by a constant factor using the * CANN backend. * * @details This function multiplies each element of the input tensor `src` by * a scaling factor `scale`, storing the result in the destination * tensor `dst`. The operation is defined as: * \f[ * dst = src \times scale * \f] * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the scaled values will be stored. * dst->op is `GGML_OP_SCALE` and `scale` value is in dst->params. */ void ggml_cann_scale(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Sorts the elements of a ggml tensor and returns the indices that * would sort the tensor using the CANN backend. * * @details This function performs an argsort operation on the input tensor * `src`. It sorts the elements of `src` in either ascending or * descending order, depending on the `GGML_SORT_ORDER_DESC`, * and returns the indices that would sort the original tensor. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the sorted indices will be stored. * dst->op is `GGML_OP_ARGSORT`. */ void ggml_cann_argsort(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Layer Normalization for a ggml tensor using the CANN * backend. * * @details This function applies the Layer Normalization operation on the * input tensor `src` and stores the result in the destination tensor * `dst`. Layer Normalization normalizes the features at each sample in * a mini-batch independently. It is commonly used in neural networks * to normalize the activations of a layer by adjusting and scaling * the outputs. * The operation is defined as: * \f[ * \text { out }=\frac{x-\mathrm{E}[x]}{\sqrt{\text{Var}[x]+eps}} * \f] * `Var` defaults dst->ne[0]. `eps` is in dst->params. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the normalized values will be stored. * @attention `Var` defaults to dst->ne[0]. */ void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the L2 Normalization for a ggml tensor using the CANN * backend. * * @details This function applies the L2 Normalization operation on the * input tensor `src` and stores the result in the destination tensor * `dst`. L2 Normalization scales the input tensor such that the * L2 norm along the specified dimension equals 1. This operation * is commonly used in neural networks for feature normalization * and vector scaling. * The operation is defined as: * \f[ * \text{out} = \frac{x}{\sqrt{\sum{x^2}}} * \f] * The normalization is performed along the last dimension by default. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the normalized values will be stored. * @attention The normalization is performed along the last dimension of the * input tensor by default. */ void ggml_cann_l2_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Cross Entropy Loss for a ggml tensor using the CANN * backend. * * @details This function computes the cross entropy loss between the predicted * logits and target probability distributions. The operation follows * the same computation pattern as the CPU implementation: * 1. Applies log_softmax to the logits along the class dimension * 2. 
Element-wise multiplication with target distributions * 3. Summation along the class dimension to get per-sample losses * 4. Global summation and scaling by -1/nr to get final loss * * The computation can be expressed as: * \f[ * \text{loss} = -\frac{1}{N} \sum_{i=1}^{N} \sum_{j=1}^{C} y_{ij} \cdot \log(\text{softmax}(x_{ij})) * \f] * where \f$N\f$ is the total number of samples, \f$C\f$ is the number * of classes, \f$x\f$ are the logits, and \f$y\f$ are the target * probability distributions. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the computed loss will be stored. * This should be a scalar tensor containing the final loss value. * * @note This implementation computes cross entropy between probability * distributions, not the typical classification cross entropy that * expects class indices as targets. Both input tensors (src0 and src1) * should have the same shape and represent probability distributions * over the class dimension. * @note The function expects two source tensors: * - dst->src[0]: Logits tensor (before softmax) * - dst->src[1]: Target probability distributions tensor * @note The computation is performed using CANN backend operators including * LogSoftmax, Mul, ReduceSum, and Muls for the final scaling. */ void ggml_cann_cross_entropy_loss(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Group Normalization for a ggml tensor using the CANN * backend. * * @brief This function applies the Group Normalization operation on the input * tensor `src` and stores the result in the destination tensor `dst`. * Group Normalization divides the channels into groups and normalizes * the features within each group across spatial locations. * It is commonly used in convolutional neural networks to improve * training stability and performance. * The operation is defined as: * \f[ * \text { out }=\frac{x-\mathrm{E}[x]}{\sqrt{\text{Var}[x]+eps}} * \f] * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the normalized values will be stored. * `n_groups` is in dst->params, which split C channel to `n_groups`. * dst->op is `GGML_OP_GROUP_NORM`. * * @attention eps defaults to 1e-6f. */ void ggml_cann_group_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the accumulation of tensors using the CANN backend. * * @details This function performs an accumulation operation on two tensors. * Depending on the `inplace` flag, it either updates the destination * tensor `dst` in place by adding `alpha * src1` to it, or it creates * a new tensor as the result of `src0 + alpha * src1` and stores it in * `dst`. * The operation is defined as: * \f[ * dst = src0 + alpha \times src1 * \f] * if `inplace` is `true`, `src0` is equal to 'dst'. * @param ctx The CANN context used for operations. * @param dst The destination tensor where the accumulated values will be stored. * `inplace` is in dst->params, and dst->op is `GGML_OP_ACC`. */ void ggml_cann_acc(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the sum of elements along the last dimension of a ggml tensor * using the CANN backend. * * @details This function performs a reduction sum operation along the last * dimension of the input tensor `src`. The result of the sum is stored * in the destination tensor `dst`. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the reduced values will be stored。 * dst->op is `GGML_OP_SUM_ROWS`. 
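 *
 * Shape sketch (illustrative): a source of shape [ne0, ne1, ne2, ne3] reduces
 * to a destination of shape [1, ne1, ne2, ne3], with
 * \f[
 * dst_{0,i_1,i_2,i_3} = \sum_{i_0} src_{i_0,i_1,i_2,i_3}
 * \f]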
* * @attention `reduce_dims` defaults to 3, which means the last dimension. */ void ggml_cann_sum_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the sum of elements in a ggml tensor. * * @details This function performs a reduction sum operation along the last * dimension of the input tensor `src`. The result of the sum is stored * in the destination tensor `dst`. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the reduced values will be stored。 * */ void ggml_cann_sum(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Upsamples a ggml tensor using nearest neighbor interpolation using * the CANN backend. * * @details This function performs upsampling of the input tensor `src` using * nearest neighbor interpolation. The upsampling is applied to the * height and width dimensions (last two dimensions) of the tensor. The * result is stored in the destination tensor `dst`, which must have * the appropriate dimensions for the upsampled output. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the upsampled values will be stored. * dst->op is `GGML_OP_UPSCALE`. */ void ggml_cann_upsample_nearest2d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Pads a ggml tensor to match the dimensions of the destination tensor * using the CANN backend. * * @details This function pads the input tensor `src` so that it matches the * dimensions of the destination tensor `dst`. The amount of padding * is calculated based on the difference in sizes between `src` and * `dst` along each dimension. The padded tensor is stored in `dst`. * * @param ctx The CANN context used for operations. * @param dst The destination tensor, which specifies the target dimensions for * padding. dst->op is `GGML_OP_PAD`. */ void ggml_cann_pad(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Executes a 2D pooling operation on a ggml tensor using the CANN * backend. * * @details This function dispatches the execution of a 2D pooling operation on * the input tensor `dst`. The type of pooling (average or max) is * determined by the `op` parameter, which is read from the operation * parameters of `dst`. The function supports average pooling * (`GGML_OP_POOL_AVG`) and max pooling (`GGML_OP_POOL_MAX`). If an * invalid operation is encountered, the function asserts a failure. * * @param ctx The CANN context used for operations. * @param dst The destination tensor on which the pooling operation is to be * performed. dst->op is `GGML_OP_POOL_2D`. */ void ggml_cann_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Duplicates a ggml tensor using the CANN backend. * * @details This function duplicates the contents of the source tensor `src` to * the destination tensor `dst`. The function supports various tensor * types and configurations, including handling of extra data, type * conversions, and special cases for contiguous and non-contiguous * tensors. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the duplicated data will be stored. * dst->op is `GGML_OP_DUP` * * @attention Only support Fp16/FP32. Not support when src and dst have * different shape and dst is no-contiguous. * @note: This func need to simplify. */ void ggml_cann_dup(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Root Mean Square (RMS) normalization of a ggml tensor * using the CANN backend. 
* * @details This function applies RMS normalization to the input tensor `src` * and stores the result in the destination tensor `dst`. RMS * normalization involves computing the root mean square of the input * tensor along a specified dimension and then dividing each element of * the tensor by this value, adjusted by a small epsilon value to * prevent division by zero. * The operation is defined as: * \f[ * \text{RmsNorm}\left(x_i\right)=\frac{x_i}{\text{Rms}(\mathbf{x})} g_i, * \quad \text { where } \text{Rms}(\mathbf{x})=\sqrt{\frac{1}{n} \sum_{i=1}^n x_i^2+e p s} * \f] * `eps` is in dst->op_params. * @param ctx The CANN context used for operations. * @param dst The destination tensor where the normalized values will be stored. * dst->op is `GGML_OP_RMS_NORM`. */ void ggml_cann_rms_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies a diagonal mask to the tensor with a specified value. * * @details This function creates a mask tensor filled with ones, then applies * an upper triangular and lower triangular operation to it based on * the number of past elements specified. Afterward, it adds the masked * tensor to the destination tensor in-place. * * @param ctx The backend CANN context used for operations. * @param dst The destination tensor where the result will be stored. dst->op is * `GGML_OP_DIAG_MASK` * @param value The value to use for masking. */ void ggml_cann_diag_mask(ggml_backend_cann_context & ctx, ggml_tensor * dst, float value); /** * @brief Performs an image-to-column transformation on the input tensor. * * @details This function takes an input tensor and applies an image-to-column * operation, converting spatial dimensions into column-like * structures suitable for convolutional operations. It supports both * half-precision (F16) and single-precision (F32) floating-point data * types. * * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor that stores the result of the operation. * dst->op is `GGML_OP_IM2COL`. */ void ggml_cann_im2col(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes time step embeddings using sine and cosine functions. * * @details This function calculates time step embeddings by applying sine and * cosine transformations to a given input tensor, which is typically * used in temporal models like diffusion models or transformers to * encode time information effectively. * * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the result of the embedding operation * will be stored. dst->op is `GGML_OP_TIMESTEP_EMBEDDING`. */ void ggml_cann_timestep_embedding(ggml_backend_cann_context & ctx, ggml_tensor * dst); // @see ggml_cann_dup. void ggml_cann_cpy(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the softmax activation with optional masking. * * @details This function computes the softmax activation over the input tensor, * optionally applying a mask and scaling factor. It supports both FP16 * and FP32 data types and can handle masking by broadcasting the mask * across rows if necessary. * The function performs the following steps: * 1. Multiplies the input tensor by a scale factor. * 2. Optionally casts the mask tensor to FP32 if it is in FP16 format. * 3. Broadcasts the mask tensor if its dimensions do not match the * input tensor's dimensions. * 4. Adds the mask to the scaled input tensor. * 5. Applies the softmax activation function along the specified * dimension. 
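 *
 * Net effect (a condensed restatement of the steps above; `scale` comes from
 * dst->op_params and `mask` from dst->src[1] when present):
 * \f[
 * \text{dst} = \text{softmax}(\text{scale} \cdot \text{src} + \text{mask})
 * \f]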
* * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the result will be stored. dst->op is * `GGML_OP_SOFTMAX`. */ void ggml_cann_softmax(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Extracts specific rows from a tensor based on indices. * * @details This function retrieves rows from a source tensor src0 according to * the indices provided in another tensor src1 and stores the result in * a destination tensor (\p dst). * * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the extracted rows will be stored. */ void ggml_cann_get_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Writes specific rows into a tensor at positions specified by indices. * * @details This function copies rows from a source tensor into a destination * tensor (\p dst) at the positions indicated by the indices in another * tensor. * * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the specified rows will be updated. */ void ggml_cann_set_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Executes matrix multiplication for the given tensor. * * @details This function performs matrix multiplication on the source tensors * associated with the destination tensor. It supports matrix * multiplication F32, F16, and Q8_0. * * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor for storing the result of the matrix * multiplication. dst->op is `GGML_OP_MUL_MAT`. */ void ggml_cann_mul_mat(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies Rotary Positional Embedding (RoPE) to the input tensor. * * @details This function implements the RoPE mechanism, which is a method to * encode positional information into sequence data, particularly * useful in transformer models. It supports both F32 and F16 data * types. * * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the RoPE-transformed data will be * stored. dst->op is `GGML_OP_ROPE`. * * @note The function currently does not support cases where the n_dims is less * than the input tensor's first dimension. * @note The function currently does not support cases where the freq_factors is * not NULL. * @note The function currently does not support cases where the ext_factor is * not equal 0. * @note The function currently does not support cases where the freq_scale is * not equal 1. */ void ggml_cann_rope(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the index of the maximum value along the specified dimension * of a ggml tensor using the CANN backend. * * @details This function performs an argmax operation on the input tensor. * It finds the index of the maximum value along the specified axis * and stores these indices in the destination tensor `dst`. The * operation is executed using the CANN backend for optimized performance. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the indices of the maximum values will * be stored. dst->op is `GGML_OP_ARGMAX`. */ void ggml_cann_argmax(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Adds two tensors element-wise and stores the result in a destination * tensor. 
 *
 * This function performs the operation:
 * \f[
 * dst = acl\_src0 + alpha \times acl\_src1
 * \f]
 * where alpha is a scalar value and defaults to 1.0f.
 *
 * @param ctx The context for the CANN backend operations.
 * @param acl_src0 The first source tensor.
 * @param acl_src1 The second source tensor.
 * @param acl_dst The destination tensor where the result will be stored.
 */
void aclnn_add(ggml_backend_cann_context & ctx, aclTensor * acl_src0, aclTensor * acl_src1, aclTensor * acl_dst = nullptr);

/**
 * @brief Subtracts two tensors element-wise and stores the result in a
 * destination tensor.
 *
 * This function performs the operation:
 * \f[
 * dst = acl\_src0 - alpha \times acl\_src1
 * \f]
 * where alpha is a scalar value and defaults to 1.0f.
 *
 * @param ctx The context for the CANN backend operations.
 * @param acl_src0 The first source tensor.
 * @param acl_src1 The second source tensor.
 * @param acl_dst The destination tensor where the result will be stored.
 */
void aclnn_sub(ggml_backend_cann_context & ctx, aclTensor * acl_src0, aclTensor * acl_src1, aclTensor * acl_dst = nullptr);

/**
 * @brief Performs element-wise multiplication of two tensors and stores the
 * result in a destination tensor.
 *
 * This function performs element-wise multiplication of the tensors `acl_src`
 * and `acl_other` and stores the result in the destination tensor `acl_dst`.
 * The operation is defined as:
 * \f[
 * \text{acl\_dst}_i = \text{acl\_src}_i \times \text{acl\_other}_i
 * \f]
 *
 * @param ctx The context for the CANN backend operations.
 * @param acl_src The first tensor for element-wise multiplication.
 * @param acl_other The second tensor for element-wise multiplication.
 * @param acl_dst The destination tensor where the result will be stored.
 */
void aclnn_mul(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_other, aclTensor * acl_dst = nullptr);

/**
 * @brief Element-wise division, optionally in-place.
 *
 * This function divides each element of the source tensor `acl_src` by the
 * corresponding element of `acl_other` and stores the result in the
 * destination tensor `acl_dst`. If `acl_dst` is null, the operation is
 * performed in-place on `acl_src`. The operation is defined as:
 * \f[
 * \text{dst}_i = \frac{\text{acl\_src}_i}{\text{acl\_other}_i}
 * \f]
 *
 * @param ctx The context for the CANN backend operations.
 * @param acl_src Numerator tensor.
 * @param acl_other Denominator tensor.
 * @param acl_dst The destination tensor where the result will be stored;
 * may be null to perform the division in-place on `acl_src`.
 */
void aclnn_div(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_other, aclTensor * acl_dst = nullptr);

/**
 * @brief Applies element-wise cosine function to the elements of a tensor.
 *
 * This function computes the cosine of each element in the source tensor
 * `acl_src` and stores the result in the destination tensor `acl_dst`.
 * The operation is defined as:
 * \f[
 * \text{acl\_dst}_i = \cos(\text{acl\_src}_i)
 * \f]
 *
 * @param ctx The context for the CANN backend operations.
 * @param acl_src The source tensor on which the cosine function will be
 * applied.
 * @param acl_dst The destination tensor where the cosine results will be
 * stored.
 */
void aclnn_cos(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst);

/**
 * @brief Applies element-wise sine function to the elements of a tensor.
* * This function computes the sine of each element in the source tensor `acl_src` * and stores the result in the destination tensor `acl_dst`. * The operation is defined as: * \f[ * \text {acl_dst }_i=\sin \left(\text {acl_src }_i\right) * \f] * @param ctx The context for the CANN backend operations. * @param acl_src The source tensor on which the sine function will be applied. * @param acl_dst The destination tensor where the sine results will be stored. */ void aclnn_sin(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst); /** * @brief Prepares broadcast-compatible ACL tensors for two input tensors and one * output tensor. * * This function checks whether broadcasting is needed between `src0` and `src1`. * If broadcasting is required, it calculates the proper shapes and creates * ACL tensors with broadcast parameters. Otherwise, it directly creates ACL tensors * based on the original tensor shapes. * * @param src0 The first input tensor (reference shape). * @param src1 The second input tensor (possibly broadcasted). * @param dst The destination/output tensor. * @param acl_src0 Output pointer to the created ACL tensor corresponding to src0. * @param acl_src1 Output pointer to the created ACL tensor corresponding to src1. * @param acl_dst Output pointer to the created ACL tensor corresponding to dst. */ void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, acl_tensor_ptr & acl_src0, acl_tensor_ptr & acl_src1, acl_tensor_ptr & acl_dst); /** * @brief Computes the 1D transposed convolution (deconvolution) of a ggml * tensor using the CANN backend. * * @details This function performs a 1D transposed convolution (also known as * deconvolution) operation on the input tensor. The computed result is stored * in the destination tensor `dst`. The operation is optimized using the CANN * backend for improved performance. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the transposed convolution result * will be stored. dst->op is `GGML_OP_CONV_TRANSPOSE_1D`. */ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies the ELU (Exponential Linear Unit) activation to a ggml tensor * using the CANN backend. * * @details This function performs an element-wise ELU activation on the input * tensor. * The result is written to the destination tensor `dst` in-place. * The ELU function is defined as: * * \text{ELU}(x) = * \begin{cases} * x, & \text{if } x > 0 \\ * \alpha \left( \exp(x) - 1 \right), & \text{if } x \leq 0 * \end{cases} * * where α (alpha) is a hyperparameter, typically set to 1.0. * This operation is optimized using the CANN backend for high-performance * inference or training. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the ELU-activated result will be stored. * dst->op is expected to be `GGML_OP_ELU`. */ void ggml_cann_elu(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the mean of a ggml tensor element-wise using the CANN backend. * * @details This function calculates the element-wise mean of the input tensor. * The result is written to the destination tensor `dst`. * The mean is computed by averaging the values across the entire tensor. * * This operation is optimized using the CANN backend for high-performance inference or training. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the mean result will be stored. 
* dst->op is expected to be `GGML_OP_MEAN`. */ void ggml_cann_mean(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies 1D reflect padding to a ggml tensor using the CANN backend. * * @details This function performs 1D reflect padding on the input tensor. * The amount of padding on each side is specified by parameters stored in `dst->op_params`. * The operation reflects the values at the borders of the tensor to generate the padded output. * * This operation is optimized using the CANN backend for high-performance inference or training. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the padded result will be stored. * dst->op is expected to be `GGML_OP_PAD_REFLECT_1D`. */ void ggml_cann_pad_reflect_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Counts the number of equal elements in two ggml tensors using the CANN backend. * * @details This function performs an element-wise comparison between two input tensors, * and counts the number of positions where the elements are equal. The result is * stored in the destination tensor `dst` as a scalar. * * The operation is optimized using the CANN backend, making it suitable for * high-performance inference or training scenarios. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the result will be stored. * dst->op is expected to be `GGML_OP_COUNT_EQUAL`. */ void ggml_cann_count_equal(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies the Step activation function to a ggml tensor using the CANN backend. * * @details This function applies a step function element-wise to the input tensor, where * each element is transformed to 1.0 if it is greater than 0, and 0.0 otherwise. * The result is stored in the destination tensor `dst`. * * This operation is accelerated using the CANN backend to improve runtime performance. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the result will be stored. * dst->op is expected to be `GGML_OP_STEP`. */ void ggml_cann_step(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Performs the Flash Attention extended operator using the CANN backend. * * @details This function implements the memory-efficient Flash Attention algorithm * for computing scaled dot-product attention with hardware acceleration. * The result is stored in the destination tensor `dst`. * * This operation is accelerated using the CANN backend to improve runtime performance. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the result will be stored. * dst->op is expected to be `GGML_OP_FLASH_ATTN_EXT`. */ void ggml_cann_flash_attn_ext(ggml_backend_cann_context & ctx, ggml_tensor * dst); /* * @brief A generic wrapper for ACL resources with custom deleter support. */ using any_acl_resource = std::unique_ptr>; /** * @brief Trait structure used to define how to destroy a given ACL resource type. * * @tparam T ACL resource type. */ template struct acl_resource_traits; /** * @brief Specialization for aclTensor, defines how to destroy an aclTensor resource. */ template <> struct acl_resource_traits { static void destroy(void * p) { ACL_CHECK(aclDestroyTensor(static_cast(p))); } }; /** * @brief Specialization for aclIntArray, defines how to destroy an aclIntArray resource. 
*/ template <> struct acl_resource_traits { static void destroy(void * p) { ACL_CHECK(aclDestroyIntArray(static_cast(p))); } }; /** * @brief Specialization for aclScalar, defines how to destroy an aclScalar resource. */ template <> struct acl_resource_traits { static void destroy(void * p) { ACL_CHECK(aclDestroyScalar(static_cast(p))); } }; /** * @brief Specialization for aclTensorList, defines how to destroy an aclTensorList resource. */ template <> struct acl_resource_traits { static void destroy(void * p) { ACL_CHECK(aclDestroyTensorList(static_cast(p))); } }; /** * @brief Creates a generic ACL resource wrapper with proper destruction logic. * * @tparam T ACL resource type. * @param ptr Raw pointer to ACL resource. * @return any_acl_resource Smart pointer that handles destruction. */ template any_acl_resource make_acl_resource(T * ptr) { return any_acl_resource(static_cast(ptr), [](void * p) { acl_resource_traits::destroy(p); }); } /** * @brief Registers multiple ACL resources into a vector for lifetime management. * * @tparam Args Variadic list of ACL resource types. * @param vec Target vector to hold ACL resources. * @param args Raw pointers to ACL resources. */ template void register_acl_resources(std::vector & vec, Args *... args) { (vec.emplace_back(make_acl_resource(args)), ...); } /** * @brief Launches an asynchronous task using the memory allocator. * * This macro submit an asynchronous task on the specified stream. * The task uses memory allocated by the allocator. It is guaranteed * that the memory will not be accessed by other tasks until this task * completes, due to the sequential execution order within the same stream. * * @param OP_NAME aclnn operator name. * @param args Additional arguments required by the task. * * @note * Memory from the allocator will be "freed" immediately and can be * reallocated to other pointers. However, it won't be accessed by any * other task before this asynchronous task ends, because all tasks in the * same stream are executed in queue order. */ #define GGML_CANN_CALL_ACLNN_OP(CTX, OP_NAME, ...) \ do { \ uint64_t workspaceSize = 0; \ aclOpExecutor * executor; \ void * workspaceAddr = nullptr; \ ACL_CHECK(aclnn##OP_NAME##GetWorkspaceSize(__VA_ARGS__, &workspaceSize, &executor)); \ /* workspace should alloced in main thread to keep malloc order when using vmm. */ \ if (workspaceSize > 0) { \ ggml_cann_pool_alloc workspace_allocator(CTX.pool(), workspaceSize); \ workspaceAddr = workspace_allocator.get(); \ } \ ACL_CHECK(aclnn##OP_NAME(workspaceAddr, workspaceSize, executor, CTX.stream())); \ } while (0) /** * @brief Performs sparse expert-based matrix multiplication using the CANN backend. * * @details This function implements a MoE-style batched matrix multiplication, where each input token * is routed to one or more experts, and each expert corresponds to a specific [D, M] weight matrix * in the source tensor `src0`. The routing indices are provided via the `ids` tensor. * * For each token (from `src1`), the function selects the corresponding expert(s) as specified by `ids`, * performs the matrix multiplication with the selected expert's weight submatrix (from `src0`), * and stores the results in `dst`. This operation is optimized and executed on the CANN backend. 
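 *
 * In index form (a sketch of the routing described above): for token n and
 * routed slot k, the expert index is e = ids[k, n], and the expert slice
 * src0[:, :, e] is matrix-multiplied with the corresponding src1 column to
 * produce dst[:, k, n].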
* * Dimensions: * - src0: [D, M, A, 1], where A is the number of experts * - src1: [D, B, N, 1], where N is batch size and B is the slot count per sample * - ids : [K, N], where K is the number of experts each token is routed to * - dst : [M, K, N, 1], output tensor storing the result of expert × token multiplication * * The function handles two main modes: * - If `ne12 == 1`, a simpler per-token loop is used. * - TODO: If `ne12 > 1`, grouped multiplication and memory copying is used for efficiency. * * @param ctx The CANN context used for operations. * @param dst The destination tensor where the expert-weighted token outputs are stored. * Expected to be of shape [M, K, N, 1]. */ void ggml_cann_mul_mat_id(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Check whether a tensor is a weight tensor for matrix multiplication. * * @details Checks whether the given tensor serves as weight parameters in matrix multiplication operations, * typically within neural network layers. The function maintains a static set of canonical weight * naming suffixes from Transformer-based architectures. Uses substring matching to identify weight * tensors even with hierarchical naming patterns. * * @param tensor Pointer to the target ggml_tensor object (const-qualified). */ static bool is_matmul_weight(const ggml_tensor * tensor) { std::string name = ggml_get_name(tensor); static const std::unordered_set weight_suffixes{ "output.weight", "attn_q.weight", "attn_k.weight", "attn_v.weight", "attn_output.weight", "ffn_gate.weight", "ffn_up.weight", "ffn_down.weight" }; for (const auto & suffix : weight_suffixes) { if (name.find(suffix) != std::string::npos) { return true; } } return false; } /** * @brief Applies a element-wise operation to two input tensors using the CANN * backend. * * This templated function takes a binary operator and applies it to two source * tensors * associated with the destination tensor. The function handles broadcasting as * needed. * * @tparam binary_op A callable object (e.g., lambda or function pointer) representing * the binary operation to be performed. It must take three arguments: * (ggml_backend_cann_context&, aclTensor*, aclTensor*, aclTensor*). * * @param ctx The CANN backend context used to manage execution and resources. * @param dst The destination tensor. */ template void ggml_cann_binary_op(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; acl_tensor_ptr acl_src0, acl_src1, acl_dst; // Need bcast bcast_shape(src0, src1, dst, acl_src0, acl_src1, acl_dst); binary_op(ctx, acl_src0.get(), acl_src1.get(), acl_dst.get()); } /** * @brief Applies a unary operation to an input tensor using the CANN backend. * * This templated function applies a unary operator to the source tensor of `dst` * and stores the result in the destination tensor. * * @tparam unary_op A callable with the signature: * void(ggml_backend_cann_context&, aclTensor *, aclTensor *) * where the first aclTensor is the source and the second is the destination. * @param ctx The CANN backend context for managing resources and execution. * @param dst The destination tensor. Its src[0] is treated as the input tensor. 
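 *
 * A minimal usage sketch (illustrative, assuming the callable is supplied as a
 * non-type template argument as described by @tparam above; aclnn_cos is the
 * helper declared earlier in this header):
 * @code
 * // dst = cos(dst->src[0]), executed on the CANN device
 * ggml_cann_op_unary<aclnn_cos>(ctx, dst);
 * @endcode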
*/ template void ggml_cann_op_unary(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; acl_tensor_ptr acl_src = ggml_cann_create_tensor(src); acl_tensor_ptr acl_dst = ggml_cann_create_tensor(dst); unary_op(ctx, acl_src.get(), acl_dst.get()); } /** * @brief Applies a unary operation to a ggml tensor using the CANN backend. * * @details This function applies a unary operation to the input tensor using * a user-provided lambda or callable `unary_op`. The lambda receives the * CANN backend context and two ACL tensors: the source and the destination. * * Internally, this function handles the conversion from GGML tensors to ACL tensors, * calls the provided unary op, and manages resource cleanup. The input is assumed * to be `dst->src[0]`, and the result is written to `dst`. * * This utility simplifies writing unary op wrappers by abstracting tensor preparation. * * @param unary_op A callable that performs the unary operation using CANN ACL APIs. * @param ctx The CANN context for operation execution. * @param dst The destination ggml_tensor where the result will be stored. * The input tensor is assumed to be `dst->src[0]`. * * @see GGML_CANN_CALL_OP_UNARY */ void ggml_cann_op_unary(std::function unary_op, ggml_backend_cann_context & ctx, ggml_tensor * dst); void ggml_cann_ssm_conv(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies a gated (GLU-style) unary operation using the CANN backend. * * @details This function performs a gated activation such as GEGLU or ReGLU. * It supports two input modes: * * 1. **Dual input mode**: `dst->src[0]` and `dst->src[1]` are both valid tensors. * These are used directly as the value and gate tensors. * * 2. **Packed input mode**: Only `dst->src[0]` is valid, and it is assumed to * contain a concatenation of value and gate along the first dimension. This tensor * will be split into two equal halves to form the value and gate inputs. * * The function applies a user-provided unary operation (e.g., GELU) to the value tensor, * then multiplies the result in-place with the gate tensor: * * @code * dst = unary_op(value) * gate; * @endcode * * The `swapped` parameter (from `dst->op_params[1]`) allows flipping the * order of value/gate in the packed input case. * * @param unary_op A callable that performs the unary operation using CANN ACL APIs. * It receives (ctx, acl_value_tensor, acl_output_tensor). * @param ctx The CANN context used for execution. * @param dst The destination ggml_tensor. Source tensors are in `dst->src[0]` and optionally `src[1]`. * * @see GGML_CANN_CALL_OP_UNARY_GATED */ void ggml_cann_op_unary_gated(std::function unary_op, ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Helper macro to call a unary ACL operator via ggml_cann_op_unary. * * This macro wraps the specified ACLNN unary operator name into a lambda expression, * and passes it to `ggml_cann_op_unary`, which handles the common logic for executing * unary ops in the CANN backend. * * Internally, this macro expands to a lambda like: * @code * [](ggml_backend_cann_context& ctx, aclTensor* acl_src, aclTensor* acl_dst) { * GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); * }; * @endcode * * This lambda is then passed to `ggml_cann_op_unary`, which applies the operation. * * @param OP_NAME The name of the ACL unary operator to invoke via GGML_CANN_CALL_ACLNN_OP. 
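 *
 * Usage sketch (illustrative; `Cos` is assumed to name an ACLNN op that
 * provides aclnnCosGetWorkspaceSize and aclnnCos with a (src, dst) signature):
 * @code
 * // inside an op handler where `ctx` and `dst` are in scope:
 * GGML_CANN_CALL_OP_UNARY(Cos);
 * @endcode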
* * @see ggml_cann_op_unary * @see GGML_CANN_CALL_ACLNN_OP */ #define GGML_CANN_CALL_OP_UNARY(OP_NAME) \ do { \ auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { \ GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); \ }; \ ggml_cann_op_unary(lambda, ctx, dst); \ } while (0) /** * @brief Helper macro to call a gated unary ACL operator via ggml_cann_op_unary_gated. * * This macro wraps the specified ACLNN unary operator name into a lambda expression, * and passes it to `ggml_cann_op_unary_gated`, which handles the common logic for * executing gated unary ops in the CANN backend. * * Internally, this macro expands to a lambda like: * @code * [](ggml_backend_cann_context& ctx, aclTensor* acl_src, aclTensor* acl_dst) { * GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); * }; * @endcode * * This lambda is then passed to `ggml_cann_op_unary_gated`, which applies the operation. * * @param OP_NAME The name of the ACL unary operator to invoke via GGML_CANN_CALL_ACLNN_OP. * * @see ggml_cann_op_unary_gated * @see GGML_CANN_CALL_ACLNN_OP */ #define GGML_CANN_CALL_OP_UNARY_GATED(OP_NAME) \ do { \ auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { \ GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); \ }; \ ggml_cann_op_unary_gated(lambda, ctx, dst); \ } while (0) #endif // CANN_ACLNN_OPS /** * @brief Performs outer product operation on two ggml tensors using the CANN backend. * * @details This function computes the outer product of two input tensors (src0 and src1) * and stores the result in the destination tensor. The outer product operation is defined as: * dst[i,j,k,l] = sum_m (src0[i,m,k,l] * src1[j,m,k,l]) * * The function supports multiple data types including F32, F16. For floating-point * types, it uses batch matrix multiplication for efficient computation. * * The implementation handles 4D tensor broadcasting and batch processing automatically. * * @param ctx The CANN backend context for operation execution and memory management. * @param dst The destination ggml_tensor where the outer product result will be stored. * The input tensors are assumed to be `dst->src[0]` and `dst->src[1]`. * * @see GGML_CANN_CALL_ACLNN_OP for CANN operator invocation */ void ggml_cann_out_prod(ggml_backend_cann_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cann/common.h000066400000000000000000000547561512524704700205000ustar00rootroot00000000000000/* * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef CANN_COMMON_H #define CANN_COMMON_H #include "../ggml-impl.h" #include "../include/ggml-cann.h" #include "../include/ggml.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MATRIX_ROW_PADDING 512 #define GGML_CANN_MAX_STREAMS 8 /** * @brief Handles CANN-related errors by printing an error message and * terminating the program. * @param stmt The statement that caused the error. * @param func The function in which the error occurred. * @param file The file in which the error occurred. * @param line The line number at which the error occurred. * @param msg The error message. */ [[noreturn]] void ggml_cann_error(const char * stmt, const char * func, const char * file, int line, const char * msg); /** * @brief Checks the result of a CANN function call and invokes the error * handler if the call fails. * @param stmt The CANN function call to check. * @param success The success code that indicates the call was successful. * @param error_fn The function to call to retrieve the error message. */ #define ACL_CHECK_GEN(stmt, success, error_fn) \ do { \ int err_code = (stmt); \ if (err_code != (success)) { \ ggml_cann_error(#stmt, __func__, __FILE__, __LINE__, error_fn()); \ } \ } while (0); #define ACL_CHECK(stmt) ACL_CHECK_GEN(stmt, 0, aclGetRecentErrMsg) /** * @brief Contains information about CANN devices. */ struct ggml_cann_device_info { /** * @brief Number of CANN devices available. */ int32_t device_count; /** * @brief Information about a single CANN device. */ struct cann_device_info { int cc; /**< Compute capability. */ size_t smpb; /**< Maximum shared memory per block. */ bool vmm; /**< Virtual memory support. */ size_t vmm_granularity; /**< Granularity of virtual memory. */ size_t total_vram; /**< Total video RAM available on the device. */ }; cann_device_info devices[GGML_CANN_MAX_DEVICES] = {}; /**< Array of CANN device information. */ }; const ggml_cann_device_info & ggml_cann_info(); void ggml_cann_set_device(int32_t device); int32_t ggml_cann_get_device(); std::optional get_env(const std::string & name); bool parse_bool(const std::string & value); int parse_integer(const std::string & value); /** * @brief Abstract base class for memory pools used by CANN. */ struct ggml_cann_pool { /** * @brief Virtual destructor for the memory pool. */ virtual ~ggml_cann_pool() = default; /** * @brief Allocates memory from the pool. * * @param size The size of the memory block to allocate. * @param actual_size Pointer to a variable where the actual allocated size * will be stored. * @return Pointer to the allocated memory block. */ virtual void * alloc(size_t size, size_t * actual_size) = 0; /** * @brief Frees a previously allocated memory block. * * @param ptr Pointer to the memory block to free. * @param size Size of the memory block to free. * @note Note that all CANN opertors are running async. Make sure memory is * still avaiable before this operator finished. */ virtual void free(void * ptr, size_t size) = 0; }; /** * @brief RAII wrapper for managing memory allocations from a CANN memory pool. */ struct ggml_cann_pool_alloc { ggml_cann_pool * pool = nullptr; /**< Pointer to the memory pool. 
*/ void * ptr = nullptr; /**< Pointer to the allocated memory block. */ size_t actual_size = 0; /**< Actual size of the allocated memory block. */ /** * @brief Default constructor. */ ggml_cann_pool_alloc() = default; /** * @brief Constructor that initializes the memory pool. * @param pool Reference to the memory pool. */ explicit ggml_cann_pool_alloc(ggml_cann_pool & pool) : pool(&pool) {} /** * @brief Constructor that initializes the memory pool and allocates memory. * @param pool Reference to the memory pool. * @param size Size of the memory block to allocate. */ ggml_cann_pool_alloc(ggml_cann_pool & pool, size_t size) : pool(&pool) { alloc(size); } /** * @brief Destructor that frees the allocated memory block. */ ~ggml_cann_pool_alloc() { if (ptr != nullptr) { pool->free(ptr, actual_size); } } /** * @brief Allocates memory from the pool. * @param size Size of the memory block to allocate. * @return Pointer to the allocated memory block. */ void * alloc(size_t size) { GGML_ASSERT(pool != nullptr); GGML_ASSERT(ptr == nullptr); ptr = pool->alloc(size, &this->actual_size); return ptr; } /** * @brief Allocates memory from a specific memory pool. * @param pool Reference to the memory pool. * @param size Size of the memory block to allocate. * @return Pointer to the allocated memory block. */ void * alloc(ggml_cann_pool & pool, size_t size) { this->pool = &pool; return alloc(size); } /** * @brief Gets the pointer to the allocated memory block. * @return Pointer to the allocated memory block. */ void * get() { return ptr; } // Deleted copy constructor ggml_cann_pool_alloc(const ggml_cann_pool_alloc &) = delete; // Deleted move constructor ggml_cann_pool_alloc(ggml_cann_pool_alloc &&) = delete; // Deleted copy assignment operator ggml_cann_pool_alloc & operator=(const ggml_cann_pool_alloc &) = delete; // Deleted move assignment operator ggml_cann_pool_alloc & operator=(ggml_cann_pool_alloc &&) = delete; }; #ifdef USE_ACL_GRAPH struct ggml_graph_node_properties { // dst tensor void * node_address; int64_t ne[GGML_MAX_DIMS]; size_t nb[GGML_MAX_DIMS]; // src tensor void * src_address[GGML_MAX_SRC]; int64_t src_ne[GGML_MAX_SRC][GGML_MAX_DIMS]; size_t src_nb[GGML_MAX_SRC][GGML_MAX_DIMS]; // op ggml_op node_op; int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)]; /** * @brief Check if a ggml tensor node matches this property set. * * This function compares all relevant fields (address, op type, shape, source inputs, op params) * to determine whether the current node matches these previously recorded properties. * * @param node The current ggml tensor node. * @return true if all fields match (excluding GGML_OP_VIEW); false otherwise. 
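*
* A minimal sketch (editor-added, not part of the original source) of how a set of
* recorded node properties can be checked against a new computation graph before a
* captured ACL graph is reused; `props` is a placeholder for a previously filled
* std::vector<ggml_graph_node_properties>:
* @code
* bool all_match = (int) props.size() == cgraph->n_nodes;
* for (int i = 0; all_match && i < cgraph->n_nodes; ++i) {
*     // any mismatch in address, op, shape, strides, sources or op params
*     // forces the CANN graph to be re-captured
*     all_match = props[i].has_matching_properties(cgraph->nodes[i]);
* }
* @endcode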
*/ bool has_matching_properties(ggml_tensor * node) { if (node->data != this->node_address && node->op != GGML_OP_VIEW) { return false; } if (node->op != this->node_op) { return false; } for (int i = 0; i < GGML_MAX_DIMS; i++) { if (node->ne[i] != this->ne[i]) { return false; } if (node->nb[i] != this->nb[i]) { return false; } } for (int i = 0; i < GGML_MAX_SRC; i++) { if (node->src[i]) { if (node->src[i]->data != this->src_address[i] && node->op != GGML_OP_VIEW) { return false; } for (int d = 0; d < GGML_MAX_DIMS; d++) { if (node->src[i]->ne[d] != this->src_ne[i][d]) { return false; } if (node->src[i]->nb[d] != this->src_nb[i][d]) { return false; } } } else { if (this->src_address[i] != nullptr) { return false; } } } if (node->op == GGML_OP_SCALE || node->op == GGML_OP_UNARY || node->op == GGML_OP_GLU) { return memcmp(this->op_params, node->op_params, GGML_MAX_OP_PARAMS) == 0; } return true; } }; struct ggml_cann_graph { ~ggml_cann_graph() { if (graph != nullptr) { ACL_CHECK(aclmdlRIDestroy(graph)); } } aclmdlRI graph = nullptr; std::vector ggml_graph_properties; /** * @brief Create a new CANN graph from a ggml computation graph. * * This function creates a new ggml_cann_graph object and fills its node properties * (operation type, dimensions, strides, input sources, and operation parameters) * based on the current ggml computation graph. * * Each node in the ggml graph is mapped to a property entry in the new CANN graph: * - node address * - operation type * - shape (ne) and strides (nb) * - source tensor addresses * - operation parameters * * @param cgraph The current ggml computation graph. * @return Pointer to the newly created ggml_cann_graph object. */ static ggml_cann_graph * create_from_cgraph(ggml_cgraph * cgraph) { ggml_cann_graph * new_graph = new ggml_cann_graph(); new_graph->ggml_graph_properties.resize(cgraph->n_nodes); for (int node_idx = 0; node_idx < cgraph->n_nodes; ++node_idx) { ggml_tensor * node = cgraph->nodes[node_idx]; auto & prop = new_graph->ggml_graph_properties[node_idx]; prop.node_address = node->data; prop.node_op = node->op; std::copy_n(node->ne, GGML_MAX_DIMS, prop.ne); std::copy_n(node->nb, GGML_MAX_DIMS, prop.nb); for (int src = 0; src < GGML_MAX_SRC; ++src) { if (node->src[src]) { prop.src_address[src] = node->src[src]->data; std::copy_n(node->src[src]->ne, GGML_MAX_DIMS, prop.src_ne[src]); std::copy_n(node->src[src]->nb, GGML_MAX_DIMS, prop.src_nb[src]); } else { prop.src_address[src] = nullptr; std::fill_n(prop.src_ne[src], GGML_MAX_DIMS, 0); std::fill_n(prop.src_nb[src], GGML_MAX_DIMS, 0); } } memcpy(prop.op_params, node->op_params, GGML_MAX_OP_PARAMS); } return new_graph; } /** * @brief Check whether this CANN graph matches the given ggml computation graph. * * This function compares the number of nodes and each node's properties * (operation type, dimensions, strides, inputs, and operation parameters) * to determine whether this CANN graph matches the given ggml graph. * * @param cgraph The current ggml computation graph. * @return true if this CANN graph matches the ggml graph; false otherwise. */ bool matches_cgraph(ggml_cgraph * cgraph) { if (this->ggml_graph_properties.size() != static_cast(cgraph->n_nodes)) { return false; } for (int i = 0; i < cgraph->n_nodes; ++i) { if (!this->ggml_graph_properties[i].has_matching_properties(cgraph->nodes[i])) { return false; } } return true; } }; /** * @brief LRU cache for managing ggml_cann_graph objects. 
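*
* Illustrative usage (editor-added sketch; the real call sites live in the CANN
* backend's graph-compute path, which is not shown here):
* @code
* ggml_cann_graph_lru_cache cache;
* if (!cache.find_and_move_to_front(cgraph)) {
*     // no cached graph matches this ggml graph: record a new one
*     cache.push(ggml_cann_graph::create_from_cgraph(cgraph));
* }
* ggml_cann_graph * active = cache.cache_list.front();  // most recently used entry
* @endcode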
* * This class maintains a list of shared_ptr to ggml_cann_graph objects * and enforces a maximum capacity. It provides methods to push new graphs, * move existing graphs to the front (most recently used), and clear the cache. */ struct ggml_cann_graph_lru_cache { size_t capacity; /**< Maximum number of graphs in the cache. */ std::list cache_list; /**< List storing cached graphs as raw pointers. */ ggml_cann_graph_lru_cache() { capacity = parse_integer(get_env("GGML_CANN_GRAPH_CACHE_CAPACITY").value_or("12")); } /** * @brief Push a new graph to the front of the cache. * If the cache exceeds capacity, the least recently used graph is deleted. * @param new_node Pointer to the new ggml_cann_graph to cache. * Ownership is transferred to the cache (cache will delete it). */ void push(ggml_cann_graph * new_node) { if (cache_list.size() >= capacity) { ggml_cann_graph * old = cache_list.back(); cache_list.pop_back(); delete old; // free the old graph } cache_list.push_front(new_node); } /** * @brief Clear all graphs from the cache (also frees memory). */ void clear() { for (auto ptr : cache_list) { delete ptr; } cache_list.clear(); } /** * @brief Destructor that clears the cache and frees all cached graphs. */ ~ggml_cann_graph_lru_cache() { clear(); } /** * @brief Find a cached CANN graph that matches the given ggml graph and move it to front. * * This function iterates through the cached CANN graphs stored in the LRU cache and * compares them against the given ggml computation graph. If a matching graph is found, * it is promoted to the front of the LRU cache and returned. Otherwise, the function * returns nullptr. * * @param cgraph The current ggml computation graph. * @return true if found; false otherwise. */ bool find_and_move_to_front(ggml_cgraph * cgraph) { for (auto & graph_ptr : this->cache_list) { if (graph_ptr->matches_cgraph(cgraph)) { cache_list.remove(graph_ptr); cache_list.push_front(graph_ptr); return true; } } return false; } }; #endif // USE_ACL_GRAPH struct ggml_cann_rope_cache { ~ggml_cann_rope_cache() { if (theta_scale_cache) { ACL_CHECK(aclrtFree(theta_scale_cache)); } if (sin_cache) { ACL_CHECK(aclrtFree(sin_cache)); } if (cos_cache) { ACL_CHECK(aclrtFree(cos_cache)); } if (position_select_index) { ACL_CHECK(aclrtFree(position_select_index)); } if (theta_scale_exp_host) { free(theta_scale_exp_host); } if (position_select_index_host) { free(position_select_index_host); } if (yarn_ramp_cache) { ACL_CHECK(aclrtFree(yarn_ramp_cache)); } } bool equal(int64_t theta_scale_length, int64_t position_length, float ext_factor, float theta_scale, float freq_scale, float attn_factor, bool is_neox, bool indep_sects, bool mrope_used, bool is_imrope, int sections[4]) { return this->theta_scale_length == theta_scale_length && this->position_length == position_length && this->ext_factor == ext_factor && this->theta_scale == theta_scale && this->freq_scale == freq_scale && this->attn_factor == attn_factor && this->is_neox == is_neox && this->indep_sects == indep_sects && this->mrope_used == mrope_used && this->is_imrope == is_imrope && this->sections[0] == sections[0] && this->sections[1] == sections[1] && this->sections[2] == sections[2] && this->sections[3] == sections[3]; } void set(int64_t theta_scale_length, int64_t position_length, float ext_factor, float theta_scale, float freq_scale, float attn_factor, bool is_neox, bool indep_sects, bool mrope_used, bool is_imrope, int sections[4]) { this->theta_scale_length = theta_scale_length; this->position_length = position_length; 
this->ext_factor = ext_factor; this->theta_scale = theta_scale; this->freq_scale = freq_scale; this->attn_factor = attn_factor; this->is_neox = is_neox; this->indep_sects = indep_sects; this->mrope_used = mrope_used; this->is_imrope = is_imrope; this->sections[0] = sections[0]; this->sections[1] = sections[1]; this->sections[2] = sections[2]; this->sections[3] = sections[3]; } // memory cache, prepare before inferencing. void * theta_scale_cache = nullptr; float * theta_scale_exp_host = nullptr; int * position_select_index_host = nullptr; void * position_select_index = nullptr; void * yarn_ramp_cache = nullptr; // sin/cos cache, used only to accelerate first layer on each device void * sin_cache = nullptr; void * cos_cache = nullptr; // Properties to check before reusing the sincos cache int64_t theta_scale_length = 0; int64_t position_length = 0; bool cached = false; float ext_factor = 0.0f; float theta_scale = 0.0f; float freq_scale = 0.0f; float attn_factor = 0.0f; bool is_neox = false; bool indep_sects = false; bool mrope_used = false; int sections[4] = { 0, 0, 0, 0 }; bool is_imrope = false; }; struct ggml_cann_tensor_cache { ~ggml_cann_tensor_cache() { if (cache != nullptr) { ACL_CHECK(aclrtFree(cache)); } } void * cache = nullptr; int64_t size = 0; }; /** * @brief Context for managing CANN backend operations. */ struct ggml_backend_cann_context { int32_t device; /**< Device ID. */ std::string name; /**< Name of the device. */ std::string description; /**< Description of the device. */ aclrtEvent copy_event = nullptr; /**< Event for managing copy operations. */ #ifdef USE_ACL_GRAPH /// Cached CANN ACL graph used for executing the current ggml computation graph. ggml_cann_graph_lru_cache graph_lru_cache; bool acl_graph_mode = true; #endif bool async_mode; // Rope Cache ggml_cann_rope_cache rope_cache; // Constant Pool ggml_cann_tensor_cache rms_norm_one_tensor_cache; ggml_cann_tensor_cache rms_norm_zero_tensor_cache; aclrtStream streams[GGML_CANN_MAX_STREAMS] = { nullptr }; /**< Array of streams for the device. */ /** * @brief Constructor for initializing the context with a given device. * @param device Device ID. */ explicit ggml_backend_cann_context(int device) : device(device), name("CANN" + std::to_string(device)) { ggml_cann_set_device(device); description = aclrtGetSocName(); #ifdef USE_ACL_GRAPH acl_graph_mode = parse_bool(get_env("GGML_CANN_ACL_GRAPH").value_or("on")); GGML_LOG_INFO("%s: device %d execution mode is %s (%s)\n", __func__, device, acl_graph_mode ? "GRAPH" : "EAGER", acl_graph_mode ? "acl graph enabled" : "acl graph disabled"); #endif } /** * @brief Destructor for cleaning up resources. */ ~ggml_backend_cann_context() { ggml_cann_set_device(device); if (copy_event != nullptr) { ACL_CHECK(aclrtDestroyEvent(copy_event)); } for (int i = 0; i < GGML_CANN_MAX_STREAMS; ++i) { if (streams[i] != nullptr) { ACL_CHECK(aclrtDestroyStream(streams[i])); } } } /** * @brief Get or create a stream for a given index. * @param stream Index of the stream. * @return The stream corresponding to the given index. */ aclrtStream stream(int stream) { if (streams[stream] == nullptr) { // If the device is not set here, destroying the stream later may cause a mismatch // between the thread contexts where the stream was created and destroyed. // However, I printed the device_id, thread_id, and stream, and they are all consistent. 
ACL_CHECK(aclrtSetDevice(device)); ACL_CHECK(aclrtCreateStream(&streams[stream])); } return streams[stream]; } /** * @brief Get or create the default stream (index 0). * @return The default stream. */ aclrtStream stream() { return stream(0); } // TODO: each stream should have a memory pool. std::unique_ptr mem_pool; /**< Memory pool for the device. */ /** * @brief Create a new memory pool for a given device. * @param device Device ID. * @return A unique pointer to the new memory pool. */ static std::unique_ptr new_pool_for_device(int device); /** * @brief Get or create the memory pool for the context. * @return Reference to the memory pool. */ ggml_cann_pool & pool() { if (mem_pool == nullptr) { mem_pool = new_pool_for_device(device); } return *mem_pool; } }; #endif // CANN_COMMON_H ggml-org-ggml-3678254/src/ggml-cann/ggml-cann.cpp000066400000000000000000003202641512524704700213740ustar00rootroot00000000000000/* * Copyright (c) 2023-2024 The ggml authors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "ggml-cann.h" #include "ggml-backend-impl.h" #include "ggml-cann/aclnn_ops.h" #include "ggml-cann/common.h" #include "ggml-impl.h" #include "ggml.h" #include #include #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #define GGML_CANN_NAME "CANN" /** * @brief Handles CANN errors by printing an error message and aborting. * * @param stmt The statement that caused the error. * @param func The function in which the error occurred. * @param file The file in which the error occurred. * @param line The line number where the error occurred. * @param msg The error message. */ [[noreturn]] void ggml_cann_error(const char * stmt, const char * func, const char * file, int line, const char * msg) { int32_t id = -1; aclrtGetDevice(&id); GGML_LOG_ERROR("CANN error: %s\n", msg); GGML_LOG_ERROR(" current device: %d, in function %s at %s:%d\n", id, func, file, line); GGML_LOG_ERROR(" %s\n", stmt); // abort with GGML_ASSERT to get a stack trace GGML_ABORT("CANN error"); } // Thread-local variable to record the current device of this thread. thread_local int g_current_cann_device = -1; /** * @brief Set the CANN device to be used. * * @param device The target device ID to set. */ void ggml_cann_set_device(const int32_t device) { // int current_device = -1; // Note: In some CANN versions, if no device has been set yet, // aclrtGetDevice(¤t_device) may return 0 by default. 
// aclrtGetDevice(¤t_device); // If the current device is already the target one, no need to switch. if (device == g_current_cann_device) { return; } // Switch to the new device. ACL_CHECK(aclrtSetDevice(device)); // Update the global device record. g_current_cann_device = device; } /** * @brief Retrieves the current device ID. * * @return The current device ID. */ int32_t ggml_cann_get_device() { int32_t id; ACL_CHECK(aclrtGetDevice(&id)); return id; } /** * @brief Get the value of the specified environment variable (name). * if not empty, return a std::string object */ std::optional get_env(const std::string & name) { const char * val = std::getenv(name.c_str()); if (!val) { return std::nullopt; } std::string res = std::string(val); std::transform(res.begin(), res.end(), res.begin(), ::tolower); return res; } /** * @brief Verify whether the environment variable is a valid value. */ bool parse_bool(const std::string & value) { std::unordered_set valid_values = { "on", "1", "yes", "y", "enable", "true" }; return valid_values.find(value) != valid_values.end(); } /** * @brief Parse a string as an integer, returning 0 if invalid. * * This function attempts to convert the input string `value` to an `int`. * If the string is not a valid integer or is out of the `int` range, * it returns 0. * * @param value The string to parse. * @return The parsed integer, or 0 if conversion fails. */ int parse_integer(const std::string & value) { try { return std::stoi(value); } catch (...) { return 0; } } /** * @brief Initialize the CANN device information. * * This function initializes the CANN device information by obtaining the * device count and setting the memory allocation granularity for each device. * * @return A structure containing the device information. */ static ggml_cann_device_info ggml_cann_init() { ggml_cann_device_info info = {}; aclError err = aclrtGetDeviceCount((uint32_t *) &info.device_count); if (err != ACL_SUCCESS) { GGML_LOG_ERROR("%s: failed to initialize CANN: %s\n", __func__, aclGetRecentErrMsg()); return info; } GGML_ASSERT(info.device_count <= GGML_CANN_MAX_DEVICES); for (int id = 0; id < info.device_count; ++id) { aclrtPhysicalMemProp prop = {}; prop.handleType = ACL_MEM_HANDLE_TYPE_NONE; prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED; prop.memAttr = ACL_HBM_MEM_HUGE; prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE; prop.location.id = id; prop.reserve = 0; err = aclrtMemGetAllocationGranularity(&prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED, &info.devices[id].vmm_granularity); info.devices[id].vmm = err == ACL_SUCCESS; size_t free, total; ggml_backend_cann_get_device_memory(id, &free, &total); info.devices[id].total_vram = free; } // TODO: add more device info later. return info; } /** * @brief Retrieve the CANN device information. * * This function returns a reference to a structure containing the CANN device * information. The device information is initialized once and reused on * subsequent calls. * * @return A reference to the structure containing the device information. */ const ggml_cann_device_info & ggml_cann_info() { static ggml_cann_device_info info = ggml_cann_init(); return info; } //#define DEBUG_CANN_MALLOC /** * @brief A pool of CANN buffers(priority segment buffer). * * This class manages a pool of CANN buffers for a specific device. */ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { /** * @brief The maximum reuse margin for a buffer. */ static const size_t max_reuse_margin = 1ull << 22; // 4MB /** * @brief The minimum free margin for a buffer. 
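*
* Editor-added note (illustrative, based on alloc() below): together with
* max_reuse_margin this constant drives the pool's reuse/cleanup policy, roughly:
* @code
* // `idle_ms` is a placeholder for the time since the buffer was last used
* bool can_reuse    = b.size >= size && (b.size - size) <= max_reuse_margin;
* bool should_clean = !disable_clean && b.size > min_free_margin && idle_ms > 100;
* @endcode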
*/ static const size_t min_free_margin = 1ull << 20; // 1MB /** * @brief The alignment for buffer allocation. */ static const size_t alignment = 128; /** * @brief The device ID associated with this buffer pool. */ int device; /** * @brief Whether to disable clean during buffer allocation. */ bool disable_clean = false; /** * @brief Structure representing a CANN buffer. */ struct ggml_cann_buffer { void * ptr = nullptr; ///< Pointer to the buffer. size_t size = 0; ///< Size of the buffer. std::chrono::steady_clock::time_point last_used; ///< Last used time. bool operator>(const ggml_cann_buffer & other) const { return size > other.size; } }; /** * @brief Array of CANN buffers in the pool. */ std::unordered_map buffer_pool; std::priority_queue, std::greater<>> free_buffers; /** * @brief Total size of all buffers in the pool. */ size_t pool_size = 0; /** * @brief Constructor to initialize the buffer pool for a specific device. * * @param device The device ID to associate with this buffer pool. */ explicit ggml_cann_pool_buf_prio(int device) : device(device) { disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or("")); } /** * @brief Destructor to free all buffers in the pool. */ ~ggml_cann_pool_buf_prio() { ggml_cann_set_device(device); for (auto & [b_ptr, b_size] : buffer_pool) { aclrtFree(b_ptr); pool_size -= b_size; } buffer_pool.clear(); GGML_ASSERT(pool_size == 0); } /** * @brief Allocate a buffer of the given size. * * @param size The size of the buffer to allocate. * @param actual_size A pointer to a variable to receive the actual size of * the allocated buffer. * @return A pointer to the allocated buffer. */ void * alloc(size_t size, size_t * actual_size) override { size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } void * ptr = nullptr; auto now = std::chrono::steady_clock::now(); std::vector free_buffers_rest; free_buffers_rest.reserve(free_buffers.size()); while (!free_buffers.empty()) { auto b = free_buffers.top(); free_buffers.pop(); if (b.size >= size) { // reuse the buffer if the size is enough const size_t margin = b.size - size; if (margin <= max_reuse_margin) { *actual_size = b.size; ptr = b.ptr; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: reused %p, " "pool_size = %5u MB, " "size = %5u MB, " "margin = %5u MB\n", device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), (uint32_t) (GGML_PAD(size, 1048576) / 1048576), (uint32_t) (GGML_PAD(margin, 1048576) / 1048576)); #endif break; } } bool should_clean = !disable_clean && b.size > min_free_margin && std::chrono::duration_cast(now - b.last_used).count() > 100; if (should_clean) { // free the buffer if the size is needed to be freed ACL_CHECK(aclrtFree(b.ptr)); pool_size -= b.size; buffer_pool.erase(b.ptr); #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: clean %p, " "pool_size = %5u MB, " "size = %5u MB\n", device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), (uint32_t) (GGML_PAD(b.size, 1048576) / 1048576)); #endif continue; } free_buffers_rest.push_back(b); } for (ggml_cann_buffer & b : free_buffers_rest) { free_buffers.push(std::move(b)); } #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO("cann pool[%d] free pool_size = %5u MB\n\n", device, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576)); #endif if (ptr != nullptr) { return ptr; } // allocate a new buffer if no buffer can be reused ggml_cann_set_device(device); ACL_CHECK(aclrtMalloc(&ptr, size, ACL_MEM_MALLOC_HUGE_FIRST)); *actual_size = size; pool_size += size; #ifdef DEBUG_CANN_MALLOC 
GGML_LOG_INFO( "cann pool[%d]: allocate %p, " "pool_size = %5u MB, " "size = %5u MB\n", device, ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), (uint32_t) (GGML_PAD(size, 1048576) / 1048576)); #endif buffer_pool.emplace(ptr, size); return ptr; } /** * @brief Free a buffer and return it to the pool. * * @param ptr Pointer to the buffer to free. * @param size Size of the buffer to free. */ void free(void * ptr, size_t size) override { GGML_UNUSED(size); auto it = buffer_pool.find(ptr); if (it == buffer_pool.end()) { GGML_ABORT("cann pool[%d]: buffer %p not found in pool\n", device, ptr); } auto now = std::chrono::steady_clock::now(); free_buffers.emplace(ggml_cann_buffer{ ptr, it->second, now }); #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: return %p, " "pool_size = %5u MB\n", device, ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576)); #endif } }; /** * @brief A pool of CANN buffers(segment buffer). * * This class manages a pool of CANN buffers for a specific device. */ struct ggml_cann_pool_buf : public ggml_cann_pool { /** * @brief The maximum reuse margin for a buffer. */ static const size_t max_reuse_margin = 1ull << 22; // 4MB /** * @brief The minimum free margin for a buffer. */ static const size_t min_free_margin = 1ull << 20; // 1MB /** * @brief The alignment for buffer allocation. */ static const size_t alignment = 128; /** * @brief The maximum number of buffers in the pool. */ static const int MAX_BUFFERS = 256; /** * @brief The device ID associated with this buffer pool. */ int device; /** * @brief Whether to disable clean during buffer allocation. */ bool disable_clean = false; /** * @brief Structure representing a CANN buffer. */ struct ggml_cann_buffer { void * ptr = nullptr; ///< Pointer to the buffer memory. size_t size = 0; ///< Size of the buffer. bool used = false; ///< Whether the buffer is currently in use. std::chrono::steady_clock::time_point last_used; ///< Last used time. }; /** * @brief Array of CANN buffers in the pool. */ ggml_cann_buffer buffer_pool[MAX_BUFFERS] = {}; /** * @brief Total size of all buffers in the pool. */ size_t pool_size = 0; /** * @brief Constructor to initialize the buffer pool for a specific device. * * @param device The device ID to associate with this buffer pool. */ explicit ggml_cann_pool_buf(int device) : device(device) { disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or("")); } /** * @brief Destructor to free all buffers in the pool. */ ~ggml_cann_pool_buf() { ggml_cann_set_device(device); for (int i = 0; i < MAX_BUFFERS; ++i) { ggml_cann_buffer & b = buffer_pool[i]; if (b.ptr != nullptr) { aclrtFree(b.ptr); pool_size -= b.size; } } GGML_ASSERT(pool_size == 0); } /** * @brief Allocate a buffer of the given size. * * @param size The size of the buffer to allocate. * @param actual_size A pointer to a variable to receive the actual size of * the allocated buffer. * @return A pointer to the allocated buffer. 
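*
* Typical use goes through the RAII helper from common.h rather than calling
* alloc()/free() directly (editor-added sketch; `ctx` and `n_bytes` are placeholders):
* @code
* ggml_cann_pool_alloc tmp(ctx.pool(), n_bytes);  // draws from this pool
* void * dev_buf = tmp.get();                     // device pointer
* // ... enqueue CANN operators that use dev_buf ...
* // the buffer is returned to the pool when `tmp` goes out of scope
* @endcode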
*/ void * alloc(size_t size, size_t * actual_size) override { size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } void * ptr = nullptr; auto now = std::chrono::steady_clock::now(); int i = 0; for (; i < MAX_BUFFERS; ++i) { ggml_cann_buffer & b = buffer_pool[i]; if (b.ptr == nullptr) { break; } if (b.used) { continue; } if (b.size >= size) { // reuse the buffer if the size is enough const size_t margin = b.size - size; if (margin <= max_reuse_margin) { *actual_size = b.size; b.used = true; ptr = b.ptr; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: reused %p, " "pool_size = %5u MB, " "size = %5u MB, " "margin = %5u MB\n", device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), (uint32_t) (GGML_PAD(size, 1048576) / 1048576), (uint32_t) (GGML_PAD(margin, 1048576) / 1048576)); #endif break; } } bool should_clean = !disable_clean && b.size > min_free_margin && std::chrono::duration_cast(now - b.last_used).count() > 100; if (should_clean) { // free the buffer if the size is needed to be freed ACL_CHECK(aclrtFree(b.ptr)); pool_size -= b.size; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: clean %p, " "pool_size = %5u MB, " "size = %5u MB\n", device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), (uint32_t) (GGML_PAD(b.size, 1048576) / 1048576)); #endif b.ptr = nullptr; } } if (ptr != nullptr) { return ptr; } if (i < MAX_BUFFERS) { // allocate a new buffer if no buffer can be reused ggml_cann_buffer & b = buffer_pool[i]; ggml_cann_set_device(device); ACL_CHECK(aclrtMalloc(&b.ptr, size, ACL_MEM_MALLOC_HUGE_FIRST)); pool_size += size; *actual_size = size; b.size = size; b.used = true; if (i >= MAX_BUFFERS - 8) { GGML_LOG_WARN("cann pool[%d]: slots almost full\n", device); } #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: allocate %p, " "pool_size = %5u MB, " "size = %5u MB\n", device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), (uint32_t) (GGML_PAD(b.size, 1048576) / 1048576)); #endif return b.ptr; } GGML_ABORT("cann pool[%d]: slots full\n", device); } /** * @brief Free a buffer and return it to the pool. * * @param ptr Pointer to the buffer to free. * @param size Size of the buffer to free. */ void free(void * ptr, size_t size) override { GGML_UNUSED(size); for (int i = 0; i < MAX_BUFFERS; ++i) { ggml_cann_buffer & b = buffer_pool[i]; if (b.ptr != ptr) { continue; } b.used = false; b.last_used = std::chrono::steady_clock::now(); #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: return %p, " "pool_size = %5u MB\n", device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576)); #endif return; } GGML_ABORT("cann pool[%d]: slots full\n", device); } }; /** * @brief A pool of CANN buffers with virtual memory. * * This class manages a pool of CANN buffers with virtual memory for a specific * device. */ struct ggml_cann_pool_vmm : public ggml_cann_pool { /** * @brief The maximum size of the virtual memory pool (32 GB). */ size_t max_size; /** * @brief The device ID associated with this buffer pool. */ int device; /** * @brief Pointer to the start of the virtual memory pool. */ void * pool_addr = 0; /** * @brief Amount of virtual memory used in the pool. */ size_t pool_used = 0; /** * @brief Total size of the virtual memory pool. */ size_t pool_size = 0; /** * @brief Allocation granularity for the virtual memory pool. */ size_t granularity; /** * @brief Handles for the physical memory allocated. */ std::vector handles; /** * @brief Offsets for the mapped memory regions. 
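*
* Editor-added sketch of how the pool grows (mirrors alloc() below): physical
* chunks are mapped at the end of a virtual range reserved once up front, so
* previously returned pointers remain valid:
* @code
* aclrtDrvMemHandle handle;
* ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0));
* ACL_CHECK(aclrtMapMem((char *) pool_addr + pool_size, reserve_size, 0, handle, 0));
* handles.push_back(handle);
* map_offsets.push_back((char *) pool_addr + pool_size);
* @endcode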
*/ std::vector map_offsets; /** * @brief Constructor to initialize the buffer pool with virtual memory for * a specific device. * * @param device The device ID to associate with this buffer pool. */ explicit ggml_cann_pool_vmm(int device) : device(device) { auto dev = ggml_cann_info().devices[device]; granularity = dev.vmm_granularity; max_size = dev.total_vram; } /** * @brief Destructor to free all buffers in the virtual memory pool. */ ~ggml_cann_pool_vmm() { if (pool_addr != 0) { for (auto & offset : map_offsets) { ACL_CHECK(aclrtUnmapMem(offset)); } for (auto & handle : handles) { ACL_CHECK(aclrtFreePhysical(handle)); } ACL_CHECK(aclrtReleaseMemAddress(pool_addr)); } } /** * @brief Allocate a buffer of the given size in the virtual memory pool. * * @param size The size of the buffer to allocate. * @param actual_size A pointer to a variable to receive the actual size of * the allocated buffer. * @return A pointer to the allocated buffer. */ void * alloc(size_t size, size_t * actual_size) override { // round up the allocation size to the alignment to ensure that all // allocations are aligned for all data types const size_t alignment = 128; size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } size_t avail = pool_size - pool_used; if (size > avail) { // round up to the next multiple of the granularity size_t reserve_size = size - avail; reserve_size = GGML_PAD(reserve_size, granularity); GGML_ASSERT(pool_size + reserve_size <= max_size); // allocate more physical memory aclrtPhysicalMemProp prop = {}; prop.handleType = ACL_MEM_HANDLE_TYPE_NONE; prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED; prop.memAttr = ACL_HBM_MEM_HUGE; prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE; prop.location.id = device; prop.reserve = 0; aclrtDrvMemHandle handle; ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0)); // reserve virtual address space (if not already reserved) if (pool_addr == 0) { ACL_CHECK(aclrtReserveMemAddress(&pool_addr, max_size, 0, NULL, 1)); } // map at the end of the pool ACL_CHECK(aclrtMapMem((char *) pool_addr + pool_size, reserve_size, 0, handle, 0)); handles.push_back(handle); map_offsets.push_back((char *) pool_addr + pool_size); // add to the pool pool_size += reserve_size; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n", device, (unsigned long long) (pool_size / 1024 / 1024), (unsigned long long) (reserve_size / 1024 / 1024)); #endif } GGML_ASSERT(pool_addr != 0); void * ptr = (void *) ((char *) pool_addr + pool_used); *actual_size = size; pool_used += size; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device, (unsigned long long) size, (unsigned long long) ptr); #endif return ptr; } /** * @brief Free a buffer and return it to the virtual memory pool. * * @param ptr Pointer to the buffer to free. * @param size Size of the buffer to free. */ void free(void * ptr, size_t size) override { #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device, (unsigned long long) size, (unsigned long long) ptr); #endif pool_used -= size; // all deallocations must be in reverse order of the allocations GGML_ASSERT(ptr == (void *) ((char *) pool_addr + pool_used)); } }; /** * @brief Create a new CANN pool for a specific device. * * Factory method to create a new CANN pool object based on the device type. * * @param device The device ID for which to create the pool. * @return A unique pointer to the created CANN pool. 
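*
* The pool implementation is chosen from the GGML_CANN_MEM_POOL environment
* variable (editor-added illustration of the selection logic implemented below):
* @code
* setenv("GGML_CANN_MEM_POOL", "prio", 1);  // "prio" -> priority-queue buffer pool
* // unset / other values -> vmm pool when the device supports it,
* // unless "leg" forces the plain buffer pool
* auto pool = ggml_backend_cann_context::new_pool_for_device(0);
* @endcode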
*/ std::unique_ptr ggml_backend_cann_context::new_pool_for_device(int device) { std::string mem_pool_type = get_env("GGML_CANN_MEM_POOL").value_or(""); if (mem_pool_type == "prio") { GGML_LOG_INFO("%s: device %d use buffer pool with priority queue\n", __func__, device); return std::unique_ptr(new ggml_cann_pool_buf_prio(device)); } if (ggml_cann_info().devices[device].vmm && mem_pool_type != "leg") { GGML_LOG_INFO("%s: device %d use vmm pool\n", __func__, device); return std::unique_ptr(new ggml_cann_pool_vmm(device)); } GGML_LOG_INFO("%s: device %d use buffer pool\n", __func__, device); return std::unique_ptr(new ggml_cann_pool_buf(device)); } // cann buffer /** * @brief Context for managing a CANN buffer associated with a specific device. * * This structure holds information about a CANN buffer, including the device * ID, device pointer, and a name derived from GGML_CANN_NAME and the device ID. */ struct ggml_backend_cann_buffer_context { int32_t device; ///< The device ID associated with this buffer context. void * dev_ptr = nullptr; ///< Pointer to the device memory allocated for the buffer. /** * @brief Constructor to initialize the CANN buffer context. * * @param device The device ID associated with this buffer context. * @param dev_ptr Pointer to the device memory allocated for the buffer. */ ggml_backend_cann_buffer_context(int32_t device, void * dev_ptr) : device(device), dev_ptr(dev_ptr) {} /** * @brief Destructor to free the device memory allocated for the buffer. */ ~ggml_backend_cann_buffer_context() { ACL_CHECK(aclrtFree(dev_ptr)); } }; /** * @brief Check if a buffer is a CANN buffer. * * This function checks if a given buffer is a CANN buffer by comparing its * `get_name` function pointer to `ggml_backend_cann_buffer_get_name`. * * @param buffer The buffer to check. * @return true if the buffer is a CANN buffer, false otherwise. */ static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft); static bool ggml_backend_buffer_is_cann(ggml_backend_buffer_t buffer) { return ggml_backend_buft_is_cann(buffer->buft); } /** * @brief Free resources associated with a CANN buffer. * * This function frees the resources associated with a CANN buffer, including * its context. * * @param buffer The CANN buffer to free. */ static void ggml_backend_cann_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; delete ctx; } /** * @brief Retrieve the base pointer of a CANN buffer. * * This function returns the base pointer of a CANN buffer, which points to the * device memory allocated for the buffer. * * @param buffer The CANN buffer whose base pointer is to be retrieved. * @return A pointer to the base of the device memory allocated for the buffer. */ static void * ggml_backend_cann_buffer_get_base(ggml_backend_buffer_t buffer) { ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; return ctx->dev_ptr; } /** * @brief Transform quantized Q4.0 tensor data into a format suitable for CANN * processing. * * This function transforms quantized Q4.0 tensor data into a format suitable * for CANN processing. It extracts quantization values and scales from the * source data and prepares them in a format expected by CANN operations. * * @param tensor Pointer to the tensor information. * @param src Pointer to the source data in Q4.0 format. * @param dst Pointer to the destination buffer where transformed data will be * stored. 
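*
* Editor-added layout sketch of the destination buffer (derived from the code
* below): the packed 4-bit quants come first, followed by one fp16 scale per
* QK4_0-sized group:
* @code
* const int64_t n           = ggml_nelements(tensor);
* const size_t  quant_bytes = (size_t) n / 2;                        // two int4 values per byte
* const size_t  scale_bytes = sizeof(uint16_t) * (size_t) (n / QK4_0);
* // dst = [ quant_bytes of packed quants | scale_bytes of fp16 scales ]
* @endcode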
*/ static void ggml_backend_cann_transform_q4_0(ggml_tensor * tensor, const void * src, void * dst) { int64_t n_elems = ggml_nelements(tensor); int64_t groups = n_elems / QK4_0; size_t quant_bytes = n_elems * sizeof(uint8_t) / 2; uint8_t * quant_offset = (uint8_t *) dst; uint16_t * scale_offset = (uint16_t *) ((char *) dst + quant_bytes); for (int i = 0; i < groups; i++) { const block_q4_0 * group = (const block_q4_0 *) ((const char *) src + i * sizeof(block_q4_0)); *scale_offset = group->d; scale_offset++; // 0-15 for (int j = 0; j < QK4_0 / 2; j += 2) { (*quant_offset) = (group->qs[j] & 0x0F); (*quant_offset) |= ((group->qs[j + 1] << 4)); quant_offset++; } // 16-31 for (int j = 0; j < QK4_0 / 2; j += 2) { (*quant_offset) = (group->qs[j] >> 4); (*quant_offset) |= (group->qs[j + 1] & 0xF0); quant_offset++; } } // put (uint4b_t -8) into int4b_t for (quant_offset = (uint8_t *) dst; quant_offset < (uint8_t *) dst + quant_bytes; quant_offset++) { (*quant_offset) ^= 0x88; } } /** * @brief Transform CANN processed data back into quantized Q4.0 format. * * This function transforms CANN processed data back into quantized Q4.0 format. * It reverses the transformation performed by * ggml_backend_cann_transform_q4_0(), converting the data back into its * original quantized form. * * @param tensor Pointer to the tensor information. * @param src Pointer to the source buffer containing transformed data. * @param dst Pointer to the destination buffer where the Q4.0 formatted data * will be stored. */ static void ggml_backend_cann_transform_back_q4_0(const ggml_tensor * tensor, void * src, void * dst) { int64_t n_elems = ggml_nelements(tensor); int64_t groups = n_elems / QK4_0; size_t quant_bytes = n_elems * sizeof(uint8_t) / 2; uint8_t * quant_offset = (uint8_t *) src; uint16_t * scale_offset = (uint16_t *) ((char *) src + quant_bytes); for (; quant_offset < (uint8_t *) src + quant_bytes; quant_offset++) { (*quant_offset) ^= 0x88; } quant_offset = (uint8_t *) src; for (int i = 0; i < groups; i++) { block_q4_0 * group = (block_q4_0 *) ((char *) dst + i * sizeof(block_q4_0)); group->d = *scale_offset; scale_offset++; // 0-15 for (int j = 0; j < QK4_0 / 2; j += 2) { group->qs[j] = ((*quant_offset) & 0x0F); group->qs[j + 1] = ((*quant_offset) >> 4); quant_offset++; } // 16-31 for (int j = 0; j < QK4_0 / 2; j += 2) { group->qs[j] |= ((*quant_offset) << 4); group->qs[j + 1] |= ((*quant_offset) & 0xF0); quant_offset++; } } } /** * @brief Transform quantized Q8.0 tensor data into a format suitable for CANN * processing. * * This function transforms quantized Q8.0 tensor data into a format suitable * for CANN processing. It extracts quantization values and scales from the * source data and prepares them in a format expected by CANN operations. * * @param tensor Pointer to the tensor information. * @param src Pointer to the source data in Q8.0 format. * @param dst Pointer to the destination buffer where transformed data will be * stored. 
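*
* Editor-added layout sketch (derived from the code below): the int8 quants of
* all groups come first, followed by one fp16 scale per QK8_0-sized group:
* @code
* const int64_t n           = ggml_nelements(tensor);
* const size_t  quant_bytes = (size_t) n;                            // one int8 per element
* const size_t  scale_bytes = sizeof(uint16_t) * (size_t) (n / QK8_0);
* @endcode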
*/ static void ggml_backend_cann_transform_q8_0(ggml_tensor * tensor, const void * src, void * dst) { int64_t n_elems = ggml_nelements(tensor); int64_t groups = n_elems / QK8_0; size_t quant_bytes = n_elems * sizeof(uint8_t); uint8_t * quant_offset = (uint8_t *) dst; uint16_t * scale_offset = (uint16_t *) ((char *) dst + quant_bytes); for (int i = 0; i < groups; i++) { const block_q8_0 * group = (const block_q8_0 *) ((const char *) src + i * sizeof(block_q8_0)); *scale_offset = group->d; scale_offset++; size_t group_quant_size = QK8_0 * sizeof(uint8_t); memcpy(quant_offset, group->qs, group_quant_size); quant_offset += group_quant_size; } } /** * @brief Transform CANN processed data back into quantized Q8.0 format. * * This function transforms CANN processed data back into quantized Q8.0 format. * It reverses the transformation performed by * ggml_backend_cann_transform_q8_0(), converting the data back into its * original quantized form. * * @param tensor Pointer to the tensor information. * @param src Pointer to the source buffer containing transformed data. * @param dst Pointer to the destination buffer where the Q8.0 formatted data * will be stored. */ static void ggml_backend_cann_transform_back_q8_0(const ggml_tensor * tensor, const void * src, void * dst) { int64_t n_elems = ggml_nelements(tensor); int64_t groups = n_elems / QK8_0; size_t quant_bytes = n_elems * sizeof(uint8_t); const uint8_t * quant_offset = (const uint8_t *) src; const uint16_t * scale_offset = (const uint16_t *) ((const char *) src + quant_bytes); for (int i = 0; i < groups; i++) { block_q8_0 * group = (block_q8_0 *) ((char *) dst + i * sizeof(block_q8_0)); group->d = *scale_offset; scale_offset++; size_t group_quant_size = QK8_0 * sizeof(uint8_t); memcpy(group->qs, quant_offset, group_quant_size); quant_offset += group_quant_size; } } /** * @brief Transform tensor data based on its type for CANN processing. * * This function transforms tensor data based on its quantization type for CANN * processing. It dispatches the transformation based on the tensor's type to * specialized functions handling Q4.0 and Q8.0 formats. * * @param tensor Pointer to the tensor information. * @param src Pointer to the source data to be transformed. * @param dst Pointer to the destination buffer where transformed data will be * stored. */ static void ggml_backend_cann_transform(ggml_tensor * tensor, const void * src, void * dst) { switch (tensor->type) { case GGML_TYPE_Q4_0: ggml_backend_cann_transform_q4_0(tensor, src, dst); break; case GGML_TYPE_Q8_0: ggml_backend_cann_transform_q8_0(tensor, src, dst); break; default: break; } } /** * @brief Transform CANN processed data back into tensor data based on its type. * * This function transforms CANN processed data back into tensor data based on * its quantization type for Q4.0 and Q8.0 formats. It dispatches the * transformation based on the tensor's type to specialized functions. * * @param tensor Pointer to the tensor information. * @param src Pointer to the source data containing CANN processed data. * @param dst Pointer to the destination buffer where transformed tensor data * will be stored. */ static void ggml_backend_cann_transform_back(const ggml_tensor * tensor, void * src, void * dst) { switch (tensor->type) { case GGML_TYPE_Q4_0: ggml_backend_cann_transform_back_q4_0(tensor, src, dst); break; case GGML_TYPE_Q8_0: ggml_backend_cann_transform_back_q8_0(tensor, src, dst); break; default: break; } } /** * @brief Check if transformation is needed for a given tensor type. 
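*
* Editor-added examples of the current behaviour (see the switch below):
* @code
* need_transform(GGML_TYPE_Q4_0);  // true  - repacked for CANN
* need_transform(GGML_TYPE_Q8_0);  // true  - repacked for CANN
* need_transform(GGML_TYPE_F16);   // false - copied as-is
* @endcode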
* * This function checks if transformation is needed for a given tensor type * to prepare data for CANN processing. * * @param type The tensor type to check. * @return true if transformation is needed, false otherwise. */ static bool need_transform(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q8_0: return true; default: return false; } } /** * @brief Initialize a tensor using data from a CANN buffer. * * This function initializes a tensor using data from a CANN buffer. * It handles special cases such as views and quantization. * * @param buffer The CANN buffer from which to initialize the tensor. * @param tensor Pointer to the tensor to be initialized. */ static enum ggml_status ggml_backend_cann_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { if (tensor->view_src != NULL && tensor->view_offs == 0) { GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft); return GGML_STATUS_SUCCESS; } // TODO: cann backend doesn't support quantized yet. Just leave the code // here. if (ggml_is_quantized(tensor->type)) { // Initialize padding to 0 to avoid possible NaN values size_t original_size = ggml_nbytes(tensor); size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor); if (padded_size > original_size && tensor->view_src == nullptr) { size_t memset_size = padded_size - original_size; ACL_CHECK(aclrtMemset((char *) tensor->data + original_size, memset_size, 0, memset_size)); } } return GGML_STATUS_SUCCESS; } /** * @brief Workspace for caching NZ buffers per device. * * This struct manages a device buffer used in NZ computations. It supports * allocation, reallocation, and clearing of cached memory. The struct is * designed to be used with a global array, one per device. */ struct ggml_cann_nz_workspace { void * ptr; // Pointer to allocated device buffer size_t allocated; // Size of currently allocated buffer in bytes /** * @brief Constructor. Initializes the workspace with no allocated memory. */ ggml_cann_nz_workspace() : ptr(nullptr), allocated(0) {} /** * @brief Free cached memory and reset the workspace. * * If a buffer has been allocated, this function releases it using * aclrtFree and resets internal state. */ void clear() { if (ptr) { ACL_CHECK(aclrtFree(ptr)); ptr = nullptr; allocated = 0; } } /** * @brief Allocate or reallocate the workspace buffer. * * If the requested size is larger than the currently allocated size, * the old buffer will be freed and a new buffer of the requested size * will be allocated on the device. * * @param new_size Size in bytes to allocate for the workspace. */ void realloc(size_t new_size) { if (new_size > allocated) { clear(); ACL_CHECK(aclrtMalloc(&ptr, new_size, ACL_MEM_MALLOC_HUGE_FIRST)); allocated = new_size; } } /** * @brief Get the device buffer pointer. * * @return Pointer to the allocated buffer, or nullptr if not allocated. */ void * get() const { return ptr; } }; /** * @brief Global array of NZ workspaces, one per device. */ static ggml_cann_nz_workspace g_nz_workspaces[GGML_CANN_MAX_DEVICES]; /** * @brief Convert tensor weights to NZ format using Ascend CANN API. * * This function creates a transposed tensor descriptor and performs the * TransMatmulWeight operation. Converting tensor formats can significantly * improve performance on certain hardware. * * @param tensor Pointer to the input ggml_tensor containing the weights. * @param offset Byte offset within the tensor data buffer where weights start. * @param device device id. 
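*
* Editor-added sketch of the trigger path (mirrors
* ggml_backend_cann_buffer_set_tensor() below): the conversion only runs for 2D
* matmul weights and only when GGML_CANN_WEIGHT_NZ is enabled:
* @code
* static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on"));
* if (weight_to_nz && is_matmul_weight(tensor)) {
*     weight_format_to_nz(tensor, offset, device);
* }
* @endcode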
* * @note The workspace buffer used in this function is managed globally and reused * across calls. This reduces overhead from repeated memory allocation and deallocation. */ static void weight_format_to_nz(ggml_tensor * tensor, size_t offset, int device) { acl_tensor_ptr weightTransposed = ggml_cann_create_tensor(tensor, tensor->ne, tensor->nb, 2, ACL_FORMAT_ND, offset); uint64_t workspaceSize = 0; aclOpExecutor * executor; // TransMatmulWeight ACL_CHECK(aclnnTransMatmulWeightGetWorkspaceSize(weightTransposed.get(), &workspaceSize, &executor)); // Avoid frequent malloc/free of the workspace. g_nz_workspaces[device].realloc(workspaceSize); void * g_nz_workspace = g_nz_workspaces[device].get(); ACL_CHECK(aclnnTransMatmulWeight(g_nz_workspace, workspaceSize, executor, nullptr)); } // TODO: need handle tensor which has paddings. /** * @brief Set tensor data in a CANN buffer. * * This function sets tensor data in a CANN buffer, handling transformations * if needed based on the tensor's type. * * @param buffer The CANN buffer where the tensor data will be set. * @param tensor Pointer to the tensor whose data will be set. * @param data Pointer to the source data to be copied into the tensor. * @param offset Offset in the source data from where to start copying. * @param size Size of the data to be copied, in bytes. */ static void ggml_backend_cann_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; ggml_cann_set_device(ctx->device); // TODO: refer to cann(#6017), it use thread's default stream. // For acl, synchronous functions use this default stream. // Why aclrtSynchronizeDevice? // Only check env once. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on")); if (!need_transform(tensor->type)) { ACL_CHECK(aclrtMemcpy((char *) tensor->data + offset, size, data, size, ACL_MEMCPY_HOST_TO_DEVICE)); if (weight_to_nz && is_matmul_weight((const ggml_tensor *) tensor)) { GGML_ASSERT(tensor->ne[2] == 1); GGML_ASSERT(tensor->ne[3] == 1); weight_format_to_nz(tensor, offset, ctx->device); } } else { void * transform_buffer = malloc(size); ggml_backend_cann_transform(tensor, data, transform_buffer); ACL_CHECK(aclrtMemcpy((char *) tensor->data + offset, size, transform_buffer, size, ACL_MEMCPY_HOST_TO_DEVICE)); free(transform_buffer); } } /** * @brief Get tensor data from a CANN buffer. * * This function retrieves tensor data from a CANN buffer, handling * transformations if needed based on the tensor's type. * * @param buffer The CANN buffer from which to retrieve tensor data. * @param tensor Pointer to the tensor whose data will be retrieved. * @param data Pointer to the destination buffer where the tensor data will be * copied. * @param offset Offset in the destination buffer where to start copying. * @param size Size of the data to be copied, in bytes. 
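*
* Editor-added example of a typical caller: user code goes through the generic
* backend API, which routes here for tensors stored in a CANN buffer (`t` is a
* placeholder F32 tensor):
* @code
* std::vector<float> host(ggml_nelements(t));
* ggml_backend_tensor_get(t, host.data(), 0, ggml_nbytes(t));
* @endcode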
*/ static void ggml_backend_cann_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; ggml_cann_set_device(ctx->device); if (!need_transform(tensor->type)) { ACL_CHECK(aclrtMemcpy(data, size, (char *) tensor->data + offset, size, ACL_MEMCPY_DEVICE_TO_HOST)); } else { void * transform_buffer = malloc(size); ACL_CHECK(aclrtMemcpy(transform_buffer, size, (char *) tensor->data + offset, size, ACL_MEMCPY_DEVICE_TO_HOST)); ggml_backend_cann_transform_back(tensor, transform_buffer, data); free(transform_buffer); } } /** * @brief Copy tensor data between CANN buffers if possible. * * This function copies tensor data between CANN buffers if the source and * destination buffers are CANN buffers and they meet the necessary conditions * (same device or devices can access each other). * * @param buffer The destination CANN buffer where the tensor data will be * copied. * @param src Pointer to the source tensor whose data will be copied. * @param dst Pointer to the destination tensor where the data will be copied. * @return true if the copy operation succeeded, false otherwise. */ static bool ggml_backend_cann_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { if (ggml_backend_buffer_is_cann(src->buffer)) { ggml_backend_cann_buffer_context * src_ctx = (ggml_backend_cann_buffer_context *) src->buffer->context; ggml_backend_cann_buffer_context * dst_ctx = (ggml_backend_cann_buffer_context *) buffer->context; size_t memcpy_size = ggml_nbytes(src); // Same device. if (src_ctx->device == dst_ctx->device) { ACL_CHECK(aclrtMemcpy((char *) dst->data, memcpy_size, (const char *) src->data, memcpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE)); return true; } else { #ifdef ASCEND_310P // TODO: Support 310p P2P copy return false; #endif // Different device but can access by peer. int32_t canAccessPeer = 0; ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device, dst_ctx->device)); if (canAccessPeer) { ggml_cann_set_device(src_ctx->device); ACL_CHECK(aclrtDeviceEnablePeerAccess(dst_ctx->device, 0)); ACL_CHECK(aclrtMemcpy((char *) dst->data, memcpy_size, (const char *) src->data, memcpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE)); return true; } } } return false; } /** * @brief Clear a CANN buffer by setting all its memory to a specified value. * * This function clears a CANN buffer by setting all its memory to a specified * value. * * @param buffer The CANN buffer to be cleared. * @param value The value to which each byte in the buffer will be set. */ static void ggml_backend_cann_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; ggml_cann_set_device(ctx->device); ACL_CHECK(aclrtMemset(ctx->dev_ptr, buffer->size, value, buffer->size)); } /** * @brief Interface for a CANN buffer in the backend. * * This structure defines function pointers to operations that can be performed * on a CANN buffer within the backend. 
*/ static const ggml_backend_buffer_i ggml_backend_cann_buffer_interface = { /* .free_buffer = */ ggml_backend_cann_buffer_free_buffer, /* .get_base = */ ggml_backend_cann_buffer_get_base, /* .init_tensor = */ ggml_backend_cann_buffer_init_tensor, /* .memset_tensor = */ NULL, /* .set_tensor = */ ggml_backend_cann_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cann_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_cann_buffer_cpy_tensor, /* .clear = */ ggml_backend_cann_buffer_clear, /* .reset = */ NULL, }; // cann buffer type /** * @brief Structure representing context information for a specific backend * buffer type. */ struct ggml_backend_cann_buffer_type_context { int32_t device; /**< Device identifier associated with the buffer context. */ std::string name; /**< Name associated with the buffer context. */ }; /** * @brief Retrieves the name associated with a CANN buffer type. * * This function returns the descriptive name associated with the specified * CANN buffer type context. * * @param buft Pointer to the buffer type context. * @return Const pointer to the C-style string containing the name. */ static const char * ggml_backend_cann_buffer_type_name(ggml_backend_buffer_type_t buft) { ggml_backend_cann_buffer_type_context * buft_ctx = (ggml_backend_cann_buffer_type_context *) buft->context; return buft_ctx->name.c_str(); } /** * @brief Allocates a new CANN buffer of the specified type and size. * * This function allocates a new CANN buffer on the specified device with the * given size. * * @param buft Pointer to the buffer type context. * @param size Size in bytes of the buffer to allocate. * @return Pointer to the allocated buffer, or nullptr if allocation fails. */ static ggml_backend_buffer_t ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { ggml_backend_cann_buffer_type_context * buft_ctx = (ggml_backend_cann_buffer_type_context *) buft->context; ggml_cann_set_device(buft_ctx->device); const size_t alignment = 128; size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } void * dev_ptr; aclError err = aclrtMalloc(&dev_ptr, size, ACL_MEM_MALLOC_HUGE_FIRST); if (err != ACL_SUCCESS) { GGML_LOG_ERROR("%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n", __func__, size / 1024.0 / 1024.0, buft_ctx->device, aclGetRecentErrMsg()); return nullptr; } ggml_backend_cann_buffer_context * ctx = new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr); return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface, ctx, size); } /** * @brief Retrieves the memory alignment requirement for CANN buffers of this * type. * * This function returns the alignment requirement in bytes for memory allocated * by the CANN buffer type. * * @param buft Pointer to the buffer type context (unused in this * implementation). * @return The alignment requirement in bytes (fixed at 128 bytes for CANN * buffers). */ static size_t ggml_backend_cann_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 128; GGML_UNUSED(buft); } /** * @brief Calculates the allocation size required for a tensor in a CANN buffer. * * Computes the total allocation size needed for storing the tensor's data in a * CANN buffer, considering any necessary padding or adjustments for quantized * types. * * @param buft Pointer to the buffer type context (unused in this * implementation). * @param tensor Pointer to the tensor for which the allocation size is * calculated. 
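*
* Editor-added sketch of the padding rule for quantized tensors (taken from the
* implementation below): rows whose ne[0] is not a multiple of MATRIX_ROW_PADDING
* get extra space so kernels can safely read a full padded row:
* @code
* size_t size = ggml_nbytes(tensor);
* if (ggml_is_quantized(tensor->type) && tensor->ne[0] % MATRIX_ROW_PADDING != 0) {
*     size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - tensor->ne[0] % MATRIX_ROW_PADDING);
* }
* @endcode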
* @return The total allocation size in bytes required for the tensor in the * CANN buffer. */ static size_t ggml_backend_cann_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { size_t size = ggml_nbytes(tensor); int64_t ne0 = tensor->ne[0]; // Only check env once. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on")); // last line must bigger than 32, because every single op deal at // least 32 bytes. // TODO: quantized type? // int64_t line_size = ne0 * ggml_element_size(tensor); // int64_t line_size_align_32 = (line_size + 31) & ~31; // size += (line_size_align_32 - line_size); if (ggml_is_quantized(tensor->type)) { if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } } else if (weight_to_nz && is_matmul_weight((const ggml_tensor *) tensor)) { // NZ format weight are not support quantized yet. // If ND tensor transform to NZ, size may changed. int64_t shape[] = { tensor->ne[1], tensor->ne[0] }; GGML_ASSERT(tensor->ne[2] == 1); GGML_ASSERT(tensor->ne[3] == 1); const aclIntArray * acl_shape = aclCreateIntArray(shape, 2); size_t new_size; ACL_CHECK(aclnnCalculateMatmulWeightSizeV2(acl_shape, ggml_cann_type_mapping(tensor->type), &new_size)); ACL_CHECK(aclDestroyIntArray(acl_shape)); size = std::max(size, new_size); } return size; GGML_UNUSED(buft); } static bool ggml_backend_cann_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } /** * @brief Interface for managing CANN buffer types in the GGML backend. * * Provides function pointers for allocating, querying properties, and managing * memory for CANN buffer types in the GGML backend. */ static const ggml_backend_buffer_type_i ggml_backend_cann_buffer_type_interface = { /* .get_name = */ ggml_backend_cann_buffer_type_name, /* .alloc_buffer = */ ggml_backend_cann_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cann_buffer_type_get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cann_buffer_type_get_alloc_size, /* .is_host = */ ggml_backend_cann_buffer_type_is_host, }; /** * @brief Retrieves the CANN buffer type for a specified device. * * This function initializes and returns the buffer type interface associated * with the given device. It ensures thread-safe access using a mutex. * * @param device The device index for which to retrieve the buffer type. * @return A pointer to the buffer type interface for the specified device, or * nullptr if the device index is out of range. */ ggml_backend_buffer_type_t ggml_backend_cann_buffer_type(int32_t device) { static std::mutex mutex; std::lock_guard lock(mutex); if (device >= ggml_backend_cann_get_device_count()) { return nullptr; } static ggml_backend_buffer_type ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES]; static bool ggml_backend_cann_buffer_type_initialized = false; if (!ggml_backend_cann_buffer_type_initialized) { for (int32_t i = 0; i < ggml_cann_info().device_count; i++) { ggml_backend_cann_buffer_types[i] = { /* .iface = */ ggml_backend_cann_buffer_type_interface, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), i), /* .context = */ new ggml_backend_cann_buffer_type_context{ i, "CANN" + std::to_string(i) }, }; } ggml_backend_cann_buffer_type_initialized = true; } return &ggml_backend_cann_buffer_types[device]; } /** * @brief Retrieves the name associated with a CANN host buffer type. 
* * This function returns the descriptive name associated with the specified * CANN host buffer type context. * * @param buft Pointer to the host buffer type context. * @return Const pointer to the C-style string containing the name. */ static const char * ggml_backend_cann_host_buffer_type_name(ggml_backend_buffer_type_t buft) { return "CANN_Host"; GGML_UNUSED(buft); } /** * @brief Retrieves the name associated with a CANN host buffer. * * This function returns the descriptive name associated with the specified * CANN host buffer context. * * @param buft Pointer to the host buffer context. * @return Const pointer to the C-style string containing the name. */ static const char * ggml_backend_cann_host_buffer_name(ggml_backend_buffer_t buffer) { return "CANN_Host"; GGML_UNUSED(buffer); } /** * @brief Free resources associated with a CANN host buffer. * * This function frees the resources associated with a CANN host buffer, including * its context. * * @param buffer The CANN host buffer to free. */ static void ggml_backend_cann_host_buffer_free(ggml_backend_buffer_t buffer) { ACL_CHECK(aclrtFreeHost(buffer->context)); } /** * @brief Allocates a new CANN host buffer of the specified size. * * This function allocates a new CANN host buffer with the given size. * @param size Size in bytes of the host buffer to allocate. * @return Pointer to the allocated host buffer, or nullptr if allocation fails. */ static void * ggml_cann_host_malloc(size_t size) { if (getenv("GGML_CANN_NO_PINNED") != nullptr) { return nullptr; } const size_t alignment = 128; size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } void * hostPtr = nullptr; aclError err = aclrtMallocHost((void **) &hostPtr, size); if (err != ACL_SUCCESS) { GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, aclGetRecentErrMsg()); return nullptr; } return hostPtr; } /** * @brief Allocates a new CANN host buffer of the specified type and size. * * @param buft Pointer to the host buffer type context. * @param size Size in bytes of the host buffer to allocate. * @return Pointer to the allocated host buffer, or CPU buffer pointer if allocation fails. */ static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * hostPtr = ggml_cann_host_malloc(size); if (hostPtr == nullptr) { // fallback to cpu buffer return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); } ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(hostPtr, size); buffer->buft = buft; buffer->iface.free_buffer = ggml_backend_cann_host_buffer_free; return buffer; } /** * @brief Interface for managing CANN host buffer types in the GGML backend. * * Provides function pointers for allocating, querying properties, and managing * memory for CANN buffer types in the GGML backend. 
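 *
 * @note Illustrative usage sketch (not part of the original documentation);
 * it relies only on the public ggml-backend buffer API. If pinned allocation
 * fails, or the GGML_CANN_NO_PINNED environment variable is set, the returned
 * buffer transparently falls back to a regular CPU buffer.
 * @code
 * // allocate 16 MiB of (preferably pinned) host staging memory
 * ggml_backend_buffer_type_t host_buft = ggml_backend_cann_host_buffer_type();
 * ggml_backend_buffer_t      host_buf  = ggml_backend_buft_alloc_buffer(host_buft, 16u * 1024 * 1024);
 * void * staging = ggml_backend_buffer_get_base(host_buf);
 * // ... fill `staging`, then upload it with ggml_backend_tensor_set() ...
 * ggml_backend_buffer_free(host_buf);
 * @endcode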
*/ ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type() { static struct ggml_backend_buffer_type ggml_backend_cann_buffer_type_host = { /* .iface = */ { /* .get_name = */ ggml_backend_cann_host_buffer_type_name, /* .alloc_buffer = */ ggml_backend_cann_host_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0), /* .context = */ nullptr, }; return &ggml_backend_cann_buffer_type_host; } /** * @brief Computes the forward operation for a given tensor using CANN * operations. * * This function selects the appropriate CANN operation based on the type of * operation specified in the tensor and performs the computation. * * @param ctx The CANN context containing necessary resources and * configurations. * @param dst The destination tensor where the result of the computation will be * stored. * @return true if the computation was successful; false otherwise. */ static bool ggml_cann_compute_forward(ggml_backend_cann_context & ctx, struct ggml_tensor * dst) { switch (dst->op) { case GGML_OP_REPEAT: ggml_cann_repeat(ctx, dst); break; case GGML_OP_GET_ROWS: ggml_cann_get_rows(ctx, dst); break; case GGML_OP_SET_ROWS: ggml_cann_set_rows(ctx, dst); break; case GGML_OP_DUP: ggml_cann_dup(ctx, dst); break; case GGML_OP_ADD: case GGML_OP_ADD1: ggml_cann_binary_op(ctx, dst); break; case GGML_OP_SUB: ggml_cann_binary_op(ctx, dst); break; case GGML_OP_ACC: ggml_cann_acc(ctx, dst); break; case GGML_OP_MUL: ggml_cann_binary_op(ctx, dst); break; case GGML_OP_DIV: ggml_cann_binary_op(ctx, dst); break; case GGML_OP_UNARY: switch (ggml_get_unary_op(dst)) { case GGML_UNARY_OP_ABS: GGML_CANN_CALL_OP_UNARY(Abs); break; case GGML_UNARY_OP_NEG: GGML_CANN_CALL_OP_UNARY(Neg); break; case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_GELU_ERF: // aclnnGelu internally uses the erf-based approximation. GGML_CANN_CALL_OP_UNARY(Gelu); break; case GGML_UNARY_OP_SILU: GGML_CANN_CALL_OP_UNARY(Silu); break; case GGML_UNARY_OP_GELU_QUICK: { auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst); }; ggml_cann_op_unary(lambda, ctx, dst); } break; case GGML_UNARY_OP_TANH: GGML_CANN_CALL_OP_UNARY(Tanh); break; case GGML_UNARY_OP_RELU: GGML_CANN_CALL_OP_UNARY(Relu); break; case GGML_UNARY_OP_SIGMOID: GGML_CANN_CALL_OP_UNARY(Sigmoid); break; case GGML_UNARY_OP_HARDSIGMOID: GGML_CANN_CALL_OP_UNARY(Hardsigmoid); break; case GGML_UNARY_OP_HARDSWISH: GGML_CANN_CALL_OP_UNARY(Hardswish); break; case GGML_UNARY_OP_EXP: GGML_CANN_CALL_OP_UNARY(Exp); break; case GGML_UNARY_OP_ELU: ggml_cann_elu(ctx, dst); break; case GGML_UNARY_OP_SGN: GGML_CANN_CALL_OP_UNARY(Sign); break; case GGML_UNARY_OP_STEP: ggml_cann_step(ctx, dst); break; default: return false; } break; case GGML_OP_GLU: switch (ggml_get_glu_op(dst)) { case GGML_GLU_OP_REGLU: GGML_CANN_CALL_OP_UNARY_GATED(Relu); break; case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_GEGLU_ERF: // aclnnGelu internally uses the erf-based approximation. 
GGML_CANN_CALL_OP_UNARY_GATED(Gelu); break; case GGML_GLU_OP_SWIGLU: GGML_CANN_CALL_OP_UNARY_GATED(Silu); break; case GGML_GLU_OP_GEGLU_QUICK: { auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst); }; ggml_cann_op_unary_gated(lambda, ctx, dst); } break; default: return false; } break; case GGML_OP_NORM: ggml_cann_norm(ctx, dst); break; case GGML_OP_GROUP_NORM: ggml_cann_group_norm(ctx, dst); break; case GGML_OP_L2_NORM: ggml_cann_l2_norm(ctx, dst); break; case GGML_OP_CROSS_ENTROPY_LOSS: ggml_cann_cross_entropy_loss(ctx, dst); break; case GGML_OP_CONCAT: ggml_cann_concat(ctx, dst); break; case GGML_OP_UPSCALE: ggml_cann_upsample_nearest2d(ctx, dst); break; case GGML_OP_PAD: ggml_cann_pad(ctx, dst); break; case GGML_OP_ARANGE: ggml_cann_arange(ctx, dst); break; case GGML_OP_TIMESTEP_EMBEDDING: ggml_cann_timestep_embedding(ctx, dst); break; case GGML_OP_LEAKY_RELU: ggml_cann_leaky_relu(ctx, dst); break; case GGML_OP_RMS_NORM: ggml_cann_rms_norm(ctx, dst); break; case GGML_OP_MUL_MAT: ggml_cann_mul_mat(ctx, dst); break; case GGML_OP_MUL_MAT_ID: ggml_cann_mul_mat_id(ctx, dst); break; case GGML_OP_SCALE: ggml_cann_scale(ctx, dst); break; case GGML_OP_SQR: GGML_ASSERT(dst->src[1] == nullptr); dst->src[1] = dst->src[0]; ggml_cann_binary_op(ctx, dst); break; case GGML_OP_SQRT: GGML_CANN_CALL_OP_UNARY(Sqrt); break; case GGML_OP_CLAMP: ggml_cann_clamp(ctx, dst); break; case GGML_OP_CPY: ggml_cann_cpy(ctx, dst); break; case GGML_OP_CONT: ggml_cann_dup(ctx, dst); break; case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: break; case GGML_OP_DIAG_MASK_INF: ggml_cann_diag_mask(ctx, dst, -INFINITY); break; case GGML_OP_SOFT_MAX: ggml_cann_softmax(ctx, dst); break; case GGML_OP_ROPE: ggml_cann_rope(ctx, dst); break; case GGML_OP_IM2COL: ggml_cann_im2col(ctx, dst); break; case GGML_OP_POOL_2D: ggml_cann_pool2d(ctx, dst); break; case GGML_OP_SUM: ggml_cann_sum(ctx, dst); break; case GGML_OP_SUM_ROWS: ggml_cann_sum_rows(ctx, dst); break; case GGML_OP_ARGSORT: ggml_cann_argsort(ctx, dst); break; case GGML_OP_ARGMAX: ggml_cann_argmax(ctx, dst); break; case GGML_OP_COS: ggml_cann_op_unary(ctx, dst); break; case GGML_OP_SIN: ggml_cann_op_unary(ctx, dst); break; case GGML_OP_CONV_TRANSPOSE_1D: ggml_cann_conv_transpose_1d(ctx, dst); break; case GGML_OP_LOG: GGML_CANN_CALL_OP_UNARY(Log); break; case GGML_OP_MEAN: ggml_cann_mean(ctx, dst); break; case GGML_OP_PAD_REFLECT_1D: ggml_cann_pad_reflect_1d(ctx, dst); break; case GGML_OP_COUNT_EQUAL: ggml_cann_count_equal(ctx, dst); break; case GGML_OP_FLASH_ATTN_EXT: ggml_cann_flash_attn_ext(ctx, dst); break; case GGML_OP_OUT_PROD: ggml_cann_out_prod(ctx, dst); case GGML_OP_SSM_CONV: ggml_cann_ssm_conv(ctx, dst); break; default: return false; } return true; } // backend /** * @brief Retrieves the name associated with the CANN backend. * * This function returns the name assigned to the CANN backend, which is stored * in the context of the provided backend structure. * * @param backend Pointer to the CANN backend structure. * @return A pointer to a constant string representing the backend name. */ static const char * ggml_backend_cann_name(ggml_backend_t backend) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; return cann_ctx->name.c_str(); } /** * @brief Frees resources associated with the CANN backend. 
* * This function releases resources associated with the CANN backend context * and resets the device associated with the backend to its initial state. * * @param backend Pointer to the CANN backend structure to be freed. */ static void ggml_backend_cann_free(ggml_backend_t backend) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ACL_CHECK(aclrtSynchronizeDevice()); ACL_CHECK(aclrtResetDevice(cann_ctx->device)); delete cann_ctx; delete backend; } /** * @brief Sets tensor data asynchronously in the CANN backend. * * This function asynchronously sets tensor data in the CANN backend. * * @param backend Pointer to the CANN backend structure. * @param tensor Pointer to the tensor structure to set data for. * @param data Pointer to the host data to copy to the tensor. * @param offset Offset in bytes within the host data. * @param size Size of the data to copy in bytes. */ static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) && "unsupported buffer type"); GGML_ASSERT(!ggml_is_quantized(tensor->type)); ACL_CHECK(aclrtMemcpyAsync((char *) tensor->data + offset, size, data, size, ACL_MEMCPY_HOST_TO_DEVICE, cann_ctx->stream())); } /** * @brief Gets tensor data asynchronously in the CANN backend. * * This function asynchronously gets tensor data in the CANN backend. * * @param backend Pointer to the CANN backend structure. * @param tensor Pointer to the tensor structure to get data from. * @param data Pointer to the host data to copy from the tensor. * @param offset Offset in bytes within the host data. * @param size Size of the data to copy in bytes. */ static void ggml_backend_cann_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) && "unsupported buffer type"); GGML_ASSERT(!ggml_is_quantized(tensor->type)); ACL_CHECK(aclrtMemcpyAsync(data, size, (char *) tensor->data + offset, size, ACL_MEMCPY_DEVICE_TO_HOST, cann_ctx->stream())); } /** * @brief Asynchronously copies tensor data between CANN backends. * * This function copies tensor data asynchronously between two CANN backends. It * checks if both tensors reside in CANN buffers and whether the devices support * peer-to-peer access for direct copying. If not, it returns false. * * @param backend_src Pointer to the source CANN backend structure. * @param backend_dst Pointer to the destination CANN backend structure. * @param src Pointer to the source tensor to copy data from. * @param dst Pointer to the destination tensor to copy data to. * @return true if the copy operation succeeds, false otherwise. 
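 *
 * @note This callback is normally reached through the public
 * ggml_backend_tensor_copy_async() helper. Illustrative sketch (not part of
 * the original documentation): `backend_cann0` / `backend_cann1` are assumed
 * to come from ggml_backend_cann_init(0) / ggml_backend_cann_init(1), with
 * `src` and `dst` allocated in the corresponding device buffers. When this
 * function returns false (e.g. no peer access), the helper is expected to
 * fall back to a synchronized blocking copy.
 * @code
 * ggml_backend_tensor_copy_async(backend_cann0, backend_cann1, src, dst);
 * ggml_backend_synchronize(backend_cann1);   // make sure dst is ready before reading it
 * @endcode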
*/ static bool ggml_backend_cann_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) { GGML_ASSERT(ggml_backend_is_cann(backend_src) || ggml_backend_is_cann(backend_dst)); GGML_ASSERT(!is_matmul_weight((const ggml_tensor *) src)); if (!ggml_backend_buffer_is_cann(src->buffer) || !ggml_backend_buffer_is_cann(dst->buffer)) { return false; } ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer; ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer; ggml_backend_cann_context * cann_ctx_src = (ggml_backend_cann_context *) backend_src->context; ggml_backend_cann_context * cann_ctx_dst = (ggml_backend_cann_context *) backend_dst->context; size_t copy_size = ggml_nbytes(dst); if (copy_size == 0) { return true; } if (backend_src != backend_dst) { #ifdef ASCEND_310P // TODO: Support 310p P2P copy return false; #endif ggml_backend_cann_buffer_context * buf_ctx_src = (ggml_backend_cann_buffer_context *) buf_src->context; ggml_backend_cann_buffer_context * buf_ctx_dst = (ggml_backend_cann_buffer_context *) buf_dst->context; GGML_ASSERT(cann_ctx_src->device == buf_ctx_src->device); GGML_ASSERT(cann_ctx_dst->device == buf_ctx_dst->device); int32_t canAccessPeer = 0; ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device, cann_ctx_dst->device)); if (!canAccessPeer) { return false; } // need open both directions for memcpyasync between devices. ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_src->device, 0)); ggml_cann_set_device(cann_ctx_src->device); ACL_CHECK(aclrtDeviceEnablePeerAccess(cann_ctx_dst->device, 0)); // wait for task_queue empty to keep task order. ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size, ACL_MEMCPY_DEVICE_TO_DEVICE, cann_ctx_src->stream())); // record event on src stream after the copy // TODO: this event is not effective with acl graph mode, change to use aclrtSynchronizeStream // if (!cann_ctx_src->copy_event) { // ACL_CHECK(aclrtCreateEventWithFlag(&cann_ctx_src->copy_event, ACL_EVENT_SYNC)); // } // ACL_CHECK(aclrtRecordEvent(cann_ctx_src->copy_event, cann_ctx_src->stream())); // // wait on dst stream for the copy to complete // ggml_cann_set_device(cann_ctx_dst->device); // ACL_CHECK(aclrtStreamWaitEvent(cann_ctx_dst->stream(), cann_ctx_src->copy_event)); ACL_CHECK(aclrtSynchronizeStream(cann_ctx_src->stream())); } else { // src and dst are on the same backend ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size, ACL_MEMCPY_DEVICE_TO_DEVICE, cann_ctx_dst->stream())); } return true; } /** * @brief Synchronizes a CANN backend. * * This function synchronizes the specified CANN backend by waiting for all * operations in its associated stream to complete. * * @param backend Pointer to the CANN backend structure to synchronize. */ static void ggml_backend_cann_synchronize(ggml_backend_t backend) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ggml_cann_set_device(cann_ctx->device); ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream())); } /** * @brief Evaluate the computation graph and optionally capture or execute it using CANN graph API. * * If CANN graph execution is enabled and graph capture is required, this function begins * graph capture, runs the graph, ends capture, and stores the captured graph. * * Otherwise, it falls back to op-by-op execution using the CANN compute kernel dispatcher. * * @param cann_ctx The CANN backend context. 
* @param cgraph The ggml computation graph. * @param use_cann_graph Whether to use CANN graph execution. * @param cann_graph_capture_required Whether graph capture is needed due to graph changes. */ static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph, bool use_cann_graph, bool cann_graph_capture_required) { #ifdef USE_ACL_GRAPH if (use_cann_graph && cann_graph_capture_required) { // Begin CANN graph capture ACL_CHECK(aclmdlRICaptureBegin(cann_ctx->stream(), ACL_MODEL_RI_CAPTURE_MODE_GLOBAL)); } #endif // USE_ACL_GRAPH // Only perform the graph execution if CANN graphs are not enabled, or we are capturing the graph. // With the use of CANN graphs, the execution will be performed by the graph launch. if (!use_cann_graph || cann_graph_capture_required) { for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { continue; } bool ok = ggml_cann_compute_forward(*cann_ctx, node); if (!ok) { GGML_LOG_ERROR("%s: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); } GGML_ASSERT(ok); } } #ifdef USE_ACL_GRAPH if (use_cann_graph) { GGML_ASSERT(!cann_ctx->graph_lru_cache.cache_list.empty()); ggml_cann_graph * matched_graph = cann_ctx->graph_lru_cache.cache_list.front(); if (cann_graph_capture_required) { // End CANN graph capture ACL_CHECK(aclmdlRICaptureEnd(cann_ctx->stream(), &matched_graph->graph)); } // Execute CANN graph ACL_CHECK(aclmdlRIExecuteAsync(matched_graph->graph, cann_ctx->stream())); } #endif // USE_ACL_GRAPH } /** * @brief Computes a computational graph using a CANN backend. * * This function computes the operations defined in the computational graph * using the specified CANN backend. * * @param backend Pointer to the CANN backend structure to use for computation. * @param cgraph Pointer to the computational graph structure containing nodes * representing operations to be computed. * @return enum ggml_status Returns GGML_STATUS_SUCCESS if computation * completes successfully, otherwise an appropriate error status. */ static enum ggml_status ggml_backend_cann_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ggml_cann_set_device(cann_ctx->device); g_nz_workspaces[cann_ctx->device].clear(); // calculate rope cache for fist layer in current device. cann_ctx->rope_cache.cached = false; bool graph_capture_required = false; #ifdef USE_ACL_GRAPH bool use_cann_graph = true; static bool prefill_use_graph = parse_bool(get_env("GGML_CANN_PREFILL_USE_GRAPH").value_or("")); if (!prefill_use_graph) { // Do not use acl_graph for prefill. for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; // TODO: Optimize here. Currently, we can only // get seq_len by FA's input. if (node->op == GGML_OP_FLASH_ATTN_EXT) { // Q -> src[0], shape: [B, S, N, D] use_cann_graph = (node->src[0]->ne[1] == 1); break; } } } if (!cann_ctx->acl_graph_mode) { use_cann_graph = false; } if (use_cann_graph) { // If no matching graph is found, the graph needs to be recaptured. graph_capture_required = !cann_ctx->graph_lru_cache.find_and_move_to_front(cgraph); if (graph_capture_required) { // If no matching graph is found, add a new ACL graph. 
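            // create_from_cgraph() only records the ggml graph topology here; the
            // actual ACL graph handle is captured later by
            // evaluate_and_capture_cann_graph(), which expects this new entry to be
            // at the front of graph_lru_cache.cache_list.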
ggml_cann_graph * new_graph = ggml_cann_graph::create_from_cgraph(cgraph); cann_ctx->graph_lru_cache.push(new_graph); } } #else bool use_cann_graph = false; #endif // USE_ACL_GRAPH evaluate_and_capture_cann_graph(cann_ctx, cgraph, use_cann_graph, graph_capture_required); return GGML_STATUS_SUCCESS; } /** * @brief Checks if the CANN backend supports a specific operation. * * This function checks whether the specified operation is supported by the * CANN backend. * * @param backend Pointer to the CANN backend structure to check support for * the operation. * @param op Pointer to the tensor representing the operation to check. * @return bool Returns true if the operation is supported by the backend, * otherwise false. */ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { switch (op->op) { case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_SGN: case GGML_UNARY_OP_STEP: case GGML_UNARY_OP_GELU_ERF: return true; default: return false; } case GGML_OP_GLU: switch (ggml_get_glu_op(op)) { case GGML_GLU_OP_REGLU: case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_SWIGLU: case GGML_GLU_OP_GEGLU_ERF: case GGML_GLU_OP_GEGLU_QUICK: return true; default: return false; } break; case GGML_OP_MUL_MAT: { switch (op->src[0]->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: return true; case GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: #ifdef ASCEND_310P // Q4 && Q8 per group is not support on 310p device return false; #endif // only support contiguous for quantized types. return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); default: return false; } } case GGML_OP_MUL_MAT_ID: switch (op->src[0]->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: return true; case GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: #ifdef ASCEND_310P // Q4 && Q8 per group is not support on 310p device return false; #endif // only support contiguous for quantized types. return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); default: return false; } // embedding case GGML_OP_GET_ROWS: { switch (op->src[0]->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_Q8_0: return true; default: return false; } } break; case GGML_OP_SET_ROWS: { switch (op->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: return true; default: return false; } } break; case GGML_OP_CPY: { ggml_tensor * src = op->src[0]; if ((op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_F16) || (src->type != GGML_TYPE_F32 && src->type != GGML_TYPE_F16)) { // only support F32 and F16. 
return false; } return true; } break; case GGML_OP_CONT: { // TODO: support GGML_TYPE_BF16 switch (op->src[0]->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: return true; default: return false; } } case GGML_OP_ROPE: { if (op->src[0]->ne[0] > 896) { return false; } #ifdef ASCEND_310P // TODO: Support rope_dim < ne00(dim) if (op->src[0]->ne[0] != op->op_params[1]) { return false; } if (!ggml_is_contiguous(op->src[0])) { return false; } #endif return true; } case GGML_OP_UPSCALE: { // aclnnUpsampleNearest2dGetWorkspaceSize not support // selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] not equal if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) { return false; } if (op->op_params[0] != GGML_SCALE_MODE_NEAREST) { return false; } if (op->op_params[0] & GGML_SCALE_FLAG_ANTIALIAS) { return false; } return true; } case GGML_OP_POOL_2D: { const int32_t * opts = (const int32_t *) op->op_params; #ifdef ASCEND_310P enum ggml_op_pool opt = static_cast(opts[0]); if (opt == GGML_OP_POOL_MAX) { return false; } #endif const int k0 = opts[1]; const int k1 = opts[2]; const int p0 = opts[5]; const int p1 = opts[6]; // value of paddingH should be at most half of kernelH // value of paddingW should be at most half of kernelW return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2)); } case GGML_OP_SUM: return ggml_is_contiguous_rows(op->src[0]); case GGML_OP_L2_NORM: case GGML_OP_CROSS_ENTROPY_LOSS: case GGML_OP_DUP: case GGML_OP_IM2COL: case GGML_OP_CONCAT: case GGML_OP_REPEAT: case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: case GGML_OP_NORM: case GGML_OP_ADD: case GGML_OP_ADD1: case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: case GGML_OP_RMS_NORM: case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_CLAMP: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SUM_ROWS: case GGML_OP_ARGSORT: case GGML_OP_ACC: case GGML_OP_GROUP_NORM: return true; case GGML_OP_PAD: // TODO: add circular padding support for cann, see https://github.com/ggml-org/llama.cpp/pull/16985 return ggml_get_op_params_i32(op, 8) == 0; case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: case GGML_OP_ARGMAX: case GGML_OP_COS: case GGML_OP_SIN: case GGML_OP_LOG: case GGML_OP_MEAN: case GGML_OP_PAD_REFLECT_1D: case GGML_OP_COUNT_EQUAL: return true; case GGML_OP_OUT_PROD: { #ifdef ASCEND_310P // Ger is not supported on 310p device return false; #endif switch (op->src[0]->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: return true; default: return false; } } case GGML_OP_CONV_TRANSPOSE_1D: return true; case GGML_OP_SCALE: float bias; memcpy(&bias, (const float *) (op->op_params) + 1, sizeof(float)); return bias == 0.0f; // TODO: support bias != 0.0f case GGML_OP_SOFT_MAX: // TODO: support attention sinks [TAG_ATTN_SINKS] if (op->src[2]) { return false; } return true; case GGML_OP_FLASH_ATTN_EXT: { #ifdef ASCEND_310P // FA not support on 310p device return false; #endif // derived from [ggml-cuda.cu] if (op->src[1]->type != GGML_TYPE_F16 || op->src[2]->type != GGML_TYPE_F16) { return false; } if (op->src[1]->type != GGML_TYPE_F16 && op->src[1]->type != GGML_TYPE_F32 && op->src[1]->type != GGML_TYPE_BF16) { return false; } if (op->type != GGML_TYPE_F16 && op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_BF16) { return false; } // TODO: support attention sinks [TAG_ATTN_SINKS] if (op->src[4]) { return false; } if (op->src[1]->ne[0] != op->src[2]->ne[0]) { // different head sizes of K and V are not supported yet return false; } if (op->src[0]->ne[0] % 16 != 0) { // 
TODO: padding to support return false; } float logitSoftcap = 0.0f; memcpy(&logitSoftcap, (const float *) (op->op_params) + 2, sizeof(float)); if (logitSoftcap != 0.0f) { return false; } return true; } case GGML_OP_SSM_CONV: return true; default: return false; } GGML_UNUSED(dev); } /** * @brief Checks if the backend buffer type is associated with the CANN backend. * * This function checks whether the provided backend buffer type is associated * with the CANN backend based on the comparison of its name retrieval function * pointer. * * @param buft Pointer to the backend buffer type to check. * @return bool Returns true if the buffer type is associated with the CANN * backend, otherwise false. */ static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft) { return buft->iface.get_name == ggml_backend_cann_buffer_type_name; } /** * @brief Determines if a tensor operation should be offloaded to the CANN * backend. * * This function checks if a given tensor operation should be offloaded to the * CANN backend based on the operation type and the size of the tensor. It * returns true if the second dimension (ne[1]) of the tensor is greater than or * equal to the minimum batch size and the operation is not GGML_OP_GET_ROWS. * * @param backend Pointer to the CANN backend. * @param op Pointer to the tensor operation to check. * @return bool Returns true if the operation should be offloaded, otherwise * false. */ static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) { const int min_batch_size = 32; GGML_UNUSED(dev); return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS; } /** * @brief Records an event on the CANN backend stream. * * This function records the given event on the ACL runtime stream associated * with the backend context. * * @param event Pointer to the event structure to be recorded. */ static void ggml_backend_cann_event_record(ggml_backend_t backend, ggml_backend_event_t event) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ACL_CHECK(aclrtRecordEvent((aclrtEvent) event->context, cann_ctx->stream())); } /** * @brief Waits for a recorded event to complete on the CANN backend stream. * * This function makes the given backend wait for the event to complete on its * ACL runtime stream. * * @param backend Pointer to the backend structure. * @param event Pointer to the event structure that the backend needs to wait * for. */ static void ggml_backend_cann_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; if (ggml_backend_is_cann(backend)) { ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(), (aclrtEvent) event->context)); } else { GGML_ABORT("fatal error"); } } /** * @brief Structure defining the interface for the CANN backend. * * This structure contains function pointers for various operations * supported by the CANN backend, including name retrieval, memory * management, tensor operations, synchronization, and event handling. 
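 *
 * @note End-to-end usage sketch (illustrative only, not part of the original
 * documentation); it assumes the standard public ggml / ggml-backend API, a
 * single CANN device 0, and omits error handling.
 * @code
 * ggml_backend_t backend = ggml_backend_cann_init(0);
 *
 * ggml_init_params params = { ggml_tensor_overhead() * 8 + ggml_graph_overhead(), nullptr, true };
 * ggml_context * ctx = ggml_init(params);
 * ggml_tensor  * a   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
 * ggml_tensor  * b   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
 * ggml_cgraph  * gf  = ggml_new_graph(ctx);
 * ggml_build_forward_expand(gf, ggml_mul_mat(ctx, a, b));
 *
 * ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
 * // ... upload a and b with ggml_backend_tensor_set(), then:
 * ggml_backend_graph_compute(backend, gf);   // dispatched through this interface
 *
 * ggml_backend_buffer_free(buf);
 * ggml_free(ctx);
 * ggml_backend_free(backend);
 * @endcode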
*/ static const ggml_backend_i ggml_backend_cann_interface = { /* .get_name = */ ggml_backend_cann_name, /* .free = */ ggml_backend_cann_free, /* .set_tensor_async = */ ggml_backend_cann_set_tensor_async, /* .get_tensor_async = */ ggml_backend_cann_get_tensor_async, /* .cpy_tensor_async = */ ggml_backend_cann_cpy_tensor_async, /* .synchronize = */ ggml_backend_cann_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_cann_graph_compute, /* .event_record = */ ggml_backend_cann_event_record, /* .event_wait = */ ggml_backend_cann_event_wait, /* .graph_optimize = */ NULL, }; /** * @brief Return the hardcoded GUID for the CANN backend. * * This function returns a static GUID which uniquely identifies the CANN * backend. * * @return A pointer to the static GUID. */ static ggml_guid_t ggml_backend_cann_guid() { static ggml_guid guid = { 0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34, 0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64 }; return &guid; } // backend device struct ggml_backend_cann_device_context { int device; std::string name; std::string description; }; static const char * ggml_backend_cann_device_get_name(ggml_backend_dev_t dev) { ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ctx->name.c_str(); } static const char * ggml_backend_cann_device_get_description(ggml_backend_dev_t dev) { ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ctx->description.c_str(); } static void ggml_backend_cann_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; ggml_backend_cann_get_device_memory(ctx->device, free, total); } static enum ggml_backend_dev_type ggml_backend_cann_device_get_type(ggml_backend_dev_t dev) { GGML_UNUSED(dev); return GGML_BACKEND_DEVICE_TYPE_GPU; } static void ggml_backend_cann_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_cann_device_get_name(dev); props->description = ggml_backend_cann_device_get_description(dev); props->type = ggml_backend_cann_device_get_type(dev); ggml_backend_cann_device_get_memory(dev, &props->memory_free, &props->memory_total); bool host_buffer = getenv("GGML_CANN_NO_PINNED") == nullptr; props->caps = { /* .async = */ false, /* .host_buffer = */ host_buffer, /* .buffer_from_host_ptr = */ false, /* .events = */ true, }; } static ggml_backend_t ggml_backend_cann_device_init(ggml_backend_dev_t dev, const char * params) { GGML_UNUSED(params); ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ggml_backend_cann_init(ctx->device); } /** * @brief Checks if the CANN backend supports a specific backend buffer type. * * This function determines whether the CANN backend supports the given backend * buffer type by comparing the device context of the backend and buffer type. * It returns true if the devices are same between the backend context and * buffer type context. * * @param backend Pointer to the CANN backend. * @param buft Pointer to the backend buffer type to check. * @return bool Returns true if the CANN backend supports the buffer type, * otherwise false. 
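 *
 * @note Hypothetical check from application code (illustrative only), going
 * through the public device API rather than this internal callback; it assumes
 * at least two CANN devices and `backend_cann0` obtained from
 * ggml_backend_cann_init(0).
 * @code
 * ggml_backend_dev_t dev = ggml_backend_get_device(backend_cann0);
 * bool same_dev  = ggml_backend_dev_supports_buft(dev, ggml_backend_cann_buffer_type(0)); // true
 * bool other_dev = ggml_backend_dev_supports_buft(dev, ggml_backend_cann_buffer_type(1)); // false
 * @endcode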
*/ static bool ggml_backend_cann_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { if (ggml_backend_buft_is_cann(buft)) { ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *) dev->context; ggml_backend_cann_buffer_type_context * buft_ctx = (ggml_backend_cann_buffer_type_context *) buft->context; return buft_ctx->device == dev_ctx->device; } return false; } static ggml_backend_buffer_type_t ggml_backend_cann_device_get_buffer_type(ggml_backend_dev_t dev) { ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ggml_backend_cann_buffer_type(ctx->device); } static ggml_backend_buffer_type_t ggml_backend_cann_device_get_host_buffer_type(ggml_backend_dev_t dev) { GGML_UNUSED(dev); return ggml_backend_cann_host_buffer_type(); } /** * @brief Creates a new event for the CANN backend device. * * This function initializes a new event for the CANN backend by setting the * device and creating an ACL runtime event. The created event is then wrapped * in a ggml_backend_event structure and returned. * * @param backend Pointer to the CANN backend. * @return ggml_backend_event_t Returns a pointer to the new event structure. */ static ggml_backend_event_t ggml_backend_cann_device_event_new(ggml_backend_dev_t dev) { ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *) dev->context; ggml_cann_set_device(dev_ctx->device); aclrtEvent event; ACL_CHECK(aclrtCreateEvent(&event)); return new ggml_backend_event{ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), dev_ctx->device), /* .context = */ event, }; } /** * @brief Frees a CANN backend event. * * This function destroys the ACL runtime event associated with the given CANN * backend event and then deletes the event structure itself. * * @param event Pointer to the event structure to be freed. */ static void ggml_backend_cann_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) { ACL_CHECK(aclrtDestroyEvent((aclrtEvent) event->context)); delete event; GGML_UNUSED(dev); } /** * @brief Synchronizes the given event on the CANN backend. * * This function waits for the specified event to complete on the ACL runtime. * * @param event Pointer to the event structure to be synchronized. 
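 *
 * @note Illustrative sketch of the matching public event API (not part of the
 * original documentation); `backend` and `dev` are assumed to be an
 * initialized CANN backend and its device.
 * @code
 * ggml_backend_event_t ev = ggml_backend_event_new(dev);
 * ggml_backend_event_record(ev, backend);   // -> aclrtRecordEvent on the backend stream
 * ggml_backend_event_synchronize(ev);       // -> aclrtSynchronizeEvent (this callback)
 * ggml_backend_event_free(ev);
 * @endcode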
*/ static void ggml_backend_cann_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) { ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent) event->context)); GGML_UNUSED(dev); } static const ggml_backend_device_i ggml_backend_cann_device_interface = { /* .get_name = */ ggml_backend_cann_device_get_name, /* .get_description = */ ggml_backend_cann_device_get_description, /* .get_memory = */ ggml_backend_cann_device_get_memory, /* .get_type = */ ggml_backend_cann_device_get_type, /* .get_props = */ ggml_backend_cann_device_get_props, /* .init_backend = */ ggml_backend_cann_device_init, // called for every card /* .get_buffer_type = */ ggml_backend_cann_device_get_buffer_type, /* .get_host_buffer_type = */ ggml_backend_cann_device_get_host_buffer_type, /* .buffer_from_host_ptr = */ NULL, // not supported for CANN /* .supports_op = */ ggml_backend_cann_supports_op, /* .supports_buft = */ ggml_backend_cann_supports_buft, /* .offload_op = */ ggml_backend_cann_offload_op, /* .event_new = */ ggml_backend_cann_device_event_new, /* .event_free = */ ggml_backend_cann_device_event_free, /* .event_synchronize = */ ggml_backend_cann_device_event_synchronize, }; // backend reg struct ggml_backend_cann_reg_context { std::vector devices; }; static const char * ggml_backend_cann_reg_get_name(ggml_backend_reg_t reg) { GGML_UNUSED(reg); return GGML_CANN_NAME; } static size_t ggml_backend_cann_reg_get_device_count(ggml_backend_reg_t reg) { ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *) reg->context; return ctx->devices.size(); } static ggml_backend_dev_t ggml_backend_cann_reg_get_device(ggml_backend_reg_t reg, size_t index) { ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *) reg->context; GGML_ASSERT(index < ctx->devices.size()); return ctx->devices[index]; } static void * ggml_backend_cann_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) { GGML_UNUSED(reg); GGML_UNUSED(name); // reserved for future use return nullptr; } static const ggml_backend_reg_i ggml_backend_cann_reg_interface = { /* .get_name = */ ggml_backend_cann_reg_get_name, /* .get_device_count = */ ggml_backend_cann_reg_get_device_count, /* .get_device = */ ggml_backend_cann_reg_get_device, /* .get_proc_address = */ ggml_backend_cann_reg_get_proc_address, }; // backend registry, called only once for cann backend ggml_backend_reg_t ggml_backend_cann_reg() { static ggml_backend_reg reg; static bool initialized = false; { static std::mutex mutex; std::lock_guard lock(mutex); if (!initialized) { aclInit(nullptr); ggml_backend_cann_reg_context * ctx = new ggml_backend_cann_reg_context; for (int i = 0; i < ggml_cann_info().device_count; i++) { ggml_backend_cann_device_context * dev_ctx = new ggml_backend_cann_device_context(); dev_ctx->description = aclrtGetSocName(); dev_ctx->device = i; dev_ctx->name = GGML_CANN_NAME + std::to_string(i); ggml_cann_set_device(i); ggml_backend_dev_t dev = new ggml_backend_device{ /* .iface = */ ggml_backend_cann_device_interface, /* .reg = */ ®, /* .context = */ dev_ctx }; ctx->devices.push_back(dev); } reg = ggml_backend_reg{ /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_cann_reg_interface, /* .context = */ ctx }; } initialized = true; } return ® } ggml_backend_t ggml_backend_cann_init(int32_t device) { aclInit(nullptr); if (device < 0 || device >= ggml_backend_cann_get_device_count()) { GGML_LOG_ERROR("%s: error: invalid device %d\n", __func__, device); return nullptr; } ggml_backend_cann_context * ctx = new 
ggml_backend_cann_context(device); if (ctx == nullptr) { GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__); return nullptr; } ggml_cann_set_device(ctx->device); ggml_backend_t cann_backend = new ggml_backend{ /* .guid = */ ggml_backend_cann_guid(), /* .interface = */ ggml_backend_cann_interface, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device), /* .context = */ ctx }; return cann_backend; } bool ggml_backend_is_cann(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_cann_guid()); } int32_t ggml_backend_cann_get_device_count() { return ggml_cann_info().device_count; } void ggml_backend_cann_get_device_description(int32_t device, char * description, size_t description_size) { ggml_cann_set_device(device); const char * soc_name = aclrtGetSocName(); snprintf(description, description_size, "%s", soc_name); } void ggml_backend_cann_get_device_memory(int32_t device, size_t * free, size_t * total) { ggml_cann_set_device(device); ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total)); } GGML_BACKEND_DL_IMPL(ggml_backend_cann_reg) ggml-org-ggml-3678254/src/ggml-common.h000066400000000000000000004056061512524704700175530ustar00rootroot00000000000000#ifndef GGML_COMMON_DECL #if defined(GGML_COMMON_DECL_C) #include typedef uint16_t ggml_half; typedef uint32_t ggml_half2; #define GGML_COMMON_AGGR_U #define GGML_COMMON_AGGR_S #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_CPP) #include typedef uint16_t ggml_half; typedef uint32_t ggml_half2; // std-c++ allow anonymous unions but some compiler warn on it #define GGML_COMMON_AGGR_U data // std-c++ do not allow it. #define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_METAL) #include typedef half ggml_half; typedef half2 ggml_half2; #define GGML_COMMON_AGGR_U #define GGML_COMMON_AGGR_S #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_CUDA) #if defined(GGML_COMMON_DECL_MUSA) #include #else #include #endif #include typedef half ggml_half; typedef half2 ggml_half2; #define GGML_COMMON_AGGR_U #define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_HIP) #include #include typedef half ggml_half; typedef half2 ggml_half2; #define GGML_COMMON_AGGR_U #define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_SYCL) #include #include typedef sycl::half ggml_half; typedef sycl::half2 ggml_half2; #define GGML_COMMON_AGGR_U #define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #endif #if defined(GGML_COMMON_DECL) #ifndef __cplusplus #ifndef static_assert #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) #define static_assert(cond, msg) _Static_assert(cond, msg) #else #define static_assert(cond, msg) struct global_scope_noop_trick #endif #endif #endif // __cplusplus // QK = number of values after dequantization // QK_K = super-block size #define QK_K 256 #define K_SCALE_SIZE 12 #if defined(GGML_COMMON_DECL_CUDA) || defined(GGML_COMMON_DECL_HIP) || defined(GGML_COMMON_DECL_SYCL) // QR = QK / number of values before dequantization // QI = number of 32 bit integers before dequantization #define QI4_0 (QK4_0 / (4 * QR4_0)) #define QR4_0 2 #define QI4_1 (QK4_1 / (4 * QR4_1)) #define QR4_1 2 #define QI_MXFP4 (QK_MXFP4 / (4 * QR_MXFP4)) #define QR_MXFP4 2 #define QI5_0 (QK5_0 / (4 * QR5_0)) #define QR5_0 2 #define QI5_1 (QK5_1 / (4 * QR5_1)) #define QR5_1 2 #define QI8_0 (QK8_0 / (4 * QR8_0)) #define QR8_0 1 #define QI8_1 (QK8_1 / (4 * QR8_1)) #define QR8_1 1 #define 
QI2_K (QK_K / (4*QR2_K)) #define QR2_K 4 #define QI3_K (QK_K / (4*QR3_K)) #define QR3_K 4 #define QI4_K (QK_K / (4*QR4_K)) #define QR4_K 2 #define QI5_K (QK_K / (4*QR5_K)) #define QR5_K 2 #define QI6_K (QK_K / (4*QR6_K)) #define QR6_K 2 #define QI2_XXS (QK_K / (4*QR2_XXS)) #define QR2_XXS 4 #define QI2_XS (QK_K / (4*QR2_XS)) #define QR2_XS 4 #define QI2_S (QK_K / (4*QR2_S)) #define QR2_S 4 #define QI3_XXS (QK_K / (4*QR3_XXS)) #define QR3_XXS 4 #define QI3_XS (QK_K / (4*QR3_XS)) #define QR3_XS 4 #define QI1_S (QK_K / (4*QR1_S)) #define QR1_S 8 #define QI1_M (QK_K / (4*QR1_M)) #define QR1_M 8 #define QI4_NL (QK4_NL / (4*QR4_NL)) #define QR4_NL 2 #define QI4_XS (QK_K / (4*QR4_XS)) #define QR4_XS 2 #define QI3_S (QK_K / (4*QR3_S)) #define QR3_S 4 #endif // GGML_COMMON_DECL_CUDA || GGML_COMMON_DECL_HIP #ifdef _MSC_VER #define GGML_EXTENSION #else // _MSC_VER #define GGML_EXTENSION __extension__ #endif // _MSC_VER #define QK4_0 32 typedef struct { ggml_half d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; static_assert(sizeof(block_q4_0) == sizeof(ggml_half) + QK4_0 / 2, "wrong q4_0 block size/padding"); #define QK4_1 32 typedef struct { GGML_EXTENSION union { struct { ggml_half d; // delta ggml_half m; // min } GGML_COMMON_AGGR_S; ggml_half2 dm; } GGML_COMMON_AGGR_U; uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); #define QK_MXFP4 32 typedef struct { uint8_t e; // E8M0 uint8_t qs[QK_MXFP4/2]; } block_mxfp4; static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + QK_MXFP4/2, "wrong mxfp4 block size/padding"); #define QK5_0 32 typedef struct { ggml_half d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; static_assert(sizeof(block_q5_0) == sizeof(ggml_half) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); #define QK5_1 32 typedef struct { GGML_EXTENSION union { struct { ggml_half d; // delta ggml_half m; // min } GGML_COMMON_AGGR_S; ggml_half2 dm; } GGML_COMMON_AGGR_U; uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_half) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); #define QK8_0 32 typedef struct { ggml_half d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; static_assert(sizeof(block_q8_0) == sizeof(ggml_half) + QK8_0, "wrong q8_0 block size/padding"); #define QK8_1 32 typedef struct { GGML_EXTENSION union { struct { ggml_half d; // delta ggml_half s; // d * sum(qs[i]) } GGML_COMMON_AGGR_S; ggml_half2 ds; } GGML_COMMON_AGGR_U; int8_t qs[QK8_1]; // quants } block_q8_1; static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding"); // // Ternary quantization // // 1.6875 bpw typedef struct { uint8_t qs[(QK_K - 4 * QK_K / 64) / 5]; // 5 elements per byte (3^5 = 243 < 256) uint8_t qh[QK_K/64]; // 4 elements per byte ggml_half d; } block_tq1_0; static_assert(sizeof(block_tq1_0) == sizeof(ggml_half) + QK_K / 64 + (QK_K - 4 * QK_K / 64) / 5, "wrong tq1_0 block size/padding"); // 2.0625 bpw typedef struct { uint8_t qs[QK_K/4]; // 2 bits per element ggml_half d; } block_tq2_0; static_assert(sizeof(block_tq2_0) == sizeof(ggml_half) + QK_K / 4, "wrong tq2_0 block size/padding"); // // Super-block quantization structures // // 2-bit quantization // weight is represented as x = a * q + b // 16 blocks of 16 elements each // Effectively 2.625 bits per 
weight typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants GGML_EXTENSION union { struct { ggml_half d; // super-block scale for quantized scales ggml_half dmin; // super-block scale for quantized mins } GGML_COMMON_AGGR_S; ggml_half2 dm; } GGML_COMMON_AGGR_U; } block_q2_K; static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); // 3-bit quantization // weight is represented as x = a * q // 16 blocks of 16 elements each // Effectively 3.4375 bits per weight typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits uint8_t scales[12]; // scales, quantized with 6 bits ggml_half d; // super-block scale } block_q3_K; static_assert(sizeof(block_q3_K) == sizeof(ggml_half) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); // 4-bit quantization // 8 blocks of 32 elements each // weight is represented as x = a * q + b // Effectively 4.5 bits per weight typedef struct { GGML_EXTENSION union { struct { ggml_half d; // super-block scale for quantized scales ggml_half dmin; // super-block scale for quantized mins } GGML_COMMON_AGGR_S; ggml_half2 dm; } GGML_COMMON_AGGR_U; uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding"); // 5-bit quantization // 8 blocks of 32 elements each // weight is represented as x = a * q + b // Effectively 5.5 bits per weight typedef struct { GGML_EXTENSION union { struct { ggml_half d; // super-block scale for quantized scales ggml_half dmin; // super-block scale for quantized mins } GGML_COMMON_AGGR_S; ggml_half2 dm; } GGML_COMMON_AGGR_U; uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_half) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); // 6-bit quantization // weight is represented as x = a * q // 16 blocks of 16 elements each // Effectively 6.5625 bits per weight typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales, quantized with 8 bits ggml_half d; // super-block scale } block_q6_K; static_assert(sizeof(block_q6_K) == sizeof(ggml_half) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding"); // This is only used for intermediate quantization and dot products typedef struct { float d; // delta int8_t qs[QK_K]; // quants int16_t bsums[QK_K/16]; // sum of quants in groups of 16 } block_q8_K; static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); // (Almost) "true" 2-bit quantization. // Due to the need to use blocks as per ggml design, it ends up using // 2.0625 bpw because of the 16-bit scale for each block of 256. 
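// Size check: one block covers QK_K = 256 weights and occupies
// sizeof(ggml_half) + (QK_K/8)*sizeof(uint16_t) = 2 + 64 = 66 bytes,
// i.e. 66*8/256 = 2.0625 bits per weight, matching the figure above.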
typedef struct { ggml_half d; uint16_t qs[QK_K/8]; } block_iq2_xxs; static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding"); // 2.3125 bpw quants typedef struct { ggml_half d; uint16_t qs[QK_K/8]; uint8_t scales[QK_K/32]; } block_iq2_xs; static_assert(sizeof(block_iq2_xs) == sizeof(ggml_half) + QK_K/8*sizeof(uint16_t) + QK_K/32, "wrong iq2_xs block size/padding"); // 2.5625 bpw quants typedef struct { ggml_half d; uint8_t qs[QK_K/4]; uint8_t qh[QK_K/32]; uint8_t scales[QK_K/32]; } block_iq2_s; static_assert(sizeof(block_iq2_s) == sizeof(ggml_half) + QK_K/4 + QK_K/16, "wrong iq2_s block size/padding"); // (Almost) "true" 3-bit quantization. // Due to the need to use blocks as per ggml design, it ends up using // 3.0625 bpw because of the 16-bit scale for each block of 256. typedef struct { ggml_half d; uint8_t qs[3*QK_K/8]; } block_iq3_xxs; static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_half) + 3*(QK_K/8), "wrong iq3_xxs block size/padding"); // 3.4375 bpw #define IQ3S_N_SCALE QK_K/64 typedef struct { ggml_half d; uint8_t qs[QK_K/4]; uint8_t qh[QK_K/32]; uint8_t signs[QK_K/8]; uint8_t scales[IQ3S_N_SCALE]; } block_iq3_s; static_assert(sizeof(block_iq3_s) == sizeof(ggml_half) + 13*(QK_K/32) + IQ3S_N_SCALE, "wrong iq3_s block size/padding"); // 1.5625 bpw typedef struct { ggml_half d; uint8_t qs[QK_K/8]; uint16_t qh[QK_K/32]; } block_iq1_s; static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding"); // 1.75 bpw typedef struct { uint8_t qs[QK_K/8]; // grid index, low 8 bits uint8_t qh[QK_K/16]; // grid index, high 3 bits + grid shift bit (for two groups of 8) uint8_t scales[QK_K/32]; // 3-bit block scales (4-bit if QK_K == 64) } block_iq1_m; static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32, "wrong iq1_m block size/padding"); // Used by IQ1_M quants typedef union { ggml_half f16; uint16_t u16; } iq1m_scale_t; // Non-linear quants #define QK4_NL 32 typedef struct { ggml_half d; uint8_t qs[QK4_NL/2]; } block_iq4_nl; static_assert(sizeof(block_iq4_nl) == sizeof(ggml_half) + QK4_NL/2, "wrong iq4_nl block size/padding"); typedef struct { ggml_half d; uint16_t scales_h; uint8_t scales_l[QK_K/64]; uint8_t qs[QK_K/2]; } block_iq4_xs; static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); #endif // GGML_COMMON_DECL #endif // GGML_COMMON_DECL //////////////////////////////////////////////////////////////////////////////// #ifndef GGML_COMMON_IMPL #if defined(GGML_COMMON_IMPL_C) #include #define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { #define GGML_TABLE_END() }; #define GGML_COMMON_IMPL #elif defined(GGML_COMMON_IMPL_CPP) #include #define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { #define GGML_TABLE_END() }; #define GGML_COMMON_IMPL #elif defined(GGML_COMMON_IMPL_METAL) #include #define GGML_TABLE_BEGIN(type, name, size) static const constant type name[size] = { #define GGML_TABLE_END() }; #define GGML_COMMON_IMPL #elif defined(GGML_COMMON_IMPL_CUDA) || defined(GGML_COMMON_IMPL_HIP) || defined(GGML_COMMON_IMPL_MUSA) #include #define GGML_TABLE_BEGIN(type, name, size) static const __device__ type name[size] = { #define GGML_TABLE_END() }; #define GGML_COMMON_IMPL #elif defined(GGML_COMMON_IMPL_SYCL) #include #define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { #define GGML_TABLE_END() }; #define GGML_COMMON_IMPL #endif #if 
defined(GGML_COMMON_IMPL) GGML_TABLE_BEGIN(uint8_t, kmask_iq2xs, 8) 1, 2, 4, 8, 16, 32, 64, 128 GGML_TABLE_END() GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128) 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15, 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159, 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175, 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63, 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207, 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95, 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111, 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, GGML_TABLE_END() GGML_TABLE_BEGIN(uint64_t, ksigns64, 128) 0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff, 0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff, 0xff000000ff000000, 0x00000000ff0000ff, 0x00000000ff00ff00, 0xff000000ff00ffff, 0x00000000ffff0000, 0xff000000ffff00ff, 0xff000000ffffff00, 0x00000000ffffffff, 0xff0000ff00000000, 0x000000ff000000ff, 0x000000ff0000ff00, 0xff0000ff0000ffff, 0x000000ff00ff0000, 0xff0000ff00ff00ff, 0xff0000ff00ffff00, 0x000000ff00ffffff, 0x000000ffff000000, 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0x000000ffff00ffff, 0xff0000ffffff0000, 0x000000ffffff00ff, 0x000000ffffffff00, 0xff0000ffffffffff, 0xff00ff0000000000, 0x0000ff00000000ff, 0x0000ff000000ff00, 0xff00ff000000ffff, 0x0000ff0000ff0000, 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0x0000ff0000ffffff, 0x0000ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, 0x0000ff00ff00ffff, 0xff00ff00ffff0000, 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0xff00ff00ffffffff, 0x0000ffff00000000, 0xff00ffff000000ff, 0xff00ffff0000ff00, 0x0000ffff0000ffff, 0xff00ffff00ff0000, 0x0000ffff00ff00ff, 0x0000ffff00ffff00, 0xff00ffff00ffffff, 0xff00ffffff000000, 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0xff00ffffff00ffff, 0x0000ffffffff0000, 0xff00ffffffff00ff, 0xff00ffffffffff00, 0x0000ffffffffffff, 0xffff000000000000, 0x00ff0000000000ff, 0x00ff00000000ff00, 0xffff00000000ffff, 0x00ff000000ff0000, 0xffff000000ff00ff, 0xffff000000ffff00, 0x00ff000000ffffff, 0x00ff0000ff000000, 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0x00ff0000ff00ffff, 0xffff0000ffff0000, 0x00ff0000ffff00ff, 0x00ff0000ffffff00, 0xffff0000ffffffff, 0x00ff00ff00000000, 0xffff00ff000000ff, 0xffff00ff0000ff00, 0x00ff00ff0000ffff, 0xffff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, 0xffff00ff00ffffff, 0xffff00ffff000000, 0x00ff00ffff0000ff, 0x00ff00ffff00ff00, 0xffff00ffff00ffff, 0x00ff00ffffff0000, 0xffff00ffffff00ff, 0xffff00ffffffff00, 0x00ff00ffffffffff, 0x00ffff0000000000, 0xffffff00000000ff, 0xffffff000000ff00, 0x00ffff000000ffff, 0xffffff0000ff0000, 0x00ffff0000ff00ff, 0x00ffff0000ffff00, 0xffffff0000ffffff, 0xffffff00ff000000, 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0xffffff00ff00ffff, 0x00ffff00ffff0000, 0xffffff00ffff00ff, 0xffffff00ffffff00, 0x00ffff00ffffffff, 0xffffffff00000000, 0x00ffffff000000ff, 0x00ffffff0000ff00, 0xffffffff0000ffff, 0x00ffffff00ff0000, 0xffffffff00ff00ff, 0xffffffff00ffff00, 0x00ffffff00ffffff, 0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff, 0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff, GGML_TABLE_END() GGML_TABLE_BEGIN(uint64_t, iq2xxs_grid, 256) 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, 0x0808080808082b2b, 0x0808080808190819, 
0x0808080808191908, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819, 0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b, 0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808, 0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b, 0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819, 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08, 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08, 0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808, 0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808, 0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819, 0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08, 0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908, 0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819, 0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808, 0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808, 0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908, 0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808, 0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08, 0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819, 0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819, 0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908, 0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19, 0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b, 0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808, 0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908, 0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08, 0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819, 0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808, 0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808, 0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19, 0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b, 0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08, 0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808, 0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 
0x191908192b2b1908, 0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b, 0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819, 0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08, 0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808, 0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b, 0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908, 0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908, 0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b, 0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808, 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b, 0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b, 0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808, 0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19, 0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908, GGML_TABLE_END() GGML_TABLE_BEGIN(uint64_t, iq2xs_grid, 512) 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819, 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b, 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, 0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, 0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, 0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919, 0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808, 0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819, 0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908, 0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819, 0x0808192b08081908, 
0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808, 0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b, 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819, 0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819, 0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808, 0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, 0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19, 0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b, 0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819, 0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908, 0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808, 0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819, 0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808, 0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808, 0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808, 0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908, 0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908, 0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808, 0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b, 0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819, 0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908, 0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, 0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919, 0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19, 0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b, 0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b, 0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808, 0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08, 0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808, 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, 0x1908081908080808, 0x190808190808082b, 
0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819, 0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919, 0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, 0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819, 0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819, 0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908, 0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908, 0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b, 0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908, 0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908, 0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808, 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819, 0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808, 0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b, 0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819, 0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819, 0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08, 0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808, 0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19, 0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919, 0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19, 0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b, 0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808, 0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b, 0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b, 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b, 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808, 0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819, 0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808, 0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808, 0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08, 0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b, 0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19, 0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08, 0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919, 0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08, 0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08, 0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 
0x2b19080808081908, 0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908, 0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b, 0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908, 0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808, 0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b, 0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808, 0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808, 0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19, 0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808, 0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b, 0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808, 0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b, GGML_TABLE_END() GGML_TABLE_BEGIN(uint64_t, iq2s_grid, 1024) 0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08, 0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b, 0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919, 0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b, 0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919, 0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x08080808192b192b, 0x08080808192b2b19, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b2b0808, 0x080808082b2b1919, 0x080808082b2b2b2b, 0x0808081908080819, 0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908, 0x080808191919192b, 0x0808081919192b19, 0x08080819192b0808, 0x08080819192b1919, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908, 0x080808192b190808, 0x080808192b19082b, 0x080808192b191919, 0x080808192b2b0819, 0x080808192b2b1908, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808, 0x0808082b082b2b2b, 0x0808082b19080819, 0x0808082b19081908, 0x0808082b1908192b, 0x0808082b19082b19, 0x0808082b19190808, 0x0808082b19191919, 0x0808082b2b080808, 0x0808082b2b081919, 0x0808082b2b082b2b, 0x0808082b2b191908, 0x0808082b2b2b082b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x08081908082b192b, 0x08081908082b2b19, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08, 0x0808190819082b2b, 0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x0808190819192b19, 0x08081908192b0808, 0x08081908192b082b, 0x08081908192b1919, 0x080819082b080819, 0x080819082b081908, 0x080819082b08192b, 0x080819082b082b19, 0x080819082b190808, 0x080819082b191919, 0x080819082b192b08, 0x080819082b2b0819, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908081919, 
0x0808191908082b08, 0x0808191908082b2b, 0x0808191908190819, 0x0808191908191908, 0x080819190819192b, 0x0808191908192b19, 0x08081919082b0808, 0x08081919082b1919, 0x08081919082b2b08, 0x0808191919080819, 0x0808191919081908, 0x080819191908192b, 0x0808191919082b19, 0x0808191919190808, 0x080819191919082b, 0x0808191919191919, 0x0808191919192b08, 0x08081919192b0819, 0x08081919192b1908, 0x080819192b080808, 0x080819192b08082b, 0x080819192b081919, 0x080819192b082b08, 0x080819192b190819, 0x080819192b191908, 0x080819192b2b0808, 0x0808192b08080819, 0x0808192b08081908, 0x0808192b0808192b, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b08191919, 0x0808192b19080808, 0x0808192b19081919, 0x0808192b19082b08, 0x0808192b19190819, 0x0808192b19191908, 0x0808192b192b0808, 0x0808192b2b080819, 0x0808192b2b081908, 0x0808192b2b190808, 0x08082b0808080808, 0x08082b080808082b, 0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808190819, 0x08082b0808191908, 0x08082b080819192b, 0x08082b0808192b19, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b08082b2b2b, 0x08082b0819080819, 0x08082b0819081908, 0x08082b081908192b, 0x08082b0819082b19, 0x08082b0819190808, 0x08082b081919082b, 0x08082b0819191919, 0x08082b0819192b08, 0x08082b08192b0819, 0x08082b08192b1908, 0x08082b082b080808, 0x08082b082b081919, 0x08082b082b191908, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908, 0x08082b1908190808, 0x08082b190819082b, 0x08082b1908191919, 0x08082b1908192b08, 0x08082b19082b0819, 0x08082b1919080808, 0x08082b1919081919, 0x08082b1919082b08, 0x08082b1919190819, 0x08082b1919191908, 0x08082b19192b0808, 0x08082b192b080819, 0x08082b192b190808, 0x08082b2b08080808, 0x08082b2b08190819, 0x08082b2b08191908, 0x08082b2b082b082b, 0x08082b2b082b2b08, 0x08082b2b082b2b2b, 0x08082b2b19190808, 0x08082b2b2b192b19, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x08190808082b192b, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x081908081919192b, 0x0819080819192b19, 0x08190808192b0808, 0x08190808192b082b, 0x08190808192b1919, 0x08190808192b2b08, 0x081908082b080819, 0x081908082b081908, 0x081908082b08192b, 0x081908082b190808, 0x081908082b191919, 0x081908082b192b08, 0x081908082b2b0819, 0x081908082b2b1908, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919, 0x0819081908082b08, 0x0819081908082b2b, 0x0819081908190819, 0x0819081908191908, 0x081908190819192b, 0x0819081908192b19, 0x08190819082b0808, 0x08190819082b082b, 0x08190819082b1919, 0x08190819082b2b08, 0x0819081919080819, 0x0819081919081908, 0x081908191908192b, 0x0819081919082b19, 0x0819081919190808, 0x081908191919082b, 0x0819081919191919, 0x0819081919192b08, 0x08190819192b0819, 0x08190819192b1908, 0x081908192b080808, 0x081908192b08082b, 0x081908192b081919, 0x081908192b082b08, 0x081908192b190819, 0x081908192b191908, 0x0819082b08080819, 0x0819082b08081908, 0x0819082b08082b19, 0x0819082b08190808, 0x0819082b08191919, 0x0819082b082b0819, 0x0819082b082b1908, 0x0819082b19080808, 0x0819082b19081919, 0x0819082b19190819, 0x0819082b19191908, 0x0819082b2b080819, 0x0819082b2b081908, 0x0819082b2b190808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x081919080819192b, 0x0819190808192b19, 0x08191908082b0808, 0x08191908082b1919, 0x08191908082b2b08, 0x0819190819080819, 0x0819190819081908, 0x081919081908192b, 
0x0819190819082b19, 0x0819190819190808, 0x081919081919082b, 0x0819190819191919, 0x0819190819192b08, 0x08191908192b0819, 0x08191908192b1908, 0x081919082b080808, 0x081919082b08082b, 0x081919082b081919, 0x081919082b082b08, 0x081919082b190819, 0x081919082b191908, 0x081919082b2b0808, 0x0819191908080819, 0x0819191908081908, 0x081919190808192b, 0x0819191908082b19, 0x0819191908190808, 0x081919190819082b, 0x0819191908191919, 0x0819191908192b08, 0x08191919082b0819, 0x08191919082b1908, 0x0819191919080808, 0x081919191908082b, 0x0819191919081919, 0x0819191919082b08, 0x0819191919190819, 0x0819191919191908, 0x08191919192b0808, 0x081919192b080819, 0x081919192b081908, 0x081919192b190808, 0x0819192b08080808, 0x0819192b08081919, 0x0819192b08082b08, 0x0819192b08190819, 0x0819192b08191908, 0x0819192b082b0808, 0x0819192b19080819, 0x0819192b19081908, 0x0819192b19190808, 0x0819192b2b080808, 0x0819192b2b2b2b2b, 0x08192b0808080819, 0x08192b0808081908, 0x08192b080808192b, 0x08192b0808082b19, 0x08192b0808190808, 0x08192b0808191919, 0x08192b0808192b08, 0x08192b08082b0819, 0x08192b0819080808, 0x08192b081908082b, 0x08192b0819081919, 0x08192b0819082b08, 0x08192b0819190819, 0x08192b0819191908, 0x08192b08192b0808, 0x08192b082b080819, 0x08192b082b081908, 0x08192b1908080808, 0x08192b190808082b, 0x08192b1908081919, 0x08192b1908082b08, 0x08192b1908190819, 0x08192b1908191908, 0x08192b19082b0808, 0x08192b1919080819, 0x08192b1919081908, 0x08192b1919190808, 0x08192b19192b2b19, 0x08192b192b2b082b, 0x08192b2b08081908, 0x08192b2b08190808, 0x08192b2b19080808, 0x08192b2b1919192b, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08, 0x082b080808190819, 0x082b080808191908, 0x082b08080819192b, 0x082b080808192b19, 0x082b0808082b0808, 0x082b0808082b1919, 0x082b0808082b2b2b, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808, 0x082b08081919082b, 0x082b080819191919, 0x082b0808192b1908, 0x082b08082b080808, 0x082b08082b082b2b, 0x082b08082b191908, 0x082b08082b2b2b2b, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808, 0x082b08190819082b, 0x082b081908191919, 0x082b0819082b0819, 0x082b081919080808, 0x082b08191908082b, 0x082b081919081919, 0x082b081919190819, 0x082b081919191908, 0x082b0819192b0808, 0x082b08192b080819, 0x082b08192b081908, 0x082b08192b190808, 0x082b082b08080808, 0x082b082b08082b2b, 0x082b082b082b082b, 0x082b082b082b2b08, 0x082b082b082b2b2b, 0x082b082b19081908, 0x082b082b19190808, 0x082b082b2b082b08, 0x082b082b2b082b2b, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908, 0x082b19080808192b, 0x082b190808082b19, 0x082b190808190808, 0x082b190808191919, 0x082b190808192b08, 0x082b1908082b0819, 0x082b1908082b1908, 0x082b190819080808, 0x082b19081908082b, 0x082b190819081919, 0x082b190819082b08, 0x082b190819190819, 0x082b190819191908, 0x082b1908192b0808, 0x082b19082b080819, 0x082b19082b081908, 0x082b19082b190808, 0x082b191908080808, 0x082b191908081919, 0x082b191908082b08, 0x082b191908190819, 0x082b191908191908, 0x082b1919082b0808, 0x082b191919080819, 0x082b191919081908, 0x082b191919190808, 0x082b1919192b192b, 0x082b19192b080808, 0x082b192b08080819, 0x082b192b08081908, 0x082b192b08190808, 0x082b192b19080808, 0x082b192b19192b19, 0x082b2b0808080808, 0x082b2b0808081919, 0x082b2b0808190819, 0x082b2b0808191908, 0x082b2b0819080819, 0x082b2b0819081908, 0x082b2b0819190808, 0x082b2b082b082b2b, 0x082b2b082b2b2b2b, 0x082b2b1908080819, 0x082b2b1908081908, 0x082b2b1908190808, 0x082b2b192b191919, 0x082b2b2b08082b2b, 0x082b2b2b082b082b, 0x082b2b2b192b1908, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 
0x1908080808080819, 0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x1908080808192b2b, 0x19080808082b0819, 0x19080808082b1908, 0x19080808082b192b, 0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x190808081919192b, 0x1908080819192b19, 0x19080808192b0808, 0x19080808192b082b, 0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808, 0x190808082b191919, 0x190808082b192b08, 0x190808082b2b0819, 0x190808082b2b1908, 0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908, 0x190808190819192b, 0x1908081908192b19, 0x19080819082b0808, 0x19080819082b082b, 0x19080819082b1919, 0x1908081919080819, 0x1908081919081908, 0x190808191908192b, 0x1908081919082b19, 0x1908081919190808, 0x190808191919082b, 0x1908081919191919, 0x1908081919192b08, 0x19080819192b0819, 0x19080819192b1908, 0x190808192b080808, 0x190808192b08082b, 0x190808192b081919, 0x190808192b082b08, 0x190808192b190819, 0x190808192b191908, 0x190808192b2b0808, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808, 0x1908082b0819082b, 0x1908082b08191919, 0x1908082b08192b08, 0x1908082b082b1908, 0x1908082b19080808, 0x1908082b19081919, 0x1908082b19082b08, 0x1908082b19190819, 0x1908082b19191908, 0x1908082b192b0808, 0x1908082b2b080819, 0x1908082b2b081908, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808082b2b, 0x1908190808190819, 0x1908190808191908, 0x190819080819192b, 0x1908190808192b19, 0x19081908082b0808, 0x19081908082b082b, 0x19081908082b1919, 0x19081908082b2b08, 0x1908190819080819, 0x1908190819081908, 0x190819081908192b, 0x1908190819082b19, 0x1908190819190808, 0x190819081919082b, 0x1908190819191919, 0x1908190819192b08, 0x19081908192b0819, 0x19081908192b1908, 0x190819082b080808, 0x190819082b08082b, 0x190819082b081919, 0x190819082b082b08, 0x190819082b190819, 0x190819082b191908, 0x190819082b2b0808, 0x1908191908080819, 0x1908191908081908, 0x190819190808192b, 0x1908191908082b19, 0x1908191908190808, 0x190819190819082b, 0x1908191908191919, 0x1908191908192b08, 0x19081919082b0819, 0x19081919082b1908, 0x1908191919080808, 0x190819191908082b, 0x1908191919081919, 0x1908191919082b08, 0x1908191919190819, 0x1908191919191908, 0x19081919192b0808, 0x19081919192b2b2b, 0x190819192b080819, 0x190819192b081908, 0x190819192b190808, 0x1908192b08080808, 0x1908192b0808082b, 0x1908192b08081919, 0x1908192b08082b08, 0x1908192b08190819, 0x1908192b08191908, 0x1908192b082b0808, 0x1908192b19080819, 0x1908192b19081908, 0x1908192b19190808, 0x1908192b2b080808, 0x1908192b2b2b1919, 0x19082b0808080819, 0x19082b0808081908, 0x19082b0808082b19, 0x19082b0808190808, 0x19082b080819082b, 0x19082b0808191919, 0x19082b0808192b08, 0x19082b08082b0819, 0x19082b08082b1908, 0x19082b0819080808, 0x19082b081908082b, 0x19082b0819081919, 0x19082b0819082b08, 0x19082b0819190819, 0x19082b0819191908, 0x19082b08192b0808, 0x19082b082b081908, 0x19082b082b190808, 0x19082b1908080808, 0x19082b190808082b, 0x19082b1908081919, 0x19082b1908082b08, 0x19082b1908190819, 0x19082b1908191908, 0x19082b19082b0808, 0x19082b1919080819, 0x19082b1919081908, 0x19082b1919190808, 0x19082b192b080808, 0x19082b192b19192b, 0x19082b2b08080819, 0x19082b2b08081908, 0x19082b2b08190808, 0x19082b2b19080808, 0x1919080808080808, 0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908, 
0x191908080819192b, 0x1919080808192b19, 0x19190808082b0808, 0x19190808082b082b, 0x19190808082b1919, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908, 0x191908081908192b, 0x1919080819082b19, 0x1919080819190808, 0x191908081919082b, 0x1919080819191919, 0x1919080819192b08, 0x19190808192b0819, 0x19190808192b1908, 0x191908082b080808, 0x191908082b08082b, 0x191908082b081919, 0x191908082b082b08, 0x191908082b190819, 0x191908082b191908, 0x1919081908080819, 0x1919081908081908, 0x191908190808192b, 0x1919081908082b19, 0x1919081908190808, 0x191908190819082b, 0x1919081908191919, 0x1919081908192b08, 0x19190819082b0819, 0x19190819082b1908, 0x1919081919080808, 0x191908191908082b, 0x1919081919081919, 0x1919081919082b08, 0x1919081919190819, 0x1919081919191908, 0x19190819192b0808, 0x191908192b080819, 0x191908192b081908, 0x191908192b190808, 0x1919082b08080808, 0x1919082b08081919, 0x1919082b08082b08, 0x1919082b08190819, 0x1919082b08191908, 0x1919082b082b0808, 0x1919082b19080819, 0x1919082b19081908, 0x1919082b19190808, 0x1919082b192b2b19, 0x1919082b2b080808, 0x1919190808080819, 0x1919190808081908, 0x191919080808192b, 0x1919190808082b19, 0x1919190808190808, 0x191919080819082b, 0x1919190808191919, 0x1919190808192b08, 0x19191908082b0819, 0x19191908082b1908, 0x1919190819080808, 0x191919081908082b, 0x1919190819081919, 0x1919190819082b08, 0x1919190819190819, 0x1919190819191908, 0x19191908192b0808, 0x191919082b080819, 0x191919082b081908, 0x191919082b190808, 0x1919191908080808, 0x191919190808082b, 0x1919191908081919, 0x1919191908082b08, 0x1919191908190819, 0x1919191908191908, 0x19191919082b0808, 0x1919191919080819, 0x1919191919081908, 0x1919191919190808, 0x191919192b080808, 0x1919192b08080819, 0x1919192b08081908, 0x1919192b08190808, 0x1919192b082b192b, 0x1919192b19080808, 0x19192b0808080808, 0x19192b080808082b, 0x19192b0808081919, 0x19192b0808082b08, 0x19192b0808190819, 0x19192b0808191908, 0x19192b08082b0808, 0x19192b0819080819, 0x19192b0819081908, 0x19192b0819190808, 0x19192b0819192b2b, 0x19192b082b080808, 0x19192b1908080819, 0x19192b1908081908, 0x19192b1908190808, 0x19192b1919080808, 0x19192b2b08080808, 0x19192b2b08192b19, 0x19192b2b2b081919, 0x19192b2b2b2b2b08, 0x192b080808080819, 0x192b080808081908, 0x192b08080808192b, 0x192b080808190808, 0x192b08080819082b, 0x192b080808191919, 0x192b080808192b08, 0x192b0808082b0819, 0x192b0808082b1908, 0x192b080819080808, 0x192b080819081919, 0x192b080819082b08, 0x192b080819190819, 0x192b080819191908, 0x192b0808192b0808, 0x192b08082b081908, 0x192b08082b190808, 0x192b081908080808, 0x192b08190808082b, 0x192b081908081919, 0x192b081908082b08, 0x192b081908190819, 0x192b081908191908, 0x192b0819082b0808, 0x192b081919080819, 0x192b081919081908, 0x192b081919190808, 0x192b08192b080808, 0x192b08192b192b19, 0x192b082b08081908, 0x192b082b08190808, 0x192b082b19080808, 0x192b082b1919192b, 0x192b082b2b2b0819, 0x192b190808080808, 0x192b190808081919, 0x192b190808082b08, 0x192b190808190819, 0x192b190808191908, 0x192b1908082b0808, 0x192b190819080819, 0x192b190819081908, 0x192b190819190808, 0x192b19082b080808, 0x192b191908080819, 0x192b191908081908, 0x192b191908190808, 0x192b191919080808, 0x192b191919082b2b, 0x192b1919192b2b08, 0x192b19192b19082b, 0x192b192b08080808, 0x192b192b2b191908, 0x192b2b0808080819, 0x192b2b0808081908, 0x192b2b0808190808, 0x192b2b08192b1919, 0x192b2b082b192b08, 0x192b2b1908080808, 0x192b2b19082b2b2b, 0x192b2b2b1908082b, 0x192b2b2b2b2b0819, 0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819, 0x2b08080808191908, 
0x2b08080808192b19, 0x2b080808082b0808, 0x2b080808082b1919, 0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808081919082b, 0x2b08080819191919, 0x2b08080819192b08, 0x2b080808192b0819, 0x2b0808082b080808, 0x2b0808082b081919, 0x2b0808082b190819, 0x2b0808082b191908, 0x2b08081908080819, 0x2b08081908081908, 0x2b08081908082b19, 0x2b08081908190808, 0x2b0808190819082b, 0x2b08081908191919, 0x2b08081908192b08, 0x2b080819082b0819, 0x2b080819082b1908, 0x2b08081919080808, 0x2b0808191908082b, 0x2b08081919081919, 0x2b08081919082b08, 0x2b08081919190819, 0x2b08081919191908, 0x2b0808192b080819, 0x2b0808192b081908, 0x2b0808192b190808, 0x2b0808192b2b2b19, 0x2b08082b08080808, 0x2b08082b08081919, 0x2b08082b08082b2b, 0x2b08082b08190819, 0x2b08082b08191908, 0x2b08082b19080819, 0x2b08082b19081908, 0x2b08082b19190808, 0x2b08190808080819, 0x2b08190808081908, 0x2b0819080808192b, 0x2b08190808082b19, 0x2b08190808190808, 0x2b0819080819082b, 0x2b08190808191919, 0x2b08190808192b08, 0x2b081908082b0819, 0x2b08190819080808, 0x2b0819081908082b, 0x2b08190819081919, 0x2b08190819082b08, 0x2b08190819190819, 0x2b08190819191908, 0x2b081908192b0808, 0x2b0819082b080819, 0x2b0819082b081908, 0x2b0819082b190808, 0x2b08191908080808, 0x2b0819190808082b, 0x2b08191908081919, 0x2b08191908082b08, 0x2b08191908190819, 0x2b08191908191908, 0x2b081919082b0808, 0x2b08191919080819, 0x2b08191919081908, 0x2b08191919190808, 0x2b0819192b080808, 0x2b0819192b082b2b, 0x2b08192b08080819, 0x2b08192b08081908, 0x2b08192b08190808, 0x2b08192b082b2b19, 0x2b08192b19080808, 0x2b082b0808080808, 0x2b082b0808081919, 0x2b082b0808190819, 0x2b082b0808191908, 0x2b082b0819080819, 0x2b082b0819081908, 0x2b082b0819190808, 0x2b082b082b2b082b, 0x2b082b1908080819, 0x2b082b1908081908, 0x2b082b1919080808, 0x2b082b19192b1919, 0x2b082b2b082b082b, 0x2b082b2b19192b08, 0x2b082b2b19192b2b, 0x2b082b2b2b08082b, 0x2b082b2b2b2b082b, 0x2b19080808080819, 0x2b19080808081908, 0x2b19080808082b19, 0x2b19080808190808, 0x2b1908080819082b, 0x2b19080808191919, 0x2b19080808192b08, 0x2b190808082b1908, 0x2b19080819080808, 0x2b1908081908082b, 0x2b19080819081919, 0x2b19080819082b08, 0x2b19080819190819, 0x2b19080819191908, 0x2b190808192b0808, 0x2b1908082b080819, 0x2b1908082b081908, 0x2b1908082b190808, 0x2b19081908080808, 0x2b19081908081919, 0x2b19081908190819, 0x2b19081908191908, 0x2b19081919080819, 0x2b19081919081908, 0x2b19081919190808, 0x2b19081919192b2b, 0x2b19082b08080819, 0x2b19082b08081908, 0x2b19082b08190808, 0x2b19082b19080808, 0x2b19082b2b2b192b, 0x2b19190808080808, 0x2b1919080808082b, 0x2b19190808081919, 0x2b19190808082b08, 0x2b19190808190819, 0x2b19190808191908, 0x2b191908082b0808, 0x2b19190819080819, 0x2b19190819081908, 0x2b19190819190808, 0x2b1919082b080808, 0x2b1919082b19192b, 0x2b19191908080819, 0x2b19191908081908, 0x2b19191908190808, 0x2b19191919080808, 0x2b1919192b192b08, 0x2b1919192b2b0819, 0x2b19192b08080808, 0x2b19192b1908192b, 0x2b19192b192b1908, 0x2b192b0808080819, 0x2b192b0808081908, 0x2b192b0808190808, 0x2b192b08082b192b, 0x2b192b0819080808, 0x2b192b082b2b2b19, 0x2b192b1908080808, 0x2b192b1919082b19, 0x2b192b191919082b, 0x2b192b2b2b190808, 0x2b2b080808080808, 0x2b2b080808081919, 0x2b2b080808082b2b, 0x2b2b080808191908, 0x2b2b0808082b082b, 0x2b2b0808082b2b2b, 0x2b2b080819080819, 0x2b2b080819081908, 0x2b2b080819190808, 0x2b2b08082b2b082b, 0x2b2b08082b2b2b2b, 0x2b2b081919080808, 0x2b2b0819192b1919, 0x2b2b082b0808082b, 0x2b2b082b08082b2b, 0x2b2b082b082b082b, 0x2b2b082b082b2b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b08082b, 0x2b2b082b2b082b08, 0x2b2b082b2b082b2b, 
0x2b2b082b2b2b2b08, 0x2b2b190808080819, 0x2b2b190808081908, 0x2b2b190808190808, 0x2b2b190819080808, 0x2b2b19082b082b19, 0x2b2b19082b2b1908, 0x2b2b191908080808, 0x2b2b191908192b19, 0x2b2b192b19190819, 0x2b2b2b0808082b2b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b082b, 0x2b2b2b1919191908, 0x2b2b2b192b08192b, 0x2b2b2b2b08082b08, 0x2b2b2b2b08082b2b, 0x2b2b2b2b082b0808, 0x2b2b2b2b082b082b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b2b082b08, 0x2b2b2b2b2b2b2b2b, GGML_TABLE_END() GGML_TABLE_BEGIN(uint32_t, iq3xxs_grid, 256) 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414, 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14, 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404, 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e, 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c, 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c, 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34, 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c, 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c, 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04, 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c, 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414, 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434, 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c, 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e, 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24, 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24, 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c, 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c, 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14, 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414, 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e, 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404, 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c, 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c, 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14, 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c, 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c, 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14, 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14, 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c, 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 
0x3e2c1424, 0x3e341c04, GGML_TABLE_END() GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305, 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905, 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09, 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b, 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b, 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d, 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03, 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505, 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03, 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901, 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d, 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303, 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501, 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105, 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 0x03010505, 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101, 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707, 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b, 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01, 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f, 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305, 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 0x03090b01, 0x03090b09, 0x030b0103, 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509, 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503, 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b, 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f, 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f, 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f, 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109, 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f, 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509, 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501, 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303, 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f, 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907, 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703, 0x07010707, 
0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03, 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01, 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01, 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903, 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505, 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b, 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107, 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509, 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303, 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103, 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05, 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b, 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f, 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701, 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909, 0x0b01090f, 0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305, 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d, 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b, 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d, 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307, 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09, 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 0x0d070309, 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709, 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f, 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303, 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503, 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b, 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, GGML_TABLE_END() // TODO: fix name to kvalues_iq4_nl GGML_TABLE_BEGIN(int8_t, kvalues_iq4nl, 16) -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113, GGML_TABLE_END() // e2m1 values (doubled) // ref: https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf GGML_TABLE_BEGIN(int8_t, kvalues_mxfp4, 16) 0, 1, 2, 3, 4, 6, 8, 12, 0, -1, -2, -3, -4, -6, -8, -12, GGML_TABLE_END() #define NGRID_IQ1S 2048 #define IQ1S_DELTA 0.125f #define IQ1M_DELTA 0.125f #if defined(GGML_COMMON_IMPL_C) GGML_TABLE_BEGIN(uint64_t, iq1s_grid, NGRID_IQ1S) 0xffffffffffffffff, 0xffffffffffffff01, 0xffffffffffff0000, 0xffffffffffff01ff, 0xffffffffffff0101, 0xffffffffff00ff00, 0xffffffffff000000, 0xffffffffff01ffff, 0xffffffffff01ff01, 0xffffffffff0101ff, 0xffffffffff010101, 0xffffffff00ff0000, 0xffffffff0000ff00, 0xffffffff000000ff, 
0xffffffff00000001, 0xffffffff00010000, 0xffffffff01ffffff, 0xffffffff01ffff01, 0xffffffff01ff01ff, 0xffffffff01ff0101, 0xffffffff01000000, 0xffffffff0101ffff, 0xffffffff0101ff01, 0xffffffff010101ff, 0xffffffff01010101, 0xffffff00ffff00ff, 0xffffff00ffff0000, 0xffffff00ff00ff00, 0xffffff00ff0000ff, 0xffffff00ff000001, 0xffffff00ff000100, 0xffffff00ff000101, 0xffffff00ff010000, 0xffffff0000ffff00, 0xffffff0000ff0001, 0xffffff0000ff0100, 0xffffff000000ff01, 0xffffff0000000000, 0xffffff0000000101, 0xffffff000001ff00, 0xffffff00000100ff, 0xffffff0000010001, 0xffffff00000101ff, 0xffffff0001ff0000, 0xffffff000100ff00, 0xffffff00010000ff, 0xffffff0001000001, 0xffffff0001010000, 0xffffff01ffffffff, 0xffffff01ffffff01, 0xffffff01ffff01ff, 0xffffff01ffff0101, 0xffffff01ff000000, 0xffffff01ff01ffff, 0xffffff01ff01ff01, 0xffffff01ff0101ff, 0xffffff01ff010101, 0xffffff0100ff0000, 0xffffff010000ff00, 0xffffff0100000100, 0xffffff01000100ff, 0xffffff0100010100, 0xffffff0101ffffff, 0xffffff0101ffff01, 0xffffff0101ff01ff, 0xffffff0101ff0101, 0xffffff010100ff00, 0xffffff0101000000, 0xffffff0101000100, 0xffffff010101ffff, 0xffffff010101ff01, 0xffffff01010101ff, 0xffffff0101010101, 0xffff00ffff00ff00, 0xffff00ffff0000ff, 0xffff00ffff000001, 0xffff00ffff010000, 0xffff00ff00ffff00, 0xffff00ff00ff0100, 0xffff00ff00000000, 0xffff00ff00000101, 0xffff00ff000100ff, 0xffff00ff00010000, 0xffff00ff0100ff00, 0xffff00ff01000100, 0xffff00ff01010000, 0xffff0000ffffff00, 0xffff0000ffff00ff, 0xffff0000ffff0000, 0xffff0000ffff0001, 0xffff0000ff000000, 0xffff0000ff0001ff, 0xffff0000ff000101, 0xffff0000ff010100, 0xffff000000ffffff, 0xffff000000ff0000, 0xffff000000ff0101, 0xffff00000000ffff, 0xffff00000000ff00, 0xffff0000000000ff, 0xffff000000000000, 0xffff000000000001, 0xffff000000000100, 0xffff00000001ffff, 0xffff00000001ff01, 0xffff000000010000, 0xffff0000000101ff, 0xffff000000010101, 0xffff000001ffff00, 0xffff00000100ff00, 0xffff000001000000, 0xffff0000010001ff, 0xffff000001000101, 0xffff00000101ff00, 0xffff0000010100ff, 0xffff000001010000, 0xffff000001010001, 0xffff000001010100, 0xffff0001ff0000ff, 0xffff0001ff000100, 0xffff000100ffff00, 0xffff000100ff00ff, 0xffff00010000ffff, 0xffff00010000ff01, 0xffff000100000000, 0xffff0001000001ff, 0xffff00010001ffff, 0xffff00010001ff00, 0xffff000100010001, 0xffff000100010100, 0xffff000101ff0000, 0xffff00010100ff00, 0xffff0001010000ff, 0xffff000101000100, 0xffff01ffffffffff, 0xffff01ffffffff01, 0xffff01ffffff01ff, 0xffff01ffffff0101, 0xffff01ffff000000, 0xffff01ffff01ffff, 0xffff01ffff01ff01, 0xffff01ffff0101ff, 0xffff01ffff010101, 0xffff01ff00ff0000, 0xffff01ff0000ff00, 0xffff01ff00000001, 0xffff01ff00010000, 0xffff01ff01ffffff, 0xffff01ff01ffff01, 0xffff01ff01ff01ff, 0xffff01ff01ff0101, 0xffff01ff01000000, 0xffff01ff0101ffff, 0xffff01ff0101ff01, 0xffff01ff010101ff, 0xffff01ff01010101, 0xffff0100ffff0000, 0xffff0100ff00ff00, 0xffff0100ff0000ff, 0xffff0100ff000100, 0xffff0100ff0100ff, 0xffff0100ff010000, 0xffff010000ffff00, 0xffff01000000ffff, 0xffff01000000ff00, 0xffff010000000000, 0xffff01000001ff00, 0xffff0100000100ff, 0xffff010000010100, 0xffff01000100ff00, 0xffff0100010000ff, 0xffff010001000001, 0xffff010001000100, 0xffff010001010000, 0xffff0101ffffffff, 0xffff0101ffffff01, 0xffff0101ffff01ff, 0xffff0101ffff0101, 0xffff0101ff000000, 0xffff0101ff01ffff, 0xffff0101ff01ff01, 0xffff0101ff0101ff, 0xffff0101ff010101, 0xffff010100ff0000, 0xffff01010000ff00, 0xffff010100000100, 0xffff01010001ff00, 0xffff010100010000, 0xffff010101ffffff, 0xffff010101ffff01, 0xffff010101ff0000, 
0xffff010101ff01ff, 0xffff010101ff0101, 0xffff010101000000, 0xffff01010101ffff, 0xffff01010101ff01, 0xffff0101010101ff, 0xffff010101010101, 0xff00ffffff00ffff, 0xff00ffffff00ff00, 0xff00ffffff0000ff, 0xff00ffffff000100, 0xff00ffffff0100ff, 0xff00ffffff010000, 0xff00ffff00ffff00, 0xff00ffff00ff00ff, 0xff00ffff0000ffff, 0xff00ffff00000000, 0xff00ffff000001ff, 0xff00ffff0001ff00, 0xff00ffff000100ff, 0xff00ffff00010000, 0xff00ffff00010100, 0xff00ffff0100ff00, 0xff00ffff010000ff, 0xff00ffff01000001, 0xff00ffff0101ff00, 0xff00ffff01010000, 0xff00ff00ffffff00, 0xff00ff00ffff00ff, 0xff00ff00ffff0001, 0xff00ff00ffff0100, 0xff00ff00ff00ffff, 0xff00ff00ff00ff01, 0xff00ff00ff000000, 0xff00ff00ff0001ff, 0xff00ff00ff01ff00, 0xff00ff00ff0100ff, 0xff00ff00ff010100, 0xff00ff0000ff0000, 0xff00ff0000ff0101, 0xff00ff000000ffff, 0xff00ff000000ff00, 0xff00ff000000ff01, 0xff00ff00000000ff, 0xff00ff0000000000, 0xff00ff0000000001, 0xff00ff0000000100, 0xff00ff000001ffff, 0xff00ff0000010000, 0xff00ff0001ff00ff, 0xff00ff000100ff01, 0xff00ff0001000000, 0xff00ff000101ff00, 0xff00ff00010100ff, 0xff00ff01ff00ff00, 0xff00ff01ff0000ff, 0xff00ff01ff000001, 0xff00ff01ff010000, 0xff00ff0100ffffff, 0xff00ff0100ff0001, 0xff00ff0100ff0100, 0xff00ff010000ff01, 0xff00ff0100000000, 0xff00ff01000001ff, 0xff00ff0100000101, 0xff00ff01000100ff, 0xff00ff0100010001, 0xff00ff0101ff0000, 0xff00ff010100ff00, 0xff00ff01010000ff, 0xff00ff0101000001, 0xff00ff0101010000, 0xff0000ffffffff00, 0xff0000ffffff0001, 0xff0000ffffff0100, 0xff0000ffff0000ff, 0xff0000ffff000000, 0xff0000ffff0001ff, 0xff0000ffff000100, 0xff0000ffff01ff00, 0xff0000ffff010001, 0xff0000ff00ffff00, 0xff0000ff00ff0000, 0xff0000ff00ff0001, 0xff0000ff00ff01ff, 0xff0000ff00ff0101, 0xff0000ff0000ff00, 0xff0000ff000000ff, 0xff0000ff00000000, 0xff0000ff00000001, 0xff0000ff00000100, 0xff0000ff0001ff01, 0xff0000ff00010000, 0xff0000ff000101ff, 0xff0000ff01ff00ff, 0xff0000ff01ff0100, 0xff0000ff0100ffff, 0xff0000ff010000ff, 0xff0000ff01000000, 0xff0000ff010001ff, 0xff0000ff01000100, 0xff0000ff01000101, 0xff0000ff0101ff00, 0xff0000ff010100ff, 0xff0000ff01010000, 0xff0000ff01010100, 0xff000000ffffff01, 0xff000000ffff0000, 0xff000000ffff0101, 0xff000000ff00ff00, 0xff000000ff0000ff, 0xff000000ff000000, 0xff000000ff000001, 0xff000000ff000100, 0xff000000ff01ffff, 0xff000000ff01ff01, 0xff000000ff010000, 0xff000000ff0101ff, 0xff000000ff010101, 0xff00000000ffff00, 0xff00000000ff00ff, 0xff00000000ff0000, 0xff00000000ff0001, 0xff0000000000ff00, 0xff0000000000ff01, 0xff000000000000ff, 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, 0xff00000000000101, 0xff0000000001ff00, 0xff000000000100ff, 0xff00000000010000, 0xff00000000010001, 0xff00000000010100, 0xff00000001ffffff, 0xff00000001ffff01, 0xff00000001ff00ff, 0xff00000001ff0000, 0xff00000001ff01ff, 0xff00000001ff0101, 0xff0000000100ffff, 0xff0000000100ff00, 0xff000000010000ff, 0xff00000001000000, 0xff00000001000001, 0xff00000001000100, 0xff00000001000101, 0xff0000000101ffff, 0xff0000000101ff01, 0xff00000001010000, 0xff000001ffffff00, 0xff000001ffff00ff, 0xff000001ffff0000, 0xff000001ffff0001, 0xff000001ff000000, 0xff000001ff000001, 0xff000001ff0001ff, 0xff000001ff000101, 0xff000001ff01ff00, 0xff000001ff010001, 0xff00000100ffffff, 0xff00000100ffff01, 0xff00000100ff00ff, 0xff00000100ff0000, 0xff00000100ff01ff, 0xff00000100ff0101, 0xff0000010000ff00, 0xff00000100000000, 0xff00000100000001, 0xff000001000001ff, 0xff00000100000100, 0xff0000010001ff00, 0xff000001000100ff, 0xff00000100010000, 0xff000001000101ff, 0xff00000100010100, 
0xff00000100010101, 0xff00000101ff0001, 0xff00000101ff0101, 0xff0000010100ff01, 0xff00000101000000, 0xff000001010100ff, 0xff00000101010100, 0xff0001ffff00ff00, 0xff0001ffff000001, 0xff0001ffff010000, 0xff0001ff00ffff00, 0xff0001ff00ff00ff, 0xff0001ff00ff0001, 0xff0001ff00ff0100, 0xff0001ff0000ffff, 0xff0001ff00000000, 0xff0001ff000001ff, 0xff0001ff00000101, 0xff0001ff0001ffff, 0xff0001ff0001ff00, 0xff0001ff000100ff, 0xff0001ff00010001, 0xff0001ff00010100, 0xff0001ff01ff0000, 0xff0001ff0100ff00, 0xff0001ff010000ff, 0xff0001ff01010000, 0xff000100ff00ffff, 0xff000100ff00ff01, 0xff000100ff000000, 0xff000100ff000101, 0xff000100ff01ff00, 0xff000100ff010000, 0xff00010000ffff01, 0xff00010000ff00ff, 0xff00010000ff0000, 0xff00010000ff01ff, 0xff0001000000ff00, 0xff000100000000ff, 0xff00010000000000, 0xff00010000000001, 0xff00010000000100, 0xff00010000000101, 0xff0001000001ffff, 0xff00010000010000, 0xff00010000010101, 0xff00010001ff0100, 0xff0001000100ff00, 0xff0001000100ff01, 0xff00010001000000, 0xff000100010001ff, 0xff0001000101ff00, 0xff00010001010001, 0xff00010001010100, 0xff000101ffff0100, 0xff000101ff000001, 0xff000101ff0100ff, 0xff000101ff010001, 0xff00010100ff00ff, 0xff00010100ff0001, 0xff00010100ff0100, 0xff0001010000ffff, 0xff0001010000ff01, 0xff00010100000000, 0xff000101000001ff, 0xff0001010001ff00, 0xff00010100010001, 0xff00010100010100, 0xff00010101ff0000, 0xff0001010100ff00, 0xff00010101000001, 0xff00010101000101, 0xff01ffffffffffff, 0xff01ffffffffff01, 0xff01ffffffff01ff, 0xff01ffffffff0101, 0xff01ffffff000000, 0xff01ffffff01ffff, 0xff01ffffff01ff01, 0xff01ffffff010000, 0xff01ffffff0101ff, 0xff01ffffff010101, 0xff01ffff00ff0000, 0xff01ffff0000ff00, 0xff01ffff00000100, 0xff01ffff0001ff00, 0xff01ffff00010000, 0xff01ffff01ffffff, 0xff01ffff01ffff01, 0xff01ffff01ff01ff, 0xff01ffff01ff0101, 0xff01ffff01000000, 0xff01ffff0101ffff, 0xff01ffff0101ff01, 0xff01ffff01010000, 0xff01ffff010101ff, 0xff01ffff01010101, 0xff01ff00ffff0000, 0xff01ff00ff00ff00, 0xff01ff00ff0000ff, 0xff01ff00ff000100, 0xff01ff00ff010000, 0xff01ff0000ffff01, 0xff01ff0000ff00ff, 0xff01ff0000ff0100, 0xff01ff0000000000, 0xff01ff00000001ff, 0xff01ff0000000101, 0xff01ff000001ff00, 0xff01ff00000100ff, 0xff01ff0000010000, 0xff01ff0000010001, 0xff01ff0001ff0000, 0xff01ff000100ffff, 0xff01ff0001000001, 0xff01ff0001000100, 0xff01ff0001010000, 0xff01ff01ffffff00, 0xff01ff01ffff01ff, 0xff01ff01ffff0101, 0xff01ff01ff00ff00, 0xff01ff01ff000000, 0xff01ff01ff01ffff, 0xff01ff01ff01ff01, 0xff01ff01ff0101ff, 0xff01ff01ff010101, 0xff01ff0100ff0000, 0xff01ff010000ff00, 0xff01ff0100000001, 0xff01ff0100000100, 0xff01ff0100010000, 0xff01ff0101ffff00, 0xff01ff0101ff01ff, 0xff01ff0101ff0101, 0xff01ff010100ff00, 0xff01ff0101000000, 0xff01ff010101ffff, 0xff01ff010101ff01, 0xff01ff01010101ff, 0xff01ff0101010101, 0xff0100ffffff0000, 0xff0100ffff0000ff, 0xff0100ffff000001, 0xff0100ffff000100, 0xff0100ffff010000, 0xff0100ff00ff00ff, 0xff0100ff00ff0000, 0xff0100ff00ff0001, 0xff0100ff00ff0100, 0xff0100ff0000ff01, 0xff0100ff00000000, 0xff0100ff000001ff, 0xff0100ff00000101, 0xff0100ff00010001, 0xff0100ff01ff0000, 0xff0100ff0100ff00, 0xff0100ff010000ff, 0xff0100ff01000100, 0xff0100ff0101ff00, 0xff0100ff01010000, 0xff010000ffff0100, 0xff010000ff000000, 0xff010000ff01ff00, 0xff010000ff010100, 0xff01000000ffffff, 0xff01000000ff0000, 0xff01000000ff01ff, 0xff0100000000ff00, 0xff010000000000ff, 0xff01000000000000, 0xff01000000000100, 0xff0100000001ff01, 0xff01000000010000, 0xff010000000101ff, 0xff01000001ff0100, 0xff0100000100ffff, 0xff010000010000ff, 
0xff01000001000000, 0xff010000010001ff, 0xff01000001000101, 0xff0100000101ff00, 0xff010000010100ff, 0xff01000001010001, 0xff01000001010100, 0xff010001ffff0000, 0xff010001ff00ffff, 0xff010001ff00ff01, 0xff010001ff000100, 0xff010001ff010000, 0xff01000100ffff00, 0xff01000100ff0100, 0xff01000100000000, 0xff0100010001ffff, 0xff0100010001ff00, 0xff01000100010100, 0xff01000101ff00ff, 0xff01000101ff0001, 0xff0100010100ffff, 0xff01000101000101, 0xff0101ffffffffff, 0xff0101ffffffff01, 0xff0101ffffff01ff, 0xff0101ffffff0101, 0xff0101ffff000000, 0xff0101ffff01ffff, 0xff0101ffff01ff01, 0xff0101ffff0101ff, 0xff0101ffff010101, 0xff0101ff00ff0000, 0xff0101ff0000ff00, 0xff0101ff000000ff, 0xff0101ff00010000, 0xff0101ff01ffffff, 0xff0101ff01ffff01, 0xff0101ff01ff01ff, 0xff0101ff01ff0101, 0xff0101ff0101ffff, 0xff0101ff0101ff01, 0xff0101ff010101ff, 0xff0101ff01010101, 0xff010100ffff0100, 0xff010100ff00ff00, 0xff010100ff0000ff, 0xff010100ff000100, 0xff010100ff010000, 0xff01010000ff0001, 0xff01010000ff0100, 0xff0101000000ff01, 0xff01010000000000, 0xff0101000001ff00, 0xff010100000100ff, 0xff01010000010001, 0xff01010000010100, 0xff01010001ff0000, 0xff0101000100ffff, 0xff01010001000001, 0xff01010001000100, 0xff010100010100ff, 0xff01010001010000, 0xff010101ffffffff, 0xff010101ffffff01, 0xff010101ffff01ff, 0xff010101ffff0101, 0xff010101ff01ffff, 0xff010101ff01ff01, 0xff010101ff0101ff, 0xff010101ff010101, 0xff01010100ff0000, 0xff0101010000ff00, 0xff01010100000001, 0xff01010100000100, 0xff01010100010000, 0xff01010101ffffff, 0xff01010101ffff01, 0xff01010101ff01ff, 0xff01010101ff0101, 0xff01010101000000, 0xff0101010101ffff, 0xff0101010101ff01, 0xff010101010101ff, 0xff01010101010101, 0x00ffffffffff0000, 0x00ffffffff00ff00, 0x00ffffffff000001, 0x00ffffffff010000, 0x00ffffff00ff0100, 0x00ffffff0000ff01, 0x00ffffff00000000, 0x00ffffff000001ff, 0x00ffffff00000101, 0x00ffffff0001ff00, 0x00ffffff000100ff, 0x00ffffff00010001, 0x00ffffff010000ff, 0x00ffffff01000100, 0x00ffffff0101ff00, 0x00ffffff01010001, 0x00ffff00ffffffff, 0x00ffff00ffffff00, 0x00ffff00ffff00ff, 0x00ffff00ffff0001, 0x00ffff00ffff0100, 0x00ffff00ff00ff01, 0x00ffff00ff000000, 0x00ffff00ff000001, 0x00ffff00ff0001ff, 0x00ffff00ff000101, 0x00ffff00ff01ff00, 0x00ffff00ff010001, 0x00ffff00ff010100, 0x00ffff0000ff0000, 0x00ffff0000ff01ff, 0x00ffff0000ff0101, 0x00ffff000000ff00, 0x00ffff00000000ff, 0x00ffff0000000000, 0x00ffff0000000001, 0x00ffff0000000100, 0x00ffff0000000101, 0x00ffff0000010000, 0x00ffff00000101ff, 0x00ffff0000010101, 0x00ffff0001ffff00, 0x00ffff0001ff00ff, 0x00ffff0001ff0001, 0x00ffff000100ffff, 0x00ffff000100ff01, 0x00ffff0001000000, 0x00ffff000101ffff, 0x00ffff000101ff00, 0x00ffff000101ff01, 0x00ffff01ffff0000, 0x00ffff01ff00ff00, 0x00ffff01ff0000ff, 0x00ffff01ff000001, 0x00ffff01ff010000, 0x00ffff0100ffff00, 0x00ffff010000ff01, 0x00ffff0100000000, 0x00ffff0100000101, 0x00ffff01000100ff, 0x00ffff0100010100, 0x00ffff0101ff0100, 0x00ffff01010000ff, 0x00ffff0101010000, 0x00ff00ffffffff00, 0x00ff00ffff000000, 0x00ff00ffff000100, 0x00ff00ffff010100, 0x00ff00ff00ff0000, 0x00ff00ff00ff01ff, 0x00ff00ff00ff0101, 0x00ff00ff0000ff00, 0x00ff00ff000000ff, 0x00ff00ff00000000, 0x00ff00ff00000001, 0x00ff00ff0001ff00, 0x00ff00ff0001ff01, 0x00ff00ff00010000, 0x00ff00ff000101ff, 0x00ff00ff00010101, 0x00ff00ff01ffff00, 0x00ff00ff01ff0001, 0x00ff00ff01ff0100, 0x00ff00ff0100ffff, 0x00ff00ff0100ff01, 0x00ff00ff01000000, 0x00ff00ff0101ffff, 0x00ff00ff0101ff00, 0x00ff00ff01010100, 0x00ff0000ffffff00, 0x00ff0000ffffff01, 0x00ff0000ffff0000, 0x00ff0000ffff0101, 
0x00ff0000ff00ff00, 0x00ff0000ff0000ff, 0x00ff0000ff000000, 0x00ff0000ff000001, 0x00ff0000ff000100, 0x00ff0000ff01ffff, 0x00ff0000ff010000, 0x00ff0000ff010101, 0x00ff000000ffff00, 0x00ff000000ff00ff, 0x00ff000000ff0000, 0x00ff000000ff0001, 0x00ff000000ff0100, 0x00ff00000000ffff, 0x00ff00000000ff00, 0x00ff0000000000ff, 0x00ff000000000000, 0x00ff000000000001, 0x00ff0000000001ff, 0x00ff000000000100, 0x00ff00000001ff00, 0x00ff0000000100ff, 0x00ff000000010000, 0x00ff000000010001, 0x00ff000000010100, 0x00ff000001ffff01, 0x00ff000001ff00ff, 0x00ff000001ff0000, 0x00ff000001ff01ff, 0x00ff00000100ff00, 0x00ff0000010000ff, 0x00ff000001000000, 0x00ff000001000001, 0x00ff000001000100, 0x00ff000001000101, 0x00ff000001010000, 0x00ff0000010101ff, 0x00ff000001010101, 0x00ff0001ffffff00, 0x00ff0001ffff0000, 0x00ff0001ffff0100, 0x00ff0001ff0000ff, 0x00ff0001ff000000, 0x00ff0001ff0001ff, 0x00ff0001ff000101, 0x00ff0001ff01ff00, 0x00ff0001ff0100ff, 0x00ff0001ff010100, 0x00ff000100ffffff, 0x00ff000100ffff01, 0x00ff000100ff0000, 0x00ff000100ff01ff, 0x00ff00010000ffff, 0x00ff00010000ff00, 0x00ff00010000ff01, 0x00ff000100000000, 0x00ff000100000001, 0x00ff000100000100, 0x00ff00010001ff01, 0x00ff000100010000, 0x00ff0001000101ff, 0x00ff000101ffff00, 0x00ff000101ff0000, 0x00ff000101ff0101, 0x00ff0001010000ff, 0x00ff000101000000, 0x00ff00010101ff00, 0x00ff0001010100ff, 0x00ff000101010001, 0x00ff01ffffff0000, 0x00ff01ffff00ff00, 0x00ff01ffff000000, 0x00ff01ffff000101, 0x00ff01ffff010000, 0x00ff01ff00ffff01, 0x00ff01ff00ff0100, 0x00ff01ff0000ffff, 0x00ff01ff00000000, 0x00ff01ff000001ff, 0x00ff01ff0001ff00, 0x00ff01ff000100ff, 0x00ff01ff00010001, 0x00ff01ff00010100, 0x00ff01ff01ff0000, 0x00ff01ff0100ff00, 0x00ff01ff010000ff, 0x00ff01ff01000001, 0x00ff01ff01000100, 0x00ff01ff01010000, 0x00ff0100ffffff00, 0x00ff0100ffff0000, 0x00ff0100ffff0001, 0x00ff0100ffff0101, 0x00ff0100ff00ffff, 0x00ff0100ff0000ff, 0x00ff0100ff000000, 0x00ff0100ff0001ff, 0x00ff0100ff01ff00, 0x00ff0100ff0100ff, 0x00ff0100ff010001, 0x00ff010000ffffff, 0x00ff010000ff0000, 0x00ff010000ff0101, 0x00ff01000000ff00, 0x00ff01000000ff01, 0x00ff0100000000ff, 0x00ff010000000000, 0x00ff010000000001, 0x00ff010000000100, 0x00ff01000001ffff, 0x00ff01000001ff01, 0x00ff010000010000, 0x00ff010000010001, 0x00ff010000010101, 0x00ff010001ff0001, 0x00ff010001ff0100, 0x00ff01000100ff01, 0x00ff010001000000, 0x00ff010001000001, 0x00ff0100010001ff, 0x00ff01000101ff00, 0x00ff0100010100ff, 0x00ff010001010001, 0x00ff010001010100, 0x00ff0101ff000001, 0x00ff010100ff00ff, 0x00ff010100ff0001, 0x00ff010100ff0100, 0x00ff010100000000, 0x00ff0101000001ff, 0x00ff010100000101, 0x00ff0101000100ff, 0x00ff010100010100, 0x00ff0101010000ff, 0x00ff010101010000, 0x0000ffffffffff00, 0x0000ffffffff00ff, 0x0000ffffffff0000, 0x0000ffffffff0001, 0x0000ffffffff0100, 0x0000ffffff00ff01, 0x0000ffffff000000, 0x0000ffffff000101, 0x0000ffffff01ff00, 0x0000ffffff0100ff, 0x0000ffffff010100, 0x0000ffff00ffffff, 0x0000ffff00ff0000, 0x0000ffff00ff01ff, 0x0000ffff0000ff00, 0x0000ffff000000ff, 0x0000ffff00000000, 0x0000ffff00000001, 0x0000ffff00000100, 0x0000ffff00010000, 0x0000ffff000101ff, 0x0000ffff01ff0001, 0x0000ffff01ff0100, 0x0000ffff01000000, 0x0000ffff010001ff, 0x0000ffff0101ffff, 0x0000ffff0101ff00, 0x0000ffff01010001, 0x0000ffff01010100, 0x0000ff00ffff0000, 0x0000ff00ffff01ff, 0x0000ff00ffff0100, 0x0000ff00ffff0101, 0x0000ff00ff00ff00, 0x0000ff00ff0000ff, 0x0000ff00ff000000, 0x0000ff00ff000001, 0x0000ff00ff0001ff, 0x0000ff00ff000100, 0x0000ff00ff01ffff, 0x0000ff00ff010000, 0x0000ff00ff010001, 
0x0000ff00ff0101ff, 0x0000ff00ff010101, 0x0000ff0000ffff00, 0x0000ff0000ff00ff, 0x0000ff0000ff0000, 0x0000ff0000ff0001, 0x0000ff0000ff0100, 0x0000ff000000ffff, 0x0000ff000000ff00, 0x0000ff000000ff01, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001, 0x0000ff00000001ff, 0x0000ff0000000100, 0x0000ff0000000101, 0x0000ff000001ff00, 0x0000ff00000100ff, 0x0000ff0000010000, 0x0000ff0000010001, 0x0000ff0000010100, 0x0000ff0001ffff01, 0x0000ff0001ff0000, 0x0000ff000100ff00, 0x0000ff00010000ff, 0x0000ff0001000000, 0x0000ff0001000001, 0x0000ff0001000100, 0x0000ff000101ffff, 0x0000ff0001010000, 0x0000ff0001010101, 0x0000ff01ffffff00, 0x0000ff01ffff0001, 0x0000ff01ff00ff01, 0x0000ff01ff000000, 0x0000ff01ff000101, 0x0000ff01ff01ff00, 0x0000ff01ff0100ff, 0x0000ff0100ffff01, 0x0000ff0100ff0000, 0x0000ff0100ff0101, 0x0000ff010000ff00, 0x0000ff01000000ff, 0x0000ff0100000000, 0x0000ff0100000001, 0x0000ff0100000100, 0x0000ff010001ff01, 0x0000ff0100010000, 0x0000ff0101ff0000, 0x0000ff010100ffff, 0x0000ff010100ff01, 0x0000ff0101000000, 0x0000ff0101000100, 0x0000ff0101000101, 0x0000ff01010100ff, 0x000000ffffff00ff, 0x000000ffffff0000, 0x000000ffff00ff00, 0x000000ffff0000ff, 0x000000ffff000000, 0x000000ffff000001, 0x000000ffff0001ff, 0x000000ffff000100, 0x000000ffff01ff00, 0x000000ffff010000, 0x000000ffff0101ff, 0x000000ffff010101, 0x000000ff00ffff00, 0x000000ff00ff00ff, 0x000000ff00ff0000, 0x000000ff00ff0001, 0x000000ff00ff0100, 0x000000ff00ff0101, 0x000000ff0000ffff, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000, 0x000000ff00000001, 0x000000ff000001ff, 0x000000ff00000100, 0x000000ff00000101, 0x000000ff0001ff00, 0x000000ff0001ff01, 0x000000ff000100ff, 0x000000ff00010000, 0x000000ff00010001, 0x000000ff00010100, 0x000000ff01ffffff, 0x000000ff01ff01ff, 0x000000ff01ff0101, 0x000000ff0100ff00, 0x000000ff010000ff, 0x000000ff01000000, 0x000000ff01000001, 0x000000ff01000100, 0x000000ff0101ff00, 0x000000ff010100ff, 0x000000ff01010000, 0x000000ff01010101, 0x00000000ffffff00, 0x00000000ffffff01, 0x00000000ffff00ff, 0x00000000ffff0000, 0x00000000ffff0001, 0x00000000ffff0100, 0x00000000ff00ffff, 0x00000000ff00ff00, 0x00000000ff00ff01, 0x00000000ff0000ff, 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff000101, 0x00000000ff01ff00, 0x00000000ff0100ff, 0x00000000ff010000, 0x00000000ff010001, 0x00000000ff010100, 0x0000000000ffffff, 0x0000000000ffff00, 0x0000000000ffff01, 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001, 0x0000000000ff01ff, 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01, 0x00000000000000ff, 0x0000000000000000, 0x0000000000000001, 0x00000000000001ff, 0x0000000000000100, 0x0000000000000101, 0x000000000001ffff, 0x000000000001ff00, 0x00000000000100ff, 0x0000000000010000, 0x0000000000010001, 0x00000000000101ff, 0x0000000000010100, 0x0000000000010101, 0x0000000001ffff00, 0x0000000001ff00ff, 0x0000000001ff0000, 0x0000000001ff0100, 0x0000000001ff0101, 0x000000000100ffff, 0x000000000100ff00, 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, 0x00000000010001ff, 0x0000000001000100, 0x000000000101ff00, 0x00000000010100ff, 0x0000000001010000, 0x0000000001010001, 0x0000000001010100, 0x00000001ffffffff, 0x00000001ffffff00, 0x00000001ffffff01, 0x00000001ffff00ff, 0x00000001ffff0001, 0x00000001ffff01ff, 0x00000001ffff0100, 0x00000001ff00ff00, 0x00000001ff0000ff, 0x00000001ff000000, 0x00000001ff0001ff, 0x00000001ff000100, 0x00000001ff01ffff, 0x00000001ff01ff00, 0x00000001ff01ff01, 0x00000001ff0100ff, 0x00000001ff010000, 
0x00000001ff010001, 0x00000001ff0101ff, 0x00000001ff010100, 0x0000000100ffff00, 0x0000000100ff0000, 0x0000000100ff0001, 0x0000000100ff01ff, 0x0000000100ff0100, 0x0000000100ff0101, 0x000000010000ffff, 0x000000010000ff00, 0x000000010000ff01, 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001, 0x00000001000001ff, 0x0000000100000100, 0x0000000100000101, 0x000000010001ff00, 0x00000001000100ff, 0x0000000100010000, 0x0000000100010100, 0x0000000101ffff01, 0x0000000101ff0000, 0x0000000101ff0001, 0x0000000101ff01ff, 0x0000000101ff0100, 0x0000000101ff0101, 0x000000010100ff00, 0x0000000101000000, 0x0000000101000101, 0x000000010101ff01, 0x0000000101010000, 0x0000000101010001, 0x00000001010101ff, 0x0000000101010100, 0x000001ffffff00ff, 0x000001ffffff0000, 0x000001ffffff0001, 0x000001ffffff0100, 0x000001ffff00ffff, 0x000001ffff000000, 0x000001ffff0001ff, 0x000001ffff01ff00, 0x000001ffff010101, 0x000001ff00ff0000, 0x000001ff00ff01ff, 0x000001ff00ff0101, 0x000001ff0000ff00, 0x000001ff000000ff, 0x000001ff00000000, 0x000001ff00000001, 0x000001ff000001ff, 0x000001ff00000100, 0x000001ff0001ffff, 0x000001ff0001ff01, 0x000001ff000100ff, 0x000001ff00010000, 0x000001ff01ffff01, 0x000001ff01ff0100, 0x000001ff0100ffff, 0x000001ff0100ff01, 0x000001ff01000000, 0x000001ff010001ff, 0x000001ff0101ff00, 0x000001ff01010100, 0x00000100ffffff00, 0x00000100ffffff01, 0x00000100ffff0000, 0x00000100ffff0101, 0x00000100ff00ff00, 0x00000100ff0000ff, 0x00000100ff000000, 0x00000100ff000001, 0x00000100ff000100, 0x00000100ff010000, 0x0000010000ffff00, 0x0000010000ff00ff, 0x0000010000ff0000, 0x0000010000ff0001, 0x0000010000ff0100, 0x000001000000ffff, 0x000001000000ff00, 0x000001000000ff01, 0x00000100000000ff, 0x0000010000000000, 0x0000010000000001, 0x00000100000001ff, 0x0000010000000100, 0x0000010000000101, 0x000001000001ff00, 0x00000100000100ff, 0x0000010000010000, 0x0000010000010001, 0x0000010000010100, 0x0000010001ffff00, 0x0000010001ff0000, 0x0000010001ff0100, 0x000001000100ff00, 0x00000100010000ff, 0x0000010001000000, 0x0000010001000001, 0x00000100010001ff, 0x0000010001000100, 0x0000010001010000, 0x00000101ffff00ff, 0x00000101ffff01ff, 0x00000101ff000000, 0x00000101ff000101, 0x00000101ff01ffff, 0x00000101ff010000, 0x00000101ff010001, 0x00000101ff010100, 0x0000010100ff0000, 0x0000010100ff01ff, 0x0000010100ff0100, 0x000001010000ff00, 0x0000010100000000, 0x0000010100000001, 0x00000101000001ff, 0x0000010100000100, 0x000001010001ff01, 0x0000010100010000, 0x00000101000101ff, 0x0000010100010101, 0x0000010101ffff00, 0x0000010101ff0101, 0x000001010100ff01, 0x0000010101000000, 0x0000010101000001, 0x00000101010001ff, 0x0000010101000101, 0x000001010101ff00, 0x0001ffffffff0000, 0x0001ffffff0000ff, 0x0001ffffff000001, 0x0001ffffff000100, 0x0001ffffff010000, 0x0001ffff00ff00ff, 0x0001ffff0000ffff, 0x0001ffff00000000, 0x0001ffff00000001, 0x0001ffff000001ff, 0x0001ffff00000101, 0x0001ffff0001ff00, 0x0001ffff000100ff, 0x0001ffff00010001, 0x0001ffff00010100, 0x0001ffff01ffff00, 0x0001ffff01000001, 0x0001ffff01010000, 0x0001ff00ffffff00, 0x0001ff00ffff00ff, 0x0001ff00ffff0001, 0x0001ff00ffff0100, 0x0001ff00ff00ff01, 0x0001ff00ff000000, 0x0001ff00ff01ff00, 0x0001ff00ff01ff01, 0x0001ff00ff010001, 0x0001ff00ff010100, 0x0001ff0000ff0000, 0x0001ff0000ff0100, 0x0001ff000000ff00, 0x0001ff0000000000, 0x0001ff0000000001, 0x0001ff0000000100, 0x0001ff0000010000, 0x0001ff0000010001, 0x0001ff0000010101, 0x0001ff0001ff00ff, 0x0001ff0001ff0101, 0x0001ff000100ff01, 0x0001ff0001000000, 0x0001ff000101ff00, 0x0001ff0001010001, 0x0001ff0001010100, 
0x0001ff01ff00ff00, 0x0001ff01ff000001, 0x0001ff01ff000100, 0x0001ff0100ffffff, 0x0001ff0100ffff00, 0x0001ff0100ff0001, 0x0001ff0100000000, 0x0001ff0100000001, 0x0001ff01000001ff, 0x0001ff010001ffff, 0x0001ff0101ff0000, 0x0001ff010100ff00, 0x0001ff0101000001, 0x0001ff0101010000, 0x000100ffff00ff00, 0x000100ffff00ff01, 0x000100ffff000000, 0x000100ffff000001, 0x000100ffff000101, 0x000100ffff01ff00, 0x000100ffff010001, 0x000100ffff010100, 0x000100ff00ffffff, 0x000100ff00ffff01, 0x000100ff00ff0000, 0x000100ff00ff01ff, 0x000100ff00ff0101, 0x000100ff0000ff00, 0x000100ff000000ff, 0x000100ff00000000, 0x000100ff00000001, 0x000100ff00000100, 0x000100ff00000101, 0x000100ff0001ffff, 0x000100ff0001ff01, 0x000100ff00010000, 0x000100ff01ff00ff, 0x000100ff01ff0000, 0x000100ff01ff0100, 0x000100ff0100ffff, 0x000100ff0100ff01, 0x000100ff010000ff, 0x000100ff01000000, 0x000100ff01000001, 0x000100ff010001ff, 0x000100ff01000101, 0x000100ff0101ff00, 0x000100ff010100ff, 0x000100ff01010100, 0x00010000ffff0000, 0x00010000ffff01ff, 0x00010000ffff0101, 0x00010000ff00ff00, 0x00010000ff000000, 0x00010000ff000001, 0x00010000ff000100, 0x0001000000ff00ff, 0x0001000000ff0000, 0x0001000000ff0001, 0x0001000000ff0100, 0x000100000000ffff, 0x000100000000ff00, 0x00010000000000ff, 0x0001000000000000, 0x0001000000000001, 0x0001000000000100, 0x000100000001ff00, 0x00010000000100ff, 0x0001000000010000, 0x0001000000010001, 0x0001000000010100, 0x0001000001ff0001, 0x0001000001ff0100, 0x0001000001ff0101, 0x000100000100ff00, 0x0001000001000000, 0x0001000001000001, 0x0001000001000100, 0x0001000001000101, 0x000100000101ff01, 0x0001000001010000, 0x0001000001010001, 0x00010000010101ff, 0x00010001ffffff01, 0x00010001ffff0100, 0x00010001ff000000, 0x00010001ff01ffff, 0x00010001ff010001, 0x00010001ff0101ff, 0x00010001ff010100, 0x0001000100ffffff, 0x0001000100ff0000, 0x0001000100ff01ff, 0x0001000100ff0101, 0x000100010000ff00, 0x00010001000000ff, 0x0001000100000000, 0x0001000100000001, 0x00010001000001ff, 0x0001000100000101, 0x000100010001ffff, 0x0001000100010000, 0x00010001000101ff, 0x0001000101ffffff, 0x0001000101ffff01, 0x0001000101ff0000, 0x0001000101ff0101, 0x00010001010000ff, 0x0001000101000001, 0x00010001010001ff, 0x0001000101000100, 0x000100010101ffff, 0x00010001010100ff, 0x0001000101010001, 0x0001000101010101, 0x000101ffff000001, 0x000101ffff000100, 0x000101ffff010000, 0x000101ff00ffff00, 0x000101ff0000ff01, 0x000101ff00000000, 0x000101ff00000101, 0x000101ff0001ff00, 0x000101ff00010100, 0x000101ff01ff0000, 0x000101ff0100ff00, 0x000101ff010001ff, 0x000101ff01010001, 0x00010100ffffff00, 0x00010100ffff00ff, 0x00010100ff00ffff, 0x00010100ff000000, 0x00010100ff01ff00, 0x00010100ff0100ff, 0x00010100ff010001, 0x00010100ff010100, 0x0001010000ffffff, 0x0001010000ffff00, 0x0001010000ff0000, 0x0001010000ff0001, 0x0001010000ff01ff, 0x000101000000ff00, 0x00010100000000ff, 0x0001010000000000, 0x0001010000000001, 0x0001010000000100, 0x000101000001ffff, 0x0001010000010000, 0x0001010000010101, 0x0001010001ffff01, 0x0001010001ff00ff, 0x0001010001ff0101, 0x0001010001000000, 0x000101000101ff00, 0x00010100010100ff, 0x0001010001010000, 0x0001010001010100, 0x00010101ff00ff00, 0x00010101ff000001, 0x00010101ff0001ff, 0x0001010100ffff00, 0x0001010100ff00ff, 0x0001010100ff0100, 0x000101010000ffff, 0x0001010100000000, 0x00010101000001ff, 0x0001010100000101, 0x00010101000100ff, 0x0001010100010000, 0x0001010100010100, 0x0001010101ff0001, 0x00010101010000ff, 0x00010101010001ff, 0x0001010101000101, 0x0001010101010001, 0x01ffffffffffffff, 0x01ffffffffffff01, 
0x01ffffffffff01ff, 0x01ffffffffff0101, 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff, 0x01ffffffff010101, 0x01ffffff00ff0000, 0x01ffffff0000ffff, 0x01ffffff0000ff00, 0x01ffffff000000ff, 0x01ffffff00000001, 0x01ffffff00000100, 0x01ffffff00010000, 0x01ffffff01ffffff, 0x01ffffff01ffff01, 0x01ffffff01ff01ff, 0x01ffffff01ff0101, 0x01ffffff01000000, 0x01ffffff0101ffff, 0x01ffffff0101ff01, 0x01ffffff010101ff, 0x01ffffff01010101, 0x01ffff00ffff0000, 0x01ffff00ff00ff00, 0x01ffff00ff0000ff, 0x01ffff00ff000001, 0x01ffff00ff000100, 0x01ffff00ff010000, 0x01ffff0000ffff00, 0x01ffff0000ff00ff, 0x01ffff0000ff0100, 0x01ffff000000ffff, 0x01ffff000000ff01, 0x01ffff0000000000, 0x01ffff0000000001, 0x01ffff00000001ff, 0x01ffff0000000100, 0x01ffff00000100ff, 0x01ffff0000010001, 0x01ffff0000010100, 0x01ffff0001ff0000, 0x01ffff0001ff0100, 0x01ffff00010000ff, 0x01ffff0001000001, 0x01ffff0001000100, 0x01ffff0001010000, 0x01ffff01ffffffff, 0x01ffff01ffffff01, 0x01ffff01ffff01ff, 0x01ffff01ffff0101, 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff01ff01, 0x01ffff01ff0101ff, 0x01ffff01ff010101, 0x01ffff010000ff00, 0x01ffff01000000ff, 0x01ffff0100000100, 0x01ffff0100010000, 0x01ffff0101ffffff, 0x01ffff0101ffff01, 0x01ffff0101ff01ff, 0x01ffff0101ff0101, 0x01ffff0101000000, 0x01ffff010101ffff, 0x01ffff010101ff01, 0x01ffff01010101ff, 0x01ffff0101010101, 0x01ff00ffff0000ff, 0x01ff00ffff000100, 0x01ff00ff00ffff00, 0x01ff00ff00ff00ff, 0x01ff00ff0000ff00, 0x01ff00ff00000000, 0x01ff00ff00000101, 0x01ff00ff0001ff00, 0x01ff00ff000100ff, 0x01ff00ff00010100, 0x01ff00ff010000ff, 0x01ff00ff01000100, 0x01ff0000ffffff00, 0x01ff0000ffff0100, 0x01ff0000ff00ff01, 0x01ff0000ff000000, 0x01ff0000ff000101, 0x01ff0000ff010001, 0x01ff0000ff010100, 0x01ff000000ffffff, 0x01ff000000ffff00, 0x01ff000000ff0000, 0x01ff000000ff01ff, 0x01ff00000000ff00, 0x01ff0000000000ff, 0x01ff000000000000, 0x01ff000000000001, 0x01ff000000000100, 0x01ff000000000101, 0x01ff000000010000, 0x01ff000000010001, 0x01ff0000000101ff, 0x01ff000000010101, 0x01ff000001ffff00, 0x01ff000001ff00ff, 0x01ff000001ff0001, 0x01ff000001ff0100, 0x01ff00000100ffff, 0x01ff00000100ff01, 0x01ff000001000000, 0x01ff0000010001ff, 0x01ff000001010001, 0x01ff0001ff00ff00, 0x01ff0001ff000001, 0x01ff0001ff000100, 0x01ff0001ff010000, 0x01ff000100ffff00, 0x01ff000100ff00ff, 0x01ff000100ff0100, 0x01ff000100ff0101, 0x01ff00010000ffff, 0x01ff000100000000, 0x01ff000100000100, 0x01ff000100000101, 0x01ff00010001ff00, 0x01ff000100010001, 0x01ff000100010101, 0x01ff000101ff0000, 0x01ff00010100ff00, 0x01ff000101000101, 0x01ff0001010100ff, 0x01ff01ffffffffff, 0x01ff01ffffffff01, 0x01ff01ffffff01ff, 0x01ff01ffffff0101, 0x01ff01ffff000000, 0x01ff01ffff01ffff, 0x01ff01ffff01ff01, 0x01ff01ffff0101ff, 0x01ff01ffff010101, 0x01ff01ff00ffff00, 0x01ff01ff00ff0000, 0x01ff01ff0000ff00, 0x01ff01ff000000ff, 0x01ff01ff00000100, 0x01ff01ff00010000, 0x01ff01ff00010100, 0x01ff01ff01ffffff, 0x01ff01ff01ffff01, 0x01ff01ff01ff01ff, 0x01ff01ff01ff0101, 0x01ff01ff01000000, 0x01ff01ff0101ffff, 0x01ff01ff0101ff01, 0x01ff01ff010101ff, 0x01ff01ff01010101, 0x01ff0100ffff0000, 0x01ff0100ffff0001, 0x01ff0100ff00ff00, 0x01ff0100ff0000ff, 0x01ff0100ff000001, 0x01ff0100ff010000, 0x01ff010000ffff00, 0x01ff010000ff00ff, 0x01ff010000ff0001, 0x01ff010000ff0100, 0x01ff01000000ffff, 0x01ff01000000ff01, 0x01ff010000000000, 0x01ff010000000101, 0x01ff01000001ff00, 0x01ff0100000100ff, 0x01ff010001ff0000, 0x01ff010001000001, 0x01ff010001000100, 0x01ff010001010000, 0x01ff0101ffffffff, 0x01ff0101ffffff01, 0x01ff0101ffff01ff, 
0x01ff0101ffff0101, 0x01ff0101ff000000, 0x01ff0101ff01ffff, 0x01ff0101ff01ff01, 0x01ff0101ff0101ff, 0x01ff0101ff010101, 0x01ff010100ff0000, 0x01ff01010000ff00, 0x01ff0101000000ff, 0x01ff010100000001, 0x01ff010101ffffff, 0x01ff010101ffff01, 0x01ff010101ff01ff, 0x01ff010101ff0101, 0x01ff010101000000, 0x01ff01010101ffff, 0x01ff01010101ff01, 0x01ff0101010101ff, 0x01ff010101010101, 0x0100ffffffff0000, 0x0100ffffff00ff00, 0x0100ffffff000001, 0x0100ffffff0001ff, 0x0100ffffff000100, 0x0100ffffff010000, 0x0100ffff00ffff00, 0x0100ffff00ff0001, 0x0100ffff00ff0100, 0x0100ffff00000000, 0x0100ffff000001ff, 0x0100ffff00000101, 0x0100ffff00010100, 0x0100ffff00010101, 0x0100ffff01ff0000, 0x0100ffff0100ff00, 0x0100ffff010000ff, 0x0100ffff01000001, 0x0100ffff01000100, 0x0100ffff01010000, 0x0100ff00ffffff00, 0x0100ff00ffff00ff, 0x0100ff00ffff0001, 0x0100ff00ffff0100, 0x0100ff00ff00ffff, 0x0100ff00ff000000, 0x0100ff00ff0001ff, 0x0100ff00ff000101, 0x0100ff00ff01ff00, 0x0100ff00ff0100ff, 0x0100ff00ff010001, 0x0100ff00ff010100, 0x0100ff0000ffffff, 0x0100ff0000ff0000, 0x0100ff000000ffff, 0x0100ff000000ff00, 0x0100ff00000000ff, 0x0100ff0000000000, 0x0100ff0000000001, 0x0100ff0000000100, 0x0100ff000001ff01, 0x0100ff0000010000, 0x0100ff0001ff00ff, 0x0100ff0001ff0001, 0x0100ff000100ff01, 0x0100ff0001000000, 0x0100ff00010001ff, 0x0100ff000101ff00, 0x0100ff00010100ff, 0x0100ff0001010001, 0x0100ff0001010100, 0x0100ff01ffff0000, 0x0100ff01ff00ff00, 0x0100ff01ff0000ff, 0x0100ff01ff000100, 0x0100ff01ff010000, 0x0100ff0100ff00ff, 0x0100ff0100ff0001, 0x0100ff0100ff0100, 0x0100ff010000ffff, 0x0100ff010000ff01, 0x0100ff0100000000, 0x0100ff01000001ff, 0x0100ff0100010001, 0x0100ff0100010100, 0x0100ff0101ff0000, 0x0100ff01010000ff, 0x0100ff0101000001, 0x0100ff0101010100, 0x010000ffffffff00, 0x010000ffffff00ff, 0x010000ffffff0001, 0x010000ffff00ffff, 0x010000ffff000000, 0x010000ffff0001ff, 0x010000ffff010001, 0x010000ff00ffffff, 0x010000ff00ff0101, 0x010000ff0000ff00, 0x010000ff000000ff, 0x010000ff00000000, 0x010000ff00000001, 0x010000ff000001ff, 0x010000ff00000100, 0x010000ff0001ffff, 0x010000ff0001ff00, 0x010000ff0001ff01, 0x010000ff00010000, 0x010000ff01ff00ff, 0x010000ff01ff0001, 0x010000ff0100ff01, 0x010000ff010000ff, 0x010000ff01000000, 0x010000ff010001ff, 0x010000ff0101ff00, 0x010000ff01010100, 0x01000000ffffffff, 0x01000000ffff0000, 0x01000000ffff01ff, 0x01000000ffff0101, 0x01000000ff00ffff, 0x01000000ff00ff00, 0x01000000ff0000ff, 0x01000000ff000000, 0x01000000ff000001, 0x01000000ff000100, 0x01000000ff01ff00, 0x01000000ff010000, 0x01000000ff010100, 0x01000000ff010101, 0x0100000000ffff00, 0x0100000000ff00ff, 0x0100000000ff0000, 0x0100000000ff0001, 0x0100000000ff0100, 0x010000000000ffff, 0x010000000000ff00, 0x010000000000ff01, 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001, 0x01000000000001ff, 0x0100000000000100, 0x0100000000000101, 0x010000000001ff00, 0x01000000000100ff, 0x0100000000010000, 0x0100000000010001, 0x0100000000010100, 0x0100000001ffff00, 0x0100000001ff0000, 0x0100000001ff01ff, 0x010000000100ff00, 0x010000000100ff01, 0x01000000010000ff, 0x0100000001000000, 0x0100000001000001, 0x0100000001000100, 0x0100000001000101, 0x010000000101ffff, 0x010000000101ff01, 0x0100000001010000, 0x01000000010101ff, 0x0100000001010101, 0x01000001ffffff00, 0x01000001ffff00ff, 0x01000001ff00ffff, 0x01000001ff000000, 0x01000001ff000100, 0x01000001ff01ffff, 0x01000001ff010001, 0x01000001ff010100, 0x0100000100ff0000, 0x0100000100ff01ff, 0x0100000100ff0100, 0x010000010000ff00, 0x010000010000ff01, 0x0100000100000000, 
0x0100000100000001, 0x0100000100000100, 0x0100000100010000, 0x01000001000101ff, 0x0100000101ffff01, 0x0100000101ff00ff, 0x0100000101ff0100, 0x0100000101ff0101, 0x010000010100ff01, 0x01000001010000ff, 0x0100000101000000, 0x01000001010100ff, 0x0100000101010001, 0x0100000101010100, 0x010001ffffff0000, 0x010001ffff000001, 0x010001ffff000100, 0x010001ffff010000, 0x010001ff00ffff00, 0x010001ff00ff0001, 0x010001ff0000ffff, 0x010001ff0000ff01, 0x010001ff00000000, 0x010001ff00000001, 0x010001ff00000101, 0x010001ff000100ff, 0x010001ff00010000, 0x010001ff01ff0000, 0x010001ff0100ff00, 0x010001ff01000001, 0x010001ff01000100, 0x010001ff01010000, 0x01000100ffff00ff, 0x01000100ffff0001, 0x01000100ffff0100, 0x01000100ff00ffff, 0x01000100ff00ff01, 0x01000100ff000000, 0x01000100ff0001ff, 0x01000100ff000101, 0x01000100ff01ffff, 0x01000100ff01ff00, 0x01000100ff0100ff, 0x01000100ff010001, 0x0100010000ffffff, 0x0100010000ffff01, 0x0100010000ff0000, 0x0100010000ff01ff, 0x0100010000ff0101, 0x010001000000ff00, 0x01000100000000ff, 0x0100010000000000, 0x0100010000000001, 0x0100010000000100, 0x010001000001ff01, 0x0100010000010000, 0x0100010000010001, 0x0100010000010101, 0x0100010001ffff00, 0x0100010001ff00ff, 0x010001000100ffff, 0x010001000100ff01, 0x0100010001000000, 0x0100010001000101, 0x010001000101ff00, 0x0100010001010001, 0x01000101ffff0000, 0x01000101ff000000, 0x01000101ff010000, 0x0100010100ff00ff, 0x0100010100ff0001, 0x0100010100ff0100, 0x010001010000ffff, 0x0100010100000000, 0x01000101000001ff, 0x010001010001ff00, 0x0100010101ff0000, 0x010001010100ff00, 0x01000101010000ff, 0x0100010101000000, 0x0100010101000001, 0x0101ffffffffffff, 0x0101ffffffffff01, 0x0101ffffffff01ff, 0x0101ffffffff0101, 0x0101ffffff000000, 0x0101ffffff01ffff, 0x0101ffffff01ff01, 0x0101ffffff0101ff, 0x0101ffffff010101, 0x0101ffff00ff0000, 0x0101ffff0000ff00, 0x0101ffff000000ff, 0x0101ffff00000001, 0x0101ffff00000100, 0x0101ffff01ffffff, 0x0101ffff01ffff01, 0x0101ffff01ff01ff, 0x0101ffff01ff0101, 0x0101ffff01000000, 0x0101ffff0101ffff, 0x0101ffff0101ff01, 0x0101ffff010101ff, 0x0101ffff01010101, 0x0101ff00ffff0000, 0x0101ff00ffff0100, 0x0101ff00ff00ff00, 0x0101ff00ff0000ff, 0x0101ff00ff000001, 0x0101ff00ff000100, 0x0101ff00ff000101, 0x0101ff0000ff0001, 0x0101ff0000ff0100, 0x0101ff000000ff00, 0x0101ff0000000000, 0x0101ff00000001ff, 0x0101ff0000000101, 0x0101ff000001ff00, 0x0101ff00000100ff, 0x0101ff0001ff0000, 0x0101ff000100ffff, 0x0101ff000100ff01, 0x0101ff0001000001, 0x0101ff0001000100, 0x0101ff01ffffff01, 0x0101ff01ffff01ff, 0x0101ff01ffff0101, 0x0101ff01ff00ffff, 0x0101ff01ff000100, 0x0101ff01ff01ff01, 0x0101ff01ff0101ff, 0x0101ff01ff010101, 0x0101ff0100ff0000, 0x0101ff010000ff00, 0x0101ff0100000001, 0x0101ff0100000100, 0x0101ff0100010000, 0x0101ff0101ffffff, 0x0101ff0101ffff01, 0x0101ff0101ff01ff, 0x0101ff0101ff0101, 0x0101ff0101000000, 0x0101ff010101ffff, 0x0101ff010101ff01, 0x0101ff01010101ff, 0x0101ff0101010101, 0x010100ffff000100, 0x010100ffff010000, 0x010100ff00ffff00, 0x010100ff00ff00ff, 0x010100ff0000ffff, 0x010100ff000000ff, 0x010100ff00000000, 0x010100ff000001ff, 0x010100ff00000101, 0x010100ff0001ff00, 0x010100ff00010000, 0x010100ff00010001, 0x010100ff000101ff, 0x010100ff00010100, 0x010100ff01ff0000, 0x01010000ffff0001, 0x01010000ffff0100, 0x01010000ff00ffff, 0x01010000ff00ff01, 0x01010000ff000000, 0x01010000ff0001ff, 0x01010000ff010001, 0x01010000ff010100, 0x0101000000ffff01, 0x0101000000ff0000, 0x010100000000ff00, 0x01010000000000ff, 0x0101000000000000, 0x0101000000000001, 0x0101000000000100, 0x0101000000010000, 
0x0101000000010101, 0x0101000001ffff00, 0x0101000001ff00ff, 0x0101000001ff0000, 0x0101000001ff0001, 0x0101000001ff0100, 0x010100000100ff01, 0x0101000001000000, 0x01010000010001ff, 0x01010001ffff0000, 0x01010001ff00ff00, 0x01010001ff000001, 0x01010001ff000101, 0x01010001ff01ff00, 0x01010001ff010000, 0x0101000100ff00ff, 0x0101000100ff0001, 0x0101000100ff0101, 0x010100010000ff01, 0x0101000100000000, 0x0101000100000001, 0x01010001000001ff, 0x010100010001ffff, 0x010100010001ff01, 0x0101000101ff0001, 0x010100010100ffff, 0x0101000101000000, 0x0101000101000001, 0x0101000101000100, 0x010100010101ff00, 0x01010001010100ff, 0x0101000101010001, 0x010101ffffffffff, 0x010101ffffffff01, 0x010101ffffff01ff, 0x010101ffffff0101, 0x010101ffff01ffff, 0x010101ffff01ff01, 0x010101ffff0101ff, 0x010101ffff010101, 0x010101ff0000ff00, 0x010101ff000000ff, 0x010101ff00000001, 0x010101ff00000100, 0x010101ff01ffffff, 0x010101ff01ffff01, 0x010101ff01ff01ff, 0x010101ff01ff0101, 0x010101ff01000000, 0x010101ff0101ffff, 0x010101ff0101ff01, 0x010101ff010101ff, 0x010101ff01010101, 0x01010100ffff0000, 0x01010100ff0000ff, 0x01010100ff000100, 0x01010100ff01ff00, 0x01010100ff010000, 0x0101010000ffff00, 0x010101000000ffff, 0x0101010000000000, 0x0101010000000101, 0x010101000001ff00, 0x0101010000010001, 0x0101010000010100, 0x010101000100ffff, 0x0101010001000001, 0x01010101ffffffff, 0x01010101ffffff01, 0x01010101ffff01ff, 0x01010101ffff0101, 0x01010101ff01ffff, 0x01010101ff01ff01, 0x01010101ff0101ff, 0x01010101ff010101, 0x010101010000ff00, 0x01010101000000ff, 0x0101010100000001, 0x0101010101ffffff, 0x0101010101ffff01, 0x0101010101ff01ff, 0x0101010101ff0101, 0x0101010101000000, 0x010101010101ffff, 0x010101010101ff01, 0x01010101010101ff, 0x0101010101010101, GGML_TABLE_END() #else GGML_TABLE_BEGIN(uint32_t, iq1s_grid_gpu, NGRID_IQ1S) 0x00000000, 0x00000002, 0x00000101, 0x00000200, 0x00000202, 0x00010001, 0x00010101, 0x00020000, 0x00020002, 0x00020200, 0x00020202, 0x01000101, 0x01010001, 0x01010100, 0x01010102, 0x01020101, 0x02000000, 0x02000002, 0x02000200, 0x02000202, 0x02010101, 0x02020000, 0x02020002, 0x02020200, 0x02020202, 0x00000110, 0x00000111, 0x00010011, 0x00010110, 0x00010112, 0x00010211, 0x00010212, 0x00020111, 0x01000011, 0x01000112, 0x01000211, 0x01010012, 0x01010111, 0x01010212, 0x01020011, 0x01020110, 0x01020112, 0x01020210, 0x02000111, 0x02010011, 0x02010110, 0x02010112, 0x02020111, 0x00000020, 0x00000022, 0x00000220, 0x00000222, 0x00010121, 0x00020020, 0x00020022, 0x00020220, 0x00020222, 0x01000121, 0x01010021, 0x01010221, 0x01020120, 0x01020221, 0x02000020, 0x02000022, 0x02000220, 0x02000222, 0x02010021, 0x02010121, 0x02010221, 0x02020020, 0x02020022, 0x02020220, 0x02020222, 0x00011001, 0x00011100, 0x00011102, 0x00021101, 0x01001001, 0x01001201, 0x01011101, 0x01011202, 0x01021100, 0x01021101, 0x02011001, 0x02011201, 0x02021101, 0x00001011, 0x00001110, 0x00001111, 0x00001112, 0x00011111, 0x00011210, 0x00011212, 0x00021211, 0x01001010, 0x01001111, 0x01001212, 0x01011010, 0x01011011, 0x01011110, 0x01011111, 0x01011112, 0x01011211, 0x01021010, 0x01021012, 0x01021111, 0x01021210, 0x01021212, 0x02001011, 0x02011011, 0x02011111, 0x02011210, 0x02011212, 0x02021011, 0x02021110, 0x02021111, 0x02021112, 0x02021211, 0x00011120, 0x00011221, 0x01001021, 0x01001120, 0x01011020, 0x01011022, 0x01011121, 0x01011220, 0x01021020, 0x01021021, 0x01021122, 0x01021221, 0x02001121, 0x02011021, 0x02011120, 0x02011221, 0x00002000, 0x00002002, 0x00002200, 0x00002202, 0x00012101, 0x00022000, 0x00022002, 0x00022200, 0x00022202, 0x01002101, 
0x01012001, 0x01012102, 0x01022101, 0x02002000, 0x02002002, 0x02002200, 0x02002202, 0x02012101, 0x02022000, 0x02022002, 0x02022200, 0x02022202, 0x00002111, 0x00012011, 0x00012110, 0x00012211, 0x00022110, 0x00022111, 0x01002011, 0x01012010, 0x01012011, 0x01012111, 0x01022011, 0x01022110, 0x01022211, 0x02012011, 0x02012110, 0x02012112, 0x02012211, 0x02022111, 0x00002020, 0x00002022, 0x00002220, 0x00002222, 0x00012121, 0x00022020, 0x00022022, 0x00022220, 0x00022222, 0x01002121, 0x01012021, 0x01012221, 0x01022021, 0x01022121, 0x02002020, 0x02002022, 0x02002121, 0x02002220, 0x02002222, 0x02012121, 0x02022020, 0x02022022, 0x02022220, 0x02022222, 0x00110000, 0x00110001, 0x00110100, 0x00110201, 0x00120100, 0x00120101, 0x01100001, 0x01100100, 0x01110000, 0x01110101, 0x01110200, 0x01120001, 0x01120100, 0x01120101, 0x01120201, 0x02110001, 0x02110100, 0x02110102, 0x02120001, 0x02120101, 0x00100011, 0x00100110, 0x00100112, 0x00100211, 0x00110010, 0x00110012, 0x00110111, 0x00110210, 0x00120011, 0x00120110, 0x00120211, 0x01100111, 0x01100212, 0x01110010, 0x01110011, 0x01110012, 0x01110110, 0x01110111, 0x01110112, 0x01110211, 0x01120010, 0x01120111, 0x02100110, 0x02110012, 0x02110111, 0x02120011, 0x02120110, 0x00110021, 0x00110120, 0x00110122, 0x00120121, 0x01100020, 0x01100122, 0x01100221, 0x01110022, 0x01110121, 0x01110220, 0x01110222, 0x01120120, 0x01120122, 0x02100121, 0x02110021, 0x02110120, 0x02110122, 0x02120121, 0x00101001, 0x00101102, 0x00101201, 0x00111100, 0x00111101, 0x00111200, 0x00111201, 0x00121001, 0x00121102, 0x01101001, 0x01101101, 0x01101102, 0x01101200, 0x01101202, 0x01111001, 0x01111100, 0x01111101, 0x01111102, 0x01111201, 0x01121002, 0x01121101, 0x01121200, 0x02101100, 0x02101201, 0x02111000, 0x02111100, 0x02111101, 0x02111200, 0x02111201, 0x02111202, 0x02121001, 0x02121100, 0x02121101, 0x02121201, 0x00101012, 0x00101111, 0x00101212, 0x00111011, 0x00111110, 0x00111111, 0x00111112, 0x00111211, 0x00121010, 0x00121012, 0x00121111, 0x00121210, 0x00121212, 0x01101011, 0x01101110, 0x01101111, 0x01101112, 0x01111011, 0x01111012, 0x01111110, 0x01111111, 0x01111112, 0x01111211, 0x01111212, 0x01121011, 0x01121110, 0x01121111, 0x01121112, 0x01121211, 0x02101010, 0x02101012, 0x02101110, 0x02101111, 0x02101210, 0x02101212, 0x02111010, 0x02111011, 0x02111110, 0x02111111, 0x02111112, 0x02111211, 0x02111212, 0x02121010, 0x02121012, 0x02121111, 0x00101021, 0x00101120, 0x00101121, 0x00101122, 0x00111121, 0x00111122, 0x00111220, 0x00111222, 0x00121021, 0x00121122, 0x01101020, 0x01101022, 0x01101120, 0x01101121, 0x01101220, 0x01101222, 0x01111021, 0x01111121, 0x01111122, 0x01111220, 0x01111221, 0x01121021, 0x01121120, 0x01121121, 0x01121220, 0x01121221, 0x01121222, 0x02101122, 0x02101222, 0x02111022, 0x02111121, 0x02121120, 0x02121221, 0x00112001, 0x00112102, 0x00122101, 0x01102001, 0x01102100, 0x01102102, 0x01102201, 0x01112000, 0x01112101, 0x01112200, 0x01112202, 0x01122000, 0x01122001, 0x01122100, 0x01122102, 0x01122201, 0x02102101, 0x02112001, 0x02112100, 0x02122101, 0x00112010, 0x00112012, 0x00112111, 0x00112212, 0x00122011, 0x00122111, 0x01102012, 0x01102110, 0x01102111, 0x01102210, 0x01112011, 0x01112110, 0x01112111, 0x01112112, 0x01112211, 0x01112212, 0x01122010, 0x01122111, 0x01122212, 0x02102211, 0x02112011, 0x02112012, 0x02112111, 0x02112210, 0x02122011, 0x02122112, 0x02122211, 0x00102221, 0x00112122, 0x00122120, 0x00122122, 0x01102120, 0x01102122, 0x01102221, 0x01112020, 0x01112022, 0x01112121, 0x01112220, 0x01122021, 0x01122122, 0x01122221, 0x02102121, 0x02112021, 0x02112122, 0x02112222, 
0x00200000, 0x00200002, 0x00200200, 0x00200202, 0x00210101, 0x00220000, 0x00220002, 0x00220101, 0x00220200, 0x00220202, 0x01200101, 0x01210001, 0x01210201, 0x01220001, 0x01220101, 0x02200000, 0x02200002, 0x02200200, 0x02200202, 0x02210101, 0x02220000, 0x02220002, 0x02220101, 0x02220200, 0x02220202, 0x00200111, 0x00210011, 0x00210110, 0x00210211, 0x00220111, 0x01200012, 0x01200110, 0x01200211, 0x01210111, 0x01210210, 0x01210212, 0x01220011, 0x01220110, 0x01220111, 0x01220112, 0x02200111, 0x02210010, 0x02210112, 0x02210211, 0x02220111, 0x00200021, 0x00200220, 0x00200222, 0x00210021, 0x00210121, 0x00220020, 0x00220022, 0x00220220, 0x00220222, 0x01200121, 0x01210021, 0x01210122, 0x01210221, 0x01220121, 0x02200021, 0x02200220, 0x02200222, 0x02210021, 0x02210121, 0x02220020, 0x02220022, 0x02220220, 0x02220222, 0x00201101, 0x00211100, 0x00211102, 0x00211201, 0x00221101, 0x01201100, 0x01201101, 0x01201102, 0x01201201, 0x01211002, 0x01211101, 0x01211200, 0x01211202, 0x01221102, 0x02201101, 0x02211001, 0x02211100, 0x02211201, 0x02221001, 0x02221101, 0x00201211, 0x00211111, 0x00221011, 0x00221211, 0x01201010, 0x01201111, 0x01201210, 0x01211011, 0x01211110, 0x01211111, 0x01211211, 0x01221012, 0x01221111, 0x01221210, 0x02201211, 0x02211010, 0x02211110, 0x02211111, 0x02211210, 0x02211212, 0x02221011, 0x02221110, 0x02221112, 0x02221211, 0x00201121, 0x00211020, 0x00211022, 0x00211221, 0x00221121, 0x01201021, 0x01201221, 0x01211121, 0x01221020, 0x01221021, 0x01221221, 0x02201120, 0x02201122, 0x02211020, 0x02211222, 0x00202000, 0x00202002, 0x00202200, 0x00202202, 0x00212101, 0x00222000, 0x00222002, 0x00222200, 0x00222202, 0x01202101, 0x01212001, 0x01212100, 0x01222101, 0x02202000, 0x02202002, 0x02202200, 0x02202202, 0x02222000, 0x02222002, 0x02222200, 0x02222202, 0x00202211, 0x00212011, 0x00212110, 0x00212211, 0x00222111, 0x01202112, 0x01202211, 0x01212012, 0x01212111, 0x01222011, 0x01222110, 0x01222112, 0x01222211, 0x02202111, 0x02212010, 0x02212112, 0x02212211, 0x02222110, 0x02222111, 0x00202020, 0x00202022, 0x00202220, 0x00202222, 0x00222020, 0x00222022, 0x00222220, 0x00222222, 0x01202121, 0x01212021, 0x01212122, 0x01212221, 0x01222121, 0x02202020, 0x02202022, 0x02202220, 0x02202222, 0x02212121, 0x02222020, 0x02222022, 0x02222220, 0x02222222, 0x10000101, 0x10010001, 0x10010102, 0x10020101, 0x11000201, 0x11010002, 0x11010101, 0x11010200, 0x11010202, 0x11020001, 0x11020100, 0x11020102, 0x12010100, 0x12010201, 0x12020001, 0x12020102, 0x10000010, 0x10000011, 0x10000110, 0x10000112, 0x10000211, 0x10010012, 0x10010111, 0x10010112, 0x10010210, 0x10010212, 0x10020011, 0x10020112, 0x10020211, 0x11000111, 0x11000210, 0x11000212, 0x11010011, 0x11010110, 0x11010111, 0x11010112, 0x11010211, 0x11010212, 0x11020111, 0x11020210, 0x11020212, 0x12000011, 0x12000110, 0x12000112, 0x12010010, 0x12010012, 0x12010111, 0x12020010, 0x12020011, 0x12020012, 0x10000121, 0x10010021, 0x10010120, 0x10010122, 0x10020121, 0x11000021, 0x11010022, 0x11010121, 0x11010222, 0x11020120, 0x11020221, 0x12000221, 0x12010120, 0x12020121, 0x10001001, 0x10011101, 0x10011201, 0x10021201, 0x11001101, 0x11001200, 0x11001202, 0x11011001, 0x11011100, 0x11011101, 0x11011102, 0x11021001, 0x11021002, 0x11021101, 0x11021200, 0x11021202, 0x12001001, 0x12001102, 0x12001201, 0x12011000, 0x12011002, 0x12011101, 0x12021000, 0x12021001, 0x12021201, 0x10001011, 0x10001012, 0x10001111, 0x10001212, 0x10011011, 0x10011110, 0x10011111, 0x10011112, 0x10011211, 0x10021010, 0x10021111, 0x10021212, 0x11001011, 0x11001110, 0x11001111, 0x11001112, 0x11001211, 0x11011010, 
0x11011011, 0x11011110, 0x11011111, 0x11011112, 0x11011210, 0x11011211, 0x11021011, 0x11021110, 0x11021111, 0x11021112, 0x11021211, 0x12001012, 0x12001110, 0x12001111, 0x12001210, 0x12011011, 0x12011110, 0x12011111, 0x12011112, 0x12011211, 0x12011212, 0x12021111, 0x12021210, 0x12021212, 0x10001021, 0x10001121, 0x10001221, 0x10011120, 0x10011121, 0x10011220, 0x10011222, 0x10021021, 0x10021120, 0x10021221, 0x11001020, 0x11001022, 0x11001121, 0x11001220, 0x11011020, 0x11011021, 0x11011022, 0x11011121, 0x11011122, 0x11011221, 0x11021022, 0x11021121, 0x11021220, 0x12001021, 0x12001121, 0x12001222, 0x12011120, 0x12011121, 0x12021021, 0x12021120, 0x12021122, 0x10002101, 0x10012001, 0x10012101, 0x10012202, 0x10022101, 0x11002002, 0x11002201, 0x11012000, 0x11012101, 0x11012200, 0x11022001, 0x11022100, 0x11022102, 0x11022201, 0x12002101, 0x12012001, 0x12012100, 0x12012102, 0x12012201, 0x12022101, 0x10002011, 0x10002111, 0x10002112, 0x10002212, 0x10012010, 0x10012110, 0x10012111, 0x10012210, 0x10022011, 0x10022110, 0x10022112, 0x11002010, 0x11002111, 0x11002212, 0x11012011, 0x11012012, 0x11012110, 0x11012111, 0x11012112, 0x11012211, 0x11022010, 0x11022012, 0x11022111, 0x11022112, 0x11022212, 0x12002112, 0x12002211, 0x12012012, 0x12012111, 0x12012112, 0x12012210, 0x12022011, 0x12022110, 0x12022112, 0x12022211, 0x10012122, 0x11002120, 0x11002122, 0x11002221, 0x11012121, 0x11012220, 0x11012222, 0x11022120, 0x11022221, 0x12012120, 0x12022121, 0x10100001, 0x10100100, 0x10100101, 0x10100102, 0x10100201, 0x10110002, 0x10110101, 0x10110202, 0x10120001, 0x10120100, 0x10120201, 0x11100000, 0x11100101, 0x11100200, 0x11110001, 0x11110100, 0x11110101, 0x11110102, 0x11110201, 0x11120101, 0x11120200, 0x12100102, 0x12100201, 0x12110101, 0x12110200, 0x12120000, 0x12120001, 0x12120102, 0x12120201, 0x10100111, 0x10100210, 0x10100211, 0x10100212, 0x10110011, 0x10110110, 0x10110111, 0x10110112, 0x10110210, 0x10110211, 0x10120010, 0x10120111, 0x10120112, 0x10120210, 0x10120212, 0x11100011, 0x11100110, 0x11100111, 0x11100112, 0x11100211, 0x11110010, 0x11110011, 0x11110012, 0x11110110, 0x11110111, 0x11110112, 0x11110210, 0x11110211, 0x11110212, 0x11120011, 0x11120110, 0x11120111, 0x11120112, 0x11120211, 0x12100012, 0x12100111, 0x12110011, 0x12110110, 0x12110111, 0x12110112, 0x12110211, 0x12120010, 0x12120111, 0x12120212, 0x10100021, 0x10100122, 0x10110022, 0x10110121, 0x10110222, 0x10120021, 0x10120120, 0x11100022, 0x11100121, 0x11100222, 0x11110021, 0x11110120, 0x11110121, 0x11110122, 0x11110221, 0x11120022, 0x11120121, 0x12100121, 0x12110020, 0x12110022, 0x12110121, 0x12110221, 0x12110222, 0x12120120, 0x10101100, 0x10101101, 0x10111001, 0x10111100, 0x10111101, 0x10111102, 0x10111200, 0x10111201, 0x10121001, 0x10121101, 0x10121200, 0x10121202, 0x11101001, 0x11101100, 0x11101101, 0x11101102, 0x11101201, 0x11101202, 0x11111000, 0x11111001, 0x11111100, 0x11111101, 0x11111102, 0x11111200, 0x11111201, 0x11111202, 0x11121001, 0x11121002, 0x11121100, 0x11121101, 0x11121102, 0x11121201, 0x12101000, 0x12101200, 0x12101202, 0x12111001, 0x12111100, 0x12111101, 0x12111102, 0x12111201, 0x12121001, 0x12121100, 0x12121101, 0x12121202, 0x10101011, 0x10101012, 0x10101110, 0x10101111, 0x10101112, 0x10101211, 0x10111010, 0x10111011, 0x10111012, 0x10111110, 0x10111111, 0x10111112, 0x10111211, 0x10111212, 0x10121011, 0x10121110, 0x10121111, 0x10121112, 0x10121211, 0x11101010, 0x11101011, 0x11101012, 0x11101110, 0x11101111, 0x11101112, 0x11101210, 0x11101211, 0x11111010, 0x11111011, 0x11111012, 0x11111110, 0x11111111, 0x11111112, 0x11111210, 
0x11111211, 0x11111212, 0x11121010, 0x11121011, 0x11121110, 0x11121111, 0x11121112, 0x11121210, 0x11121211, 0x11121212, 0x12101011, 0x12101110, 0x12101111, 0x12101211, 0x12101212, 0x12111010, 0x12111011, 0x12111110, 0x12111111, 0x12111112, 0x12111210, 0x12111211, 0x12121011, 0x12121110, 0x12121111, 0x12121112, 0x12121211, 0x10101020, 0x10101021, 0x10101022, 0x10101120, 0x10101122, 0x10101220, 0x10101221, 0x10111021, 0x10111120, 0x10111121, 0x10111220, 0x10111221, 0x10121020, 0x10121021, 0x10121022, 0x10121120, 0x10121121, 0x10121122, 0x10121220, 0x10121221, 0x11101021, 0x11101121, 0x11101122, 0x11101220, 0x11101221, 0x11101222, 0x11111020, 0x11111021, 0x11111022, 0x11111120, 0x11111121, 0x11111122, 0x11111220, 0x11111221, 0x11111222, 0x11121021, 0x11121120, 0x11121121, 0x11121221, 0x12101022, 0x12101121, 0x12101122, 0x12101220, 0x12101221, 0x12101222, 0x12111021, 0x12111121, 0x12111222, 0x12121022, 0x12121121, 0x12121122, 0x12121220, 0x12121221, 0x10102100, 0x10102101, 0x10102102, 0x10102201, 0x10112000, 0x10112101, 0x10112200, 0x10122001, 0x10122202, 0x11102101, 0x11102200, 0x11102202, 0x11112001, 0x11112100, 0x11112101, 0x11112102, 0x11112200, 0x11112201, 0x11122000, 0x11122002, 0x11122100, 0x11122101, 0x12102002, 0x12102201, 0x12112000, 0x12112002, 0x12112101, 0x12112200, 0x12122001, 0x12122201, 0x10102011, 0x10102012, 0x10102111, 0x10102212, 0x10112011, 0x10112110, 0x10112111, 0x10112112, 0x10112211, 0x10122111, 0x11102011, 0x11102110, 0x11102111, 0x11102112, 0x11102211, 0x11112010, 0x11112011, 0x11112012, 0x11112110, 0x11112111, 0x11112112, 0x11112210, 0x11112211, 0x11112212, 0x11122011, 0x11122110, 0x11122111, 0x11122112, 0x11122211, 0x12102011, 0x12102111, 0x12102211, 0x12112011, 0x12112110, 0x12112111, 0x12112112, 0x12112210, 0x12112211, 0x12122111, 0x10102120, 0x10102220, 0x10112121, 0x10112222, 0x10122020, 0x10122121, 0x10122122, 0x10122221, 0x11102121, 0x11102220, 0x11102221, 0x11112021, 0x11112121, 0x11112122, 0x11112220, 0x11112221, 0x11122022, 0x11122121, 0x11122220, 0x11122222, 0x12102021, 0x12102222, 0x12112022, 0x12112121, 0x12112122, 0x12112220, 0x12112222, 0x12122021, 0x10200101, 0x10210100, 0x10210102, 0x10210201, 0x10220101, 0x11200100, 0x11210000, 0x11210101, 0x11210102, 0x11210200, 0x11210202, 0x11220001, 0x11220100, 0x11220102, 0x11220201, 0x12200001, 0x12210102, 0x12220101, 0x10200011, 0x10200110, 0x10200112, 0x10200211, 0x10210012, 0x10210111, 0x10220011, 0x10220012, 0x10220112, 0x10220211, 0x11200111, 0x11200211, 0x11210011, 0x11210111, 0x11210112, 0x11210211, 0x11220111, 0x11220112, 0x11220212, 0x12200110, 0x12200212, 0x12210012, 0x12210111, 0x12220011, 0x12220112, 0x12220211, 0x10210021, 0x10210122, 0x10210221, 0x11200020, 0x11200021, 0x11200122, 0x11210121, 0x11210122, 0x11210220, 0x11220020, 0x12200121, 0x12210021, 0x12210122, 0x12220121, 0x10211001, 0x10211002, 0x10211101, 0x10211102, 0x10211202, 0x10221001, 0x10221102, 0x10221201, 0x11201000, 0x11201002, 0x11201101, 0x11201200, 0x11201202, 0x11211001, 0x11211100, 0x11211101, 0x11211102, 0x11211201, 0x11211202, 0x11221000, 0x11221002, 0x11221101, 0x12201100, 0x12201101, 0x12201201, 0x12211000, 0x12211002, 0x12211100, 0x12211101, 0x12211102, 0x12211200, 0x12211202, 0x12221001, 0x12221100, 0x12221201, 0x10201111, 0x10201210, 0x10201212, 0x10211011, 0x10211111, 0x10211112, 0x10211211, 0x11201110, 0x11201111, 0x11201112, 0x11201211, 0x11211010, 0x11211011, 0x11211110, 0x11211111, 0x11211112, 0x11211211, 0x11221011, 0x11221110, 0x11221111, 0x11221112, 0x11221211, 0x12201112, 0x12201211, 0x12201212, 0x12211011, 
0x12211111, 0x12211112, 0x12211211, 0x12211212, 0x12221012, 0x12221111, 0x12221112, 0x12221210, 0x10201022, 0x10201221, 0x10211121, 0x10221020, 0x10221122, 0x10221220, 0x10221221, 0x11201020, 0x11201121, 0x11201220, 0x11201222, 0x11211021, 0x11211120, 0x11211121, 0x11211122, 0x11211220, 0x11211222, 0x11221020, 0x11221121, 0x11221220, 0x12201020, 0x12201022, 0x12201121, 0x12201222, 0x12211120, 0x12211122, 0x12211220, 0x12211221, 0x12221020, 0x12221120, 0x12221122, 0x12221222, 0x10212102, 0x10212201, 0x10222101, 0x11202001, 0x11212002, 0x11212101, 0x11212202, 0x11222001, 0x11222201, 0x12202101, 0x12212001, 0x12212200, 0x12222102, 0x10202011, 0x10202110, 0x10212010, 0x10212111, 0x10222011, 0x10222110, 0x10222112, 0x10222211, 0x11202010, 0x11202011, 0x11202111, 0x11202112, 0x11202210, 0x11212011, 0x11212110, 0x11212111, 0x11212112, 0x11212211, 0x11222010, 0x11222111, 0x11222212, 0x12202012, 0x12202110, 0x12202212, 0x12212111, 0x12222011, 0x12222110, 0x12222111, 0x12222211, 0x10212021, 0x10212122, 0x10212220, 0x11202021, 0x11202120, 0x11202221, 0x11212020, 0x11212121, 0x11212220, 0x11212222, 0x11222120, 0x11222121, 0x11222221, 0x12202122, 0x12212120, 0x12212220, 0x12212222, 0x12222122, 0x20000000, 0x20000002, 0x20000200, 0x20000202, 0x20020000, 0x20020002, 0x20020200, 0x20020202, 0x21000101, 0x21010000, 0x21010001, 0x21010100, 0x21010102, 0x21010201, 0x21020101, 0x22000000, 0x22000002, 0x22000200, 0x22000202, 0x22010101, 0x22020000, 0x22020002, 0x22020200, 0x22020202, 0x20000111, 0x20010011, 0x20010110, 0x20010112, 0x20010211, 0x20020111, 0x21000011, 0x21000110, 0x21000211, 0x21010010, 0x21010012, 0x21010111, 0x21010112, 0x21010210, 0x21010211, 0x21020110, 0x21020112, 0x21020211, 0x22000111, 0x22000211, 0x22010110, 0x22010112, 0x22010211, 0x22020111, 0x20000020, 0x20000022, 0x20000220, 0x20000222, 0x20010121, 0x20020020, 0x20020022, 0x20020220, 0x20020222, 0x21010021, 0x21010120, 0x21010221, 0x21020121, 0x22000020, 0x22000022, 0x22000220, 0x22000222, 0x22010121, 0x22020020, 0x22020022, 0x22020220, 0x22020222, 0x20011100, 0x20011201, 0x21001001, 0x21001100, 0x21011001, 0x21011101, 0x21011202, 0x21021001, 0x21021100, 0x21021201, 0x22011100, 0x22011201, 0x20001011, 0x20001211, 0x20011012, 0x20011111, 0x20011212, 0x20021112, 0x20021211, 0x21001010, 0x21001011, 0x21001111, 0x21001210, 0x21011011, 0x21011110, 0x21011111, 0x21011112, 0x21011211, 0x21011212, 0x21021111, 0x21021112, 0x21021210, 0x21021212, 0x22001011, 0x22001110, 0x22001112, 0x22001211, 0x22011010, 0x22011012, 0x22011111, 0x22011210, 0x22021112, 0x20011021, 0x20011122, 0x20011221, 0x20021121, 0x21001021, 0x21001120, 0x21001221, 0x21001222, 0x21011020, 0x21011121, 0x21011221, 0x21011222, 0x21021021, 0x21021122, 0x21021222, 0x22001121, 0x22011021, 0x22011222, 0x22021120, 0x20002000, 0x20002002, 0x20002200, 0x20002202, 0x20012101, 0x20022000, 0x20022002, 0x20022200, 0x20022202, 0x21002001, 0x21002101, 0x21012001, 0x21012100, 0x21012201, 0x21022101, 0x21022201, 0x22002000, 0x22002002, 0x22002200, 0x22002202, 0x22012101, 0x22022000, 0x22022002, 0x22022200, 0x22022202, 0x20002111, 0x20002112, 0x20012011, 0x20012110, 0x20012112, 0x20022111, 0x21002011, 0x21002110, 0x21002112, 0x21002211, 0x21012010, 0x21012012, 0x21012111, 0x21012212, 0x21022011, 0x21022110, 0x22002111, 0x22012112, 0x22012211, 0x22022111, 0x20002020, 0x20002022, 0x20002220, 0x20002222, 0x20012121, 0x20022020, 0x20022022, 0x20022220, 0x20022222, 0x21002121, 0x21012021, 0x21012120, 0x21012122, 0x22002020, 0x22002022, 0x22002220, 0x22002222, 0x22012121, 0x22022020, 0x22022022, 
0x22022220, 0x22022222, 0x20100101, 0x20110001, 0x20110102, 0x20110200, 0x20110201, 0x20120101, 0x21100001, 0x21100102, 0x21100201, 0x21110101, 0x21110200, 0x21110202, 0x21120201, 0x21120202, 0x22100101, 0x22110001, 0x22110100, 0x22110102, 0x22110201, 0x22120101, 0x20100011, 0x20100110, 0x20100112, 0x20100211, 0x20110010, 0x20110111, 0x20110210, 0x20110212, 0x20120011, 0x20120110, 0x20120112, 0x20120211, 0x21100010, 0x21100111, 0x21110010, 0x21110011, 0x21110110, 0x21110111, 0x21110112, 0x21110211, 0x21120012, 0x21120111, 0x22100110, 0x22100112, 0x22110012, 0x22110111, 0x22110210, 0x22120011, 0x22120110, 0x22120112, 0x22120211, 0x20100121, 0x20110021, 0x20110120, 0x20110221, 0x20120121, 0x21100120, 0x21100122, 0x21100221, 0x21110020, 0x21110022, 0x21110121, 0x21110220, 0x21120122, 0x21120221, 0x22100121, 0x22110120, 0x22110122, 0x22120221, 0x20101001, 0x20101100, 0x20101102, 0x20111000, 0x20111101, 0x20111200, 0x20121102, 0x21101000, 0x21101202, 0x21111001, 0x21111100, 0x21111101, 0x21111102, 0x21111200, 0x21111201, 0x21121000, 0x21121001, 0x21121002, 0x21121101, 0x22101100, 0x22101102, 0x22111002, 0x22111100, 0x22111101, 0x22111200, 0x22121001, 0x22121201, 0x20101010, 0x20101111, 0x20101210, 0x20101212, 0x20111010, 0x20111011, 0x20111110, 0x20111111, 0x20111112, 0x20111211, 0x20121011, 0x20121111, 0x20121211, 0x20121212, 0x21101011, 0x21101110, 0x21101111, 0x21101112, 0x21101211, 0x21111010, 0x21111011, 0x21111012, 0x21111110, 0x21111111, 0x21111112, 0x21111210, 0x21111211, 0x21111212, 0x21121011, 0x21121110, 0x21121111, 0x21121112, 0x21121211, 0x22101011, 0x22101111, 0x22101210, 0x22111011, 0x22111012, 0x22111110, 0x22111111, 0x22111112, 0x22111211, 0x22111212, 0x22121010, 0x22121012, 0x22121111, 0x22121210, 0x22121212, 0x20101021, 0x20101120, 0x20111020, 0x20111121, 0x20111221, 0x20121020, 0x20121122, 0x20121221, 0x21101121, 0x21101220, 0x21101221, 0x21111021, 0x21111022, 0x21111121, 0x21111122, 0x21111221, 0x21121121, 0x21121220, 0x22101022, 0x22101120, 0x22101221, 0x22101222, 0x22111022, 0x22111120, 0x22111121, 0x22121120, 0x22121122, 0x22121221, 0x20102101, 0x20112102, 0x20112201, 0x20122101, 0x21102001, 0x21102102, 0x21112000, 0x21112002, 0x21112101, 0x21112102, 0x21112202, 0x21122100, 0x21122101, 0x22102101, 0x22112001, 0x22112102, 0x22112201, 0x22122101, 0x20102110, 0x20102112, 0x20102211, 0x20112010, 0x20112012, 0x20112111, 0x20112210, 0x20112212, 0x20122010, 0x20122011, 0x20122110, 0x20122112, 0x21102010, 0x21102012, 0x21102111, 0x21102210, 0x21102212, 0x21112011, 0x21112110, 0x21112111, 0x21112112, 0x21112211, 0x21122012, 0x21122111, 0x21122112, 0x21122212, 0x22102011, 0x22102110, 0x22112010, 0x22112012, 0x22112111, 0x22112212, 0x22122011, 0x22122112, 0x20102121, 0x20112121, 0x20122121, 0x21102120, 0x21102122, 0x21102221, 0x21112020, 0x21112121, 0x21112220, 0x21122021, 0x22102121, 0x22112021, 0x22112120, 0x22112121, 0x22112122, 0x20200000, 0x20200002, 0x20200200, 0x20200202, 0x20210101, 0x20220000, 0x20220002, 0x20220200, 0x20220202, 0x21200101, 0x21210001, 0x21210100, 0x21210102, 0x21210201, 0x22200000, 0x22200002, 0x22200200, 0x22200202, 0x22210101, 0x22220000, 0x22220002, 0x22220200, 0x22220202, 0x20200111, 0x20200211, 0x20210011, 0x20210110, 0x20210112, 0x20210211, 0x20210212, 0x21200112, 0x21200211, 0x21210011, 0x21210111, 0x21210210, 0x21210212, 0x21220011, 0x21220110, 0x22200111, 0x22210010, 0x22210012, 0x22210112, 0x22210211, 0x20200022, 0x20200220, 0x20200222, 0x20210020, 0x20210221, 0x20220022, 0x20220220, 0x20220222, 0x21200121, 0x21210021, 0x21210122, 0x21210221, 
0x21220121, 0x22200020, 0x22200022, 0x22200220, 0x22200222, 0x22210121, 0x22220020, 0x22220022, 0x22220220, 0x22220222, 0x20211201, 0x20221101, 0x21201001, 0x21201100, 0x21211000, 0x21211100, 0x21211101, 0x21211200, 0x21211202, 0x21221001, 0x21221101, 0x21221102, 0x21221200, 0x21221201, 0x22201101, 0x20201112, 0x20201211, 0x20211010, 0x20211012, 0x20211111, 0x20211210, 0x20221112, 0x20221211, 0x21201012, 0x21201111, 0x21211011, 0x21211110, 0x21211111, 0x21211112, 0x21211211, 0x21221111, 0x21221212, 0x22201011, 0x22201110, 0x22201111, 0x22201112, 0x22201211, 0x22211012, 0x22211111, 0x22211210, 0x20201121, 0x20211021, 0x20211122, 0x20211222, 0x20221021, 0x20221121, 0x21201120, 0x21201122, 0x21201222, 0x21211022, 0x21211121, 0x21211122, 0x21211220, 0x21221020, 0x21221022, 0x22201122, 0x22211020, 0x22211121, 0x22211122, 0x22211221, 0x22221021, 0x22221120, 0x22221122, 0x20202000, 0x20202002, 0x20202200, 0x20202202, 0x20222000, 0x20222002, 0x20222200, 0x20222202, 0x21212001, 0x21212100, 0x21212102, 0x21212201, 0x22202000, 0x22202002, 0x22202200, 0x22202202, 0x22212101, 0x22222000, 0x22222002, 0x22222200, 0x22222202, 0x20202111, 0x20212110, 0x20212211, 0x20222011, 0x20222111, 0x21202011, 0x21212010, 0x21212111, 0x21212212, 0x21222011, 0x21222112, 0x21222211, 0x22212010, 0x22212112, 0x20202020, 0x20202022, 0x20202220, 0x20202222, 0x20222020, 0x20222022, 0x20222220, 0x20222222, 0x21212021, 0x21212120, 0x21212122, 0x22202020, 0x22202022, 0x22202220, 0x22202222, 0x22212121, 0x22222020, 0x22222022, 0x22222220, 0x22222222, GGML_TABLE_END() #endif #endif // GGML_COMMON_IMPL #endif // GGML_COMMON_IMPL ggml-org-ggml-3678254/src/ggml-cpu/000077500000000000000000000000001512524704700166665ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/CMakeLists.txt000066400000000000000000000772131512524704700214400ustar00rootroot00000000000000function(ggml_add_cpu_backend_features cpu_name arch) # The feature detection code is compiled as a separate target so that # it can be built without the architecture flags # Since multiple variants of the CPU backend may be included in the same # build, using set_source_files_properties() to set the arch flags is not possible set(GGML_CPU_FEATS_NAME ${cpu_name}-feats) add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/arch/${arch}/cpu-feats.cpp) target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . 
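    # Illustrative note (not part of the build): the variant targets defined below invoke this
    # helper as, e.g., ggml_add_cpu_backend_features(ggml-cpu-foo x86 GGML_AVX GGML_SSE42),
    # where "ggml-cpu-foo" stands for a hypothetical variant name and the trailing GGML_*
    # entries are the ARCH_DEFINITIONS selected for that variant; they reach this object via
    # ${ARGN} while the object itself is still compiled without the per-variant ARCH_FLAGS.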
../include) target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARGN}) target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) target_link_libraries(${cpu_name} PRIVATE ${GGML_CPU_FEATS_NAME}) endfunction() function(ggml_add_cpu_backend_variant_impl tag_name) if (tag_name) set(GGML_CPU_NAME ggml-cpu-${tag_name}) else() set(GGML_CPU_NAME ggml-cpu) endif() ggml_add_backend_library(${GGML_CPU_NAME}) list (APPEND GGML_CPU_SOURCES ggml-cpu/ggml-cpu.c ggml-cpu/ggml-cpu.cpp ggml-cpu/repack.cpp ggml-cpu/repack.h ggml-cpu/hbm.cpp ggml-cpu/hbm.h ggml-cpu/quants.c ggml-cpu/quants.h ggml-cpu/traits.cpp ggml-cpu/traits.h ggml-cpu/amx/amx.cpp ggml-cpu/amx/amx.h ggml-cpu/amx/mmq.cpp ggml-cpu/amx/mmq.h ggml-cpu/ggml-cpu-impl.h ggml-cpu/common.h ggml-cpu/binary-ops.h ggml-cpu/binary-ops.cpp ggml-cpu/unary-ops.h ggml-cpu/unary-ops.cpp ggml-cpu/simd-mappings.h ggml-cpu/vec.h ggml-cpu/vec.cpp ggml-cpu/ops.h ggml-cpu/ops.cpp ) target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17) target_include_directories(${GGML_CPU_NAME} PRIVATE . ggml-cpu) if (APPLE AND GGML_ACCELERATE) find_library(ACCELERATE_FRAMEWORK Accelerate) if (ACCELERATE_FRAMEWORK) message(STATUS "Accelerate framework found") target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_ACCELERATE) target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_NEW_LAPACK) target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_LAPACK_ILP64) target_link_libraries(${GGML_CPU_NAME} PRIVATE ${ACCELERATE_FRAMEWORK}) else() message(WARNING "Accelerate framework not found") endif() endif() if (GGML_OPENMP) find_package(OpenMP) if (OpenMP_FOUND) set(GGML_OPENMP_ENABLED "ON" CACHE INTERNAL "") target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_OPENMP) target_link_libraries(${GGML_CPU_NAME} PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX) else() set(GGML_OPENMP_ENABLED "OFF" CACHE INTERNAL "") message(WARNING "OpenMP not found") endif() endif() if (GGML_LLAMAFILE) target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_LLAMAFILE) list(APPEND GGML_CPU_SOURCES ggml-cpu/llamafile/sgemm.cpp ggml-cpu/llamafile/sgemm.h) endif() if (GGML_CPU_HBM) find_library(memkind memkind REQUIRED) message(STATUS "Using memkind for CPU HBM") target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_HBM) target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind) endif() if (GGML_SYSTEM_ARCH STREQUAL "ARM") message(STATUS "ARM detected") list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/arm/quants.c ggml-cpu/arch/arm/repack.cpp ) if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") message(FATAL_ERROR "MSVC is not supported for ARM, use clang") else() check_cxx_compiler_flag(-mfp16-format=ieee GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E) if (NOT "${GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") list(APPEND ARCH_FLAGS -mfp16-format=ieee) endif() if (GGML_NATIVE) # -mcpu=native does not always enable all the features in some compilers, # so we check for them manually and enable them if available execute_process( COMMAND ${CMAKE_C_COMPILER} -mcpu=native -E -v - INPUT_FILE "/dev/null" OUTPUT_QUIET ERROR_VARIABLE ARM_MCPU RESULT_VARIABLE ARM_MCPU_RESULT ) if (NOT ARM_MCPU_RESULT) string(REGEX MATCH "-mcpu=[^ ']+" ARM_MCPU_FLAG "${ARM_MCPU}") string(REGEX MATCH "-march=[^ ']+" ARM_MARCH_FLAG "${ARM_MCPU}") # on some old GCC we need to read -march= if (ARM_MARCH_FLAG AND NOT "${ARM_MARCH_FLAG}" STREQUAL 
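                # Illustrative, assumed output of the -mcpu=native probe above: with GCC on an
                # AArch64 host the captured stderr typically contains the expanded cc1 command
                # line, e.g. a token such as "-mcpu=neoverse-n1+crc+lse+rcpc+dotprod" (CPU and
                # extension list are made up here), and the two REGEX MATCH calls pick that token
                # out as the candidate native flag, falling back to -march= on compilers that
                # report the expansion that way instead.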
"-march=native") set(ARM_NATIVE_FLAG "${ARM_MARCH_FLAG}") elseif(ARM_MCPU_FLAG AND NOT "${ARM_MCPU_FLAG}" STREQUAL "-mcpu=native") set(ARM_NATIVE_FLAG "${ARM_MCPU_FLAG}") endif() endif() if ("${ARM_NATIVE_FLAG}" STREQUAL "") set(ARM_NATIVE_FLAG -mcpu=native) message(WARNING "ARM -march/-mcpu not found, -mcpu=native will be used") else() message(STATUS "ARM detected flags: ${ARM_NATIVE_FLAG}") endif() include(CheckCXXSourceRuns) macro(check_arm_feature tag feature code) set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) set(CMAKE_REQUIRED_FLAGS "${ARM_NATIVE_FLAG}+${tag}") check_cxx_source_runs("${code}" GGML_MACHINE_SUPPORTS_${tag}) if (GGML_MACHINE_SUPPORTS_${tag}) set(ARM_NATIVE_FLAG_FIX "${ARM_NATIVE_FLAG_FIX}+${tag}") else() set(CMAKE_REQUIRED_FLAGS "${ARM_NATIVE_FLAG}+no${tag}") check_cxx_source_compiles("int main() { return 0; }" GGML_MACHINE_SUPPORTS_no${tag}) if (GGML_MACHINE_SUPPORTS_no${tag}) set(ARM_NATIVE_FLAG_FIX "${ARM_NATIVE_FLAG_FIX}+no${tag}") list(APPEND ARCH_FLAGS -U__ARM_FEATURE_${feature}) endif() endif() set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) endmacro() check_arm_feature(dotprod DOTPROD "#include \nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }") check_arm_feature(i8mm MATMUL_INT8 "#include \nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }") check_arm_feature(sve SVE "#include \nint main() { svfloat32_t _a, _b; volatile svfloat32_t _c = svadd_f32_z(svptrue_b8(), _a, _b); return 0; }") check_arm_feature(sme SME "#include \n__arm_locally_streaming int main() { __asm__ volatile(\"smstart; smstop;\"); return 0; }") list(APPEND ARCH_FLAGS "${ARM_NATIVE_FLAG}${ARM_NATIVE_FLAG_FIX}") else() if (GGML_CPU_ARM_ARCH) list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) elseif(GGML_CPU_ALL_VARIANTS) # Begin with the lowest baseline set(ARM_MCPU "armv8-a") set(ARCH_TAGS "") set(ARCH_DEFINITIONS "") # When a feature is selected, bump the MCPU to the first # version that supported it if (GGML_INTERNAL_DOTPROD) set(ARM_MCPU "armv8.2-a") set(ARCH_TAGS "${ARCH_TAGS}+dotprod") list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) endif() if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) set(ARM_MCPU "armv8.2-a") set(ARCH_TAGS "${ARCH_TAGS}+fp16") list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) endif() if (GGML_INTERNAL_SVE) set(ARM_MCPU "armv8.2-a") set(ARCH_TAGS "${ARCH_TAGS}+sve") list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) endif() if (GGML_INTERNAL_MATMUL_INT8) set(ARM_MCPU "armv8.6-a") set(ARCH_TAGS "${ARCH_TAGS}+i8mm") list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) endif() if (GGML_INTERNAL_SVE2) set(ARM_MCPU "armv8.6-a") set(ARCH_TAGS "${ARCH_TAGS}+sve2") list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) endif() if (GGML_INTERNAL_NOSVE) set(ARCH_TAGS "${ARCH_TAGS}+nosve") endif() if (GGML_INTERNAL_SME) set(ARM_MCPU "armv9.2-a") set(ARCH_TAGS "${ARCH_TAGS}+sme") list(APPEND ARCH_DEFINITIONS GGML_USE_SME) endif() list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) endif() endif() message(STATUS "Checking for ARM features using flags:") foreach(flag IN LISTS ARCH_FLAGS) message(STATUS " ${flag}") endforeach() include(CheckCXXSourceCompiles) set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) string(REPLACE ";" " " ARCH_FLAGS_STR "${ARCH_FLAGS}") set(CMAKE_REQUIRED_FLAGS "${ARCH_FLAGS_STR}") foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC SME) set(ARM_FEATURE "HAVE_${feature}") 
check_cxx_source_compiles( " #if !defined(__ARM_FEATURE_${feature}) # error \"Feature ${feature} is not defined\" #endif int main() { return 0; } " ${ARM_FEATURE} ) endforeach() set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "x86") message(STATUS "x86 detected") list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/x86/quants.c ggml-cpu/arch/x86/repack.cpp ) if (MSVC) # instruction set detection for MSVC only if (GGML_NATIVE) include(ggml-cpu/cmake/FindSIMD.cmake) endif () if (GGML_AVX512) list(APPEND ARCH_FLAGS /arch:AVX512) # /arch:AVX512 includes: __AVX512F__, __AVX512CD__, __AVX512BW__, __AVX512DQ__, and __AVX512VL__ # MSVC has no compile-time flags enabling specific # AVX512 extensions, neither it defines the # macros corresponding to the extensions. # Do it manually. list(APPEND ARCH_DEFINITIONS GGML_AVX512) if (GGML_AVX512_VBMI) list(APPEND ARCH_DEFINITIONS __AVX512VBMI__) if (CMAKE_C_COMPILER_ID STREQUAL "Clang") list(APPEND ARCH_FLAGS -mavx512vbmi) endif() endif() if (GGML_AVX512_VNNI) list(APPEND ARCH_DEFINITIONS __AVX512VNNI__ GGML_AVX512_VNNI) if (CMAKE_C_COMPILER_ID STREQUAL "Clang") list(APPEND ARCH_FLAGS -mavx512vnni) endif() endif() if (GGML_AVX512_BF16) list(APPEND ARCH_DEFINITIONS __AVX512BF16__ GGML_AVX512_BF16) if (CMAKE_C_COMPILER_ID STREQUAL "Clang") list(APPEND ARCH_FLAGS -mavx512bf16) endif() endif() if (GGML_AMX_TILE) list(APPEND ARCH_DEFINITIONS __AMX_TILE__ GGML_AMX_TILE) endif() if (GGML_AMX_INT8) list(APPEND ARCH_DEFINITIONS __AMX_INT8__ GGML_AMX_INT8) endif() if (GGML_AMX_BF16) list(APPEND ARCH_DEFINITIONS __AMX_BF16__ GGML_AMX_BF16) endif() elseif (GGML_AVX2) list(APPEND ARCH_FLAGS /arch:AVX2) list(APPEND ARCH_DEFINITIONS GGML_AVX2 GGML_FMA GGML_F16C) elseif (GGML_AVX) list(APPEND ARCH_FLAGS /arch:AVX) list(APPEND ARCH_DEFINITIONS GGML_AVX) elseif (GGML_SSE42) list(APPEND ARCH_FLAGS /arch:SSE4.2) list(APPEND ARCH_DEFINITIONS GGML_SSE42) endif() if (GGML_AVX_VNNI) list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) endif() if (GGML_BMI2) # MSVC does not define macro __BMI2__ list(APPEND ARCH_DEFINITIONS __BMI2__ GGML_BMI2) endif() else () if (GGML_NATIVE) list(APPEND ARCH_FLAGS -march=native) else () if (GGML_SSE42) list(APPEND ARCH_FLAGS -msse4.2) list(APPEND ARCH_DEFINITIONS GGML_SSE42) endif() if (GGML_F16C) list(APPEND ARCH_FLAGS -mf16c) list(APPEND ARCH_DEFINITIONS GGML_F16C) endif() if (GGML_FMA) list(APPEND ARCH_FLAGS -mfma) list(APPEND ARCH_DEFINITIONS GGML_FMA) endif() if (GGML_BMI2) list(APPEND ARCH_FLAGS -mbmi2) list(APPEND ARCH_DEFINITIONS GGML_BMI2) endif() if (GGML_AVX) list(APPEND ARCH_FLAGS -mavx) list(APPEND ARCH_DEFINITIONS GGML_AVX) endif() if (GGML_AVX2) list(APPEND ARCH_FLAGS -mavx2) list(APPEND ARCH_DEFINITIONS GGML_AVX2) endif() if (GGML_AVX_VNNI) list(APPEND ARCH_FLAGS -mavxvnni) list(APPEND ARCH_DEFINITIONS GGML_AVX_VNNI) endif() if (GGML_AVX512) list(APPEND ARCH_FLAGS -mavx512f) list(APPEND ARCH_FLAGS -mavx512cd) list(APPEND ARCH_FLAGS -mavx512vl) list(APPEND ARCH_FLAGS -mavx512dq) list(APPEND ARCH_FLAGS -mavx512bw) list(APPEND ARCH_DEFINITIONS GGML_AVX512) endif() if (GGML_AVX512_VBMI) list(APPEND ARCH_FLAGS -mavx512vbmi) list(APPEND ARCH_DEFINITIONS GGML_AVX512_VBMI) endif() if (GGML_AVX512_VNNI) list(APPEND ARCH_FLAGS -mavx512vnni) list(APPEND ARCH_DEFINITIONS GGML_AVX512_VNNI) endif() if (GGML_AVX512_BF16) list(APPEND ARCH_FLAGS -mavx512bf16) list(APPEND ARCH_DEFINITIONS GGML_AVX512_BF16) endif() if (GGML_AMX_TILE) list(APPEND ARCH_FLAGS -mamx-tile) list(APPEND ARCH_DEFINITIONS 
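                # Illustrative configuration for this manual (non-native) x86 path, as a sketch only:
                #   cmake -B build -DGGML_NATIVE=OFF -DGGML_SSE42=ON -DGGML_F16C=ON -DGGML_FMA=ON -DGGML_AVX=ON -DGGML_AVX2=ON
                # would append "-msse4.2 -mf16c -mfma -mavx -mavx2" to ARCH_FLAGS and the matching
                # GGML_SSE42/GGML_F16C/GGML_FMA/GGML_AVX/GGML_AVX2 entries to ARCH_DEFINITIONS.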
GGML_AMX_TILE) endif() if (GGML_AMX_INT8) list(APPEND ARCH_FLAGS -mamx-int8) list(APPEND ARCH_DEFINITIONS GGML_AMX_INT8) endif() if (GGML_AMX_BF16) list(APPEND ARCH_FLAGS -mamx-bf16) list(APPEND ARCH_DEFINITIONS GGML_AMX_BF16) endif() endif() endif() if (GGML_BACKEND_DL) if (GGML_NATIVE) # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS") endif() ggml_add_cpu_backend_features(${GGML_CPU_NAME} x86 ${ARCH_DEFINITIONS}) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") message(STATUS "PowerPC detected") list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/powerpc/quants.c) if (GGML_NATIVE) if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") file(READ "/proc/cpuinfo" POWER10_M) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "powerpc") execute_process(COMMAND bash -c "prtconf |grep 'Implementation' | head -n 1" OUTPUT_VARIABLE POWER10_M) endif() string(TOUPPER "${POWER10_M}" POWER10_M_UPPER) string(REGEX MATCHALL "POWER *([0-9]+)" MATCHED_STRING "${POWER10_M_UPPER}") string(REGEX REPLACE "POWER *([0-9]+)" "\\1" EXTRACTED_NUMBER "${MATCHED_STRING}") if (EXTRACTED_NUMBER GREATER_EQUAL 10) list(APPEND ARCH_FLAGS -mcpu=power10) elseif (EXTRACTED_NUMBER EQUAL 9) list(APPEND ARCH_FLAGS -mcpu=power9) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") list(APPEND ARCH_FLAGS -mcpu=powerpc64le -mtune=native) else() list(APPEND ARCH_FLAGS -mcpu=native -mtune=native -mpowerpc64) endif() elseif(GGML_CPU_ALL_VARIANTS) # Begin with the lowest baseline set(ARCH_DEFINITIONS "") # When a feature is selected, bump the MCPU to the first # version that supported it foreach(PVER RANGE 7 11) if(DEFINED GGML_INTERNAL_POWER${PVER}) set(POWERPC_MCPU "power${PVER}") list(APPEND ARCH_DEFINITIONS GGML_USE_POWER${PVER}) endif() endforeach() if (GGML_INTERNAL_VSX) list(APPEND ARCH_DEFINITIONS GGML_USE_VSX) list(APPEND ARCH_FLAGS -mvsx) endif() if (DEFINED POWERPC_MCPU) list(APPEND ARCH_FLAGS -mcpu=${POWERPC_MCPU}) endif() ggml_add_cpu_backend_features(${GGML_CPU_NAME} powerpc ${ARCH_DEFINITIONS}) else() if (GGML_CPU_POWERPC_CPUTYPE) list(APPEND ARCH_FLAGS -mcpu=${GGML_CPU_POWERPC_CPUTYPE}) endif() endif() elseif (GGML_SYSTEM_ARCH STREQUAL "loongarch64") message(STATUS "loongarch64 detected") list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/loongarch/quants.c) list(APPEND ARCH_FLAGS -march=loongarch64) if (GGML_LASX) list(APPEND ARCH_FLAGS -mlasx) endif() if (GGML_LSX) list(APPEND ARCH_FLAGS -mlsx) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") message(STATUS "riscv64 detected") list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/riscv/quants.c ggml-cpu/arch/riscv/repack.cpp ) if (GGML_CPU_RISCV64_SPACEMIT) target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_RISCV64_SPACEMIT ${RISCV64_SPACEMIT_IME_SPEC}) list(APPEND GGML_CPU_SOURCES ggml-cpu/spacemit/ime.cpp ggml-cpu/spacemit/ime.h ggml-cpu/spacemit/ime1_kernels.cpp ggml-cpu/spacemit/ime_kernels.h ) endif() if(NOT GGML_CPU_ALL_VARIANTS) set(MARCH_STR "rv64gc") if (GGML_RV_ZFH) string(APPEND MARCH_STR "_zfh") endif() if (GGML_XTHEADVECTOR) string(APPEND MARCH_STR "_xtheadvector") elseif (GGML_RVV) string(APPEND MARCH_STR "_v") if (GGML_RV_ZVFH) string(APPEND MARCH_STR "_zvfh") endif() if (GGML_RV_ZVFBFWMA) string(APPEND MARCH_STR "_zvfbfwma") endif() endif() if (GGML_RV_ZICBOP) string(APPEND MARCH_STR "_zicbop") endif() if (GGML_RV_ZIHINTPAUSE) string(APPEND MARCH_STR "_zihintpause") endif() list(APPEND ARCH_FLAGS "-march=${MARCH_STR}" 
-mabi=lp64d) else() # Begin with the lowest baseline set(ARCH_DEFINITIONS "") if (GGML_INTERNAL_RVV) message(STATUS "RVV enabled") list(APPEND ARCH_DEFINITIONS GGML_USE_RVV) list(APPEND ARCH_FLAGS -march=rv64gc_v -mabi=lp64d) endif() ggml_add_cpu_backend_features(${GGML_CPU_NAME} riscv ${ARCH_DEFINITIONS}) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") message(STATUS "s390x detected") list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/s390/quants.c) # for native compilation if (GGML_NATIVE) # check machine level to determine target file(READ "/proc/cpuinfo" CPUINFO_CONTENTS) string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) # TODO: Separation to determine activation of VX/VXE/VXE2 if (${S390X_M} MATCHES "8561|8562") message(STATUS "z15 target") list(APPEND ARCH_FLAGS -march=z15) elseif (${S390X_M} MATCHES "3931") message(STATUS "z16 target") list(APPEND ARCH_FLAGS -march=z16) elseif (${S390X_M} MATCHES "9175|9176") # NOTE: Only available from GCC 15.1.0 onwards. Any z17 machine with compile issues must first verify their GCC version. # binutils must also be updated to the latest for the -march=z17 flag to work. Otherwise, use -march=arch15. message(STATUS "z17 target") list(APPEND ARCH_FLAGS -march=arch15) else() message(STATUS "Unknown target") message(WARNING "Unknown target. If you are compiling for z14 and earlier, you might have to add -DGGML_VXE=OFF.") list(APPEND ARCH_FLAGS -march=native -mtune=native) endif() # for cross-compilation elseif(GGML_CPU_ALL_VARIANTS) # range through IBM z15 to z17 # NOTE: update when a new hardware level is released foreach (ZHW RANGE 15 17) if(DEFINED GGML_INTERNAL_Z${ZHW}) message(STATUS "z${ZHW} cross-compile target") list(APPEND ARCH_FLAGS -march=z${ZHW}) endif() endforeach() endif() if (GGML_VXE OR GGML_INTERNAL_VXE2) message(STATUS "VXE2 enabled") list(APPEND ARCH_FLAGS -mvx -mzvector) list(APPEND ARCH_DEFINITIONS GGML_USE_VXE2) endif() if (GGML_INTERNAL_NNPA) message(STATUS "NNPA enabled") list(APPEND ARCH_DEFINITIONS GGML_USE_NNPA) endif() ggml_add_cpu_backend_features(${GGML_CPU_NAME} s390 ${ARCH_DEFINITIONS}) elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "wasm") message(STATUS "Wasm detected") list (APPEND GGML_CPU_SOURCES ggml-cpu/arch/wasm/quants.c) else() message(WARNING "Unknown CPU architecture. 
Falling back to generic implementations.") list(APPEND ARCH_FLAGS -DGGML_CPU_GENERIC) endif() if (GGML_CPU_REPACK) target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_REPACK) endif() if (GGML_CPU_KLEIDIAI) message(STATUS "Using KleidiAI optimized kernels if applicable") # Disable the KleidiAI tests set(KLEIDIAI_BUILD_TESTS OFF) # Fetch KleidiAI sources: include(FetchContent) set(KLEIDIAI_COMMIT_TAG "v1.16.0") set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz") set(KLEIDIAI_ARCHIVE_MD5 "0a9e9008adb6031f9e8cf70dff4a3321") if (POLICY CMP0135) cmake_policy(SET CMP0135 NEW) endif() FetchContent_Declare(KleidiAI_Download URL ${KLEIDIAI_DOWNLOAD_URL} DOWNLOAD_EXTRACT_TIMESTAMP NEW URL_HASH MD5=${KLEIDIAI_ARCHIVE_MD5}) FetchContent_MakeAvailable(KleidiAI_Download) FetchContent_GetProperties(KleidiAI_Download SOURCE_DIR KLEIDIAI_SRC POPULATED KLEIDIAI_POPULATED) if (NOT KLEIDIAI_POPULATED) message(FATAL_ERROR "KleidiAI source downloaded failed.") endif() add_compile_definitions(GGML_USE_CPU_KLEIDIAI) # Remove kleidiai target after fetching it if (TARGET kleidiai) set_target_properties(kleidiai PROPERTIES EXCLUDE_FROM_ALL TRUE) endif() list(APPEND GGML_CPU_SOURCES ggml-cpu/kleidiai/kleidiai.cpp ggml-cpu/kleidiai/kernels.cpp ggml-cpu/kleidiai/kleidiai.h ggml-cpu/kleidiai/kernels.h ) # KleidiAI include_directories( ${KLEIDIAI_SRC}/ ${KLEIDIAI_SRC}/kai/ ${KLEIDIAI_SRC}/kai/ukernels/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_fp32_bf16p_bf16p/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/) set(ARCH_FLAGS_TEMP "${ARCH_FLAGS}") if (NOT ARCH_FLAGS_TEMP) string(REGEX MATCH "-march=[^ ]+" ARCH_FLAGS_TEMP "${CMAKE_C_FLAGS}") endif() string(FIND "${ARCH_FLAGS_TEMP}" "+dotprod" DOTPROD_ENABLED) string(FIND "${ARCH_FLAGS_TEMP}" "+i8mm" I8MM_ENABLED) string(FIND "${ARCH_FLAGS_TEMP}" "+sme" SME_ENABLED) string(FIND "${ARCH_FLAGS_TEMP}" "+sve" SVE_ENABLED) set(PRIVATE_ARCH_FLAGS ${ARCH_FLAGS_TEMP}) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p4x8sb_f32_neon.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32_neon.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f32.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi8cxp_qsi8cx_neon.c) if (NOT DOTPROD_ENABLED MATCHES -1) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod.c 
${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod.c) endif() if (NOT I8MM_ENABLED MATCHES -1) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.c) endif() if (NOT SME_ENABLED MATCHES -1) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_fp32_bf16p_bf16p/kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_fp32_bf16p_bf16p/kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_pack_bf16p2vlx2_f32_sme.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme.c ${KLEIDIAI_SRC}/kai/kai_common_sme_asm.S) set(PRIVATE_ARCH_FLAGS "-fno-tree-vectorize;${PRIVATE_ARCH_FLAGS}+sve+sve2") endif() if (NOT SVE_ENABLED MATCHES -1) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/kai_common_sve_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm.c) endif() set_source_files_properties(${GGML_KLEIDIAI_SOURCES} PROPERTIES COMPILE_OPTIONS "${PRIVATE_ARCH_FLAGS}") list(APPEND GGML_CPU_SOURCES ${GGML_KLEIDIAI_SOURCES}) endif() message(STATUS "Adding CPU backend variant ${GGML_CPU_NAME}: ${ARCH_FLAGS} ${ARCH_DEFINITIONS}") target_sources(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_SOURCES}) target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS}) target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS}) if (EMSCRIPTEN) set_target_properties(${GGML_CPU_NAME} PROPERTIES COMPILE_FLAGS "-msimd128") endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") # The compiler automatically enables "-ffast-math" which can cause NaNs in tests due to "-fassociative-math" target_compile_options(${GGML_CPU_NAME} PRIVATE "-fno-associative-math") endif() endfunction() 
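# Illustrative usage (not part of the build logic above): the function is driven by the
# GGML_* options referenced throughout, so an explicit non-native x86 variant could be
# configured roughly as
#     cmake -B build -DGGML_NATIVE=OFF -DGGML_SSE42=ON -DGGML_AVX=ON -DGGML_AVX2=ON \
#           -DGGML_FMA=ON -DGGML_F16C=ON
# which takes the non-MSVC x86 branch and yields ARCH_FLAGS of
# -msse4.2;-mf16c;-mfma;-mavx;-mavx2 (in the append order used above) together with the
# matching GGML_SSE42/GGML_F16C/GGML_FMA/GGML_AVX/GGML_AVX2 ARCH_DEFINITIONS.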
ggml-org-ggml-3678254/src/ggml-cpu/amx/000077500000000000000000000000001512524704700174535ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/amx/amx.cpp000066400000000000000000000175731512524704700207610ustar00rootroot00000000000000#include "amx.h" #include "common.h" #include "mmq.h" #include "ggml-backend-impl.h" #include "ggml-backend.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "traits.h" #if defined(__linux__) #include #include #endif #include #include #include #if defined(__AMX_INT8__) && defined(__AVX512VNNI__) // AMX type_trais namespace ggml::cpu::amx { class tensor_traits : public ggml::cpu::tensor_traits { bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { size = ggml_backend_amx_desired_wsize(op); return true; } bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { if (op->op == GGML_OP_MUL_MAT) { ggml_backend_amx_mul_mat(params, op); return true; } return false; } }; static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) { static tensor_traits traits; return &traits; } } // namespace ggml::cpu::amx // AMX buffer interface static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) { free(buffer->context); } static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) { return (void *) (buffer->context); } static enum ggml_status ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor); GGML_UNUSED(buffer); return GGML_STATUS_SUCCESS; } static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { memset((char *) tensor->data + offset, value, size); GGML_UNUSED(buffer); } static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { if (qtype_has_amx_kernels(tensor->type)) { GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type)); ggml_backend_amx_convert_weight(tensor, data, offset, size); } else { memcpy((char *) tensor->data + offset, data, size); } GGML_UNUSED(buffer); } /* // need to figure what we need to do with buffer->extra. 
static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_ASSERT(!qtype_has_amx_kernels(tensor->type)); memcpy(data, (const char *)tensor->data + offset, size); GGML_UNUSED(buffer); } static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { if (ggml_backend_buffer_is_host(src->buffer)) { if (qtype_has_amx_kernels(src->type)) { ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst)); } else { memcpy(dst->data, src->data, ggml_nbytes(src)); } return true; } return false; GGML_UNUSED(buffer); } */ static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { memset(buffer->context, value, buffer->size); } static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = { /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer, /* .get_base = */ ggml_backend_amx_buffer_get_base, /* .init_tensor = */ ggml_backend_amx_buffer_init_tensor, /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor, /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor, /* .get_tensor = */ nullptr, /* .cpy_tensor = */ nullptr, /* .clear = */ ggml_backend_amx_buffer_clear, /* .reset = */ nullptr, }; static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "AMX"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * data = ggml_aligned_malloc(size); if (data == NULL) { fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size); return NULL; } return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size); } static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return TENSOR_ALIGNMENT; GGML_UNUSED(buft); } namespace ggml::cpu::amx { class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { // handle only 2d gemm for now auto is_contiguous_2d = [](const struct ggml_tensor * t) { return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1; }; if (op->op == GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) && // src0 must be contiguous is_contiguous_2d(op->src[1]) && // src1 must be contiguous op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type() && op->src[0]->ne[0] % (TILE_K * 2 * 32) == 0 && // TODO: not sure if correct (https://github.com/ggml-org/llama.cpp/pull/16315) op->ne[0] % (TILE_N * 2) == 0 && // out_features is 32x (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == GGML_TYPE_F16))) { // src1 must be host buffer if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { return false; } // src1 must be float32 if (op->src[1]->type == GGML_TYPE_F32) { return true; } } return false; } ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { if (op->op == GGML_OP_MUL_MAT && op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type()) { return (ggml::cpu::tensor_traits *) op->src[0]->extra; } return nullptr; } }; } // namespace ggml::cpu::amx static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { return ggml_backend_amx_get_alloc_size(tensor); GGML_UNUSED(buft); } #define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_REQ_XCOMP_PERM 
0x1023 #define XFEATURE_XTILECFG 17 #define XFEATURE_XTILEDATA 18 static bool ggml_amx_init() { #if defined(__linux__) if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) { fprintf(stderr, "AMX is not ready to be used!\n"); return false; } return true; #elif defined(_WIN32) return true; #else return false; #endif } ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() { static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = { /* .iface = */ { /* .get_name = */ ggml_backend_amx_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment, /* .get_max_size = */ nullptr, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size, /* .is_host = */ nullptr, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ new ggml::cpu::amx::extra_buffer_type(), }; if (!ggml_amx_init()) { return nullptr; } return &ggml_backend_buffer_type_amx; } #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__) ggml-org-ggml-3678254/src/ggml-cpu/amx/amx.h000066400000000000000000000003121512524704700204050ustar00rootroot00000000000000#include "ggml-backend.h" #include "ggml-cpu-impl.h" // GGML internal header #if defined(__AMX_INT8__) && defined(__AVX512VNNI__) ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void); #endif ggml-org-ggml-3678254/src/ggml-cpu/amx/common.h000066400000000000000000000040651512524704700211210ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-cpu-impl.h" #include #include #include #if defined(GGML_USE_OPENMP) #include #endif #define TILE_M 16 #define TILE_N 16 #define TILE_K 32 #define VNNI_BLK 4 #define AMX_BLK_SIZE 32 #define TMM0 0 #define TMM1 1 #define TMM2 2 #define TMM3 3 #define TMM4 4 #define TMM5 5 #define TMM6 6 #define TMM7 7 // parallel routines template ::value, int>::type = 0> inline T div_up(T x, T y) { return (x + y - 1) / y; } template inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) { #if 0 // onednn partition pattern T& n_my = n_end; if (nth <= 1 || n == 0) { n_start = 0; n_my = n; } else { T n1 = div_up(n, nth); T n2 = n1 - 1; T T1 = n - n2 * nth; n_my = ith < T1 ? n1 : n2; n_start = ith <= T1 ? 
ith*n1 : T1 * n1 + (ith - T1) * n2; } n_end += n_start; #else // pytorch aten partition pattern T n_my = div_up(n, nth); n_start = ith * n_my; n_end = std::min(n_start + n_my, n); #endif } template inline void parallel_for(int n, const func_t& f) { #if defined(GGML_USE_OPENMP) #pragma omp parallel { int nth = omp_get_num_threads(); int ith = omp_get_thread_num(); int tbegin, tend; balance211(n, nth, ith, tbegin, tend); f(tbegin, tend); } #else f(0, n); #endif } template inline void parallel_for_ggml(const ggml_compute_params * params, int n, const func_t & f) { int tbegin, tend; balance211(n, params->nth, params->ith, tbegin, tend); f(tbegin, tend); } // quantized types that have AMX support inline bool qtype_has_amx_kernels(const enum ggml_type type) { // TODO: fix padding for vnni format return (type == GGML_TYPE_Q4_0) || (type == GGML_TYPE_Q4_1) || (type == GGML_TYPE_Q8_0) || (type == GGML_TYPE_Q4_K) || (type == GGML_TYPE_Q5_K) || (type == GGML_TYPE_Q6_K) || (type == GGML_TYPE_IQ4_XS); } ggml-org-ggml-3678254/src/ggml-cpu/amx/mmq.cpp000066400000000000000000003212711512524704700207570ustar00rootroot00000000000000 #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Wpedantic" #pragma GCC diagnostic ignored "-Wunused-local-typedefs" #endif #include "amx.h" #include "mmq.h" #include "ggml-impl.h" #include "ggml-cpu-impl.h" #include "simd-mappings.h" #include "quants.h" #include "ggml-quants.h" #include #include #if defined(__gnu_linux__) #include #include #endif #if (defined(_WIN32) || defined(_WIN64)) #define RESTRICT __restrict #else #define RESTRICT __restrict__ #endif #if (defined(_WIN32) || defined(_WIN64)) #define ALWAYS_INLINE __forceinline #elif __has_attribute(always_inline) || defined(__GNUC__) #define ALWAYS_INLINE __attribute__((__always_inline__)) inline #else #define ALWAYS_INLINE inline #endif #if defined(__AMX_INT8__) && defined(__AVX512VNNI__) namespace { // Forced unrolling template struct Unroll { template ALWAYS_INLINE void operator()(const Func& f, Args... args) const { Unroll{}(f, args...); f(std::integral_constant{}, args...); } }; template <> struct Unroll<1> { template ALWAYS_INLINE void operator()(const Func& f, Args... args) const { f(std::integral_constant{}, args...); } }; // type traits template struct PackedTypes {}; template <> struct PackedTypes { using type = int8_t; }; template <> struct PackedTypes { using type = uint8_t; }; template <> struct PackedTypes { using type = int8_t; }; template using packed_B_type = typename PackedTypes::type; template struct do_compensate : std::integral_constant::value> {}; template struct do_unpack : std::integral_constant::value || std::is_same::value> {}; template struct is_type_qkk : std::integral_constant::value || std::is_same::value || std::is_same::value || std::is_same::value> {}; #define GGML_DISPATCH_FLOATING_TYPES(TYPE, ...) \ [&] { \ switch (TYPE) { \ case GGML_TYPE_F16: { \ using type = ggml_fp16_t; \ constexpr int blck_size = 16; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_BF16: { \ using type = ggml_bf16_t; \ constexpr int blck_size = 32; \ return __VA_ARGS__(); \ } \ default: \ fprintf(stderr, "Unsupported floating data type\n"); \ } \ }() #define GGML_DISPATCH_QTYPES(QT, ...) 
\ [&] { \ switch (QT) { \ case GGML_TYPE_Q4_0: { \ using type = block_q4_0; \ using vec_dot_type = block_q8_0; \ constexpr int blck_size = QK4_0; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_Q4_1: { \ using type = block_q4_1; \ using vec_dot_type = block_q8_1; \ constexpr int blck_size = QK4_1; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_Q8_0: { \ using type = block_q8_0; \ using vec_dot_type = block_q8_0; \ constexpr int blck_size = QK8_0; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_Q4_K: { \ using type = block_q4_K; \ using vec_dot_type = block_q8_K; \ constexpr int blck_size = QK_K; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_Q5_K: { \ using type = block_q5_K; \ using vec_dot_type = block_q8_K; \ constexpr int blck_size = QK_K; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_Q6_K: { \ using type = block_q6_K; \ using vec_dot_type = block_q8_K; \ constexpr int blck_size = QK_K; \ return __VA_ARGS__(); \ } \ case GGML_TYPE_IQ4_XS: { \ using type = block_iq4_xs; \ using vec_dot_type = block_q8_K; \ constexpr int blck_size = QK_K; \ return __VA_ARGS__(); \ } \ default: \ fprintf(stderr, "Unsupported quantized data type: %d\n", int(TYPE)); \ } \ }() #define GGML_DISPATCH_BOOL(BOOL_V, BOOL_NAME, ...) \ [&] { \ if (BOOL_V) { \ constexpr bool BOOL_NAME = true; \ return __VA_ARGS__(); \ } else { \ constexpr bool BOOL_NAME = false; \ return __VA_ARGS__(); \ } \ }() // define amx tile config data structure struct tile_config_t{ uint8_t palette_id = 0; uint8_t start_row = 0; uint8_t reserved_0[14] = {0}; uint16_t colsb[16] = {0}; uint8_t rows[16] = {0}; }; // Notes: amx tile config // // Typically, TMUL calculates A and B of size 16 x 64 containing INT8 values, // and accumulate the result to a 16 x 16 matrix C containing INT32 values, // // As many GGUF quantized types as `block_size` of 32, so a 16-16-32 config is used // instead of the normally used 16-16-64 config. // // Block A: {16, 32}, dtype = int8_t // Block B: {16, 32}, dtype = uint8_t/int8_t // Block C: {16, 16}, dtype = int32_t // // Block B needs to be prepacked to vnni format before feeding into TMUL: // packed_B: from {n, k} to {k/vnni_blk, n, vnni_blck}, viewed in 2d, we get {8, 64} // // Therefore, we get tileconfig: // A B C // rows 16 8 16 // colsb 32 64 16 // // For tile distribution, follow a 2-2-4 pattern, e.g. A used TMM2-TMM3, B used TMM0-TMM1, // C used TMM4-TMM7: // B TMM0 B TMM1 // A TMM2 C TMM4 C TMM6 // A TMM3 C TMM5 C TMM7 // // Each `amx` kernel handles 4 blocks at a time: 2MB * 2NB, when m < 2 * BLOCK_M, unpack A // will be needed. // // Here another commonly used pattern 1-3-3 is skipped, as it is mostly used when m <=16; // and the sinlge batch gemm (m=1) has a special fast path with `avx512-vnni`. 
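// Worked example of the configured tile shapes (bytes per row = colsb):
//   A tiles (TMM2/TMM3): 16 rows x 32 colsb =  512 B of int8, one 32-wide quant block per row
//   B tiles (TMM0/TMM1):  8 rows x 64 colsb =  512 B of vnni-packed weights
//   C tiles (TMM4-TMM7): 16 rows x 64 colsb = 1024 B, i.e. a 16 x 16 block of int32 accumulators
// These numbers correspond to the TC_CONFIG_TILE() calls in ggml_tile_config_init() below.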
// // ref: https://www.intel.com/content/www/us/en/developer/articles/code-sample/ // advanced-matrix-extensions-intrinsics-functions.html // #define TC_CONFIG_TILE(i, r, cb) tc.rows[i] = r; tc.colsb[i] = cb void ggml_tile_config_init(void) { static thread_local bool is_first_time = true; if (!is_first_time) { return; } static thread_local tile_config_t tc; tile_config_t current_tc; _tile_storeconfig(¤t_tc); // load only when config changes if (tc.palette_id == 0 || (memcmp(¤t_tc.colsb, &tc.colsb, sizeof(uint16_t) * 8) != 0 && memcmp(¤t_tc.rows, &tc.rows, sizeof(uint8_t) * 8) != 0)) { tc.palette_id = 1; tc.start_row = 0; TC_CONFIG_TILE(TMM0, 8, 64); TC_CONFIG_TILE(TMM1, 8, 64); TC_CONFIG_TILE(TMM2, 16, 32); TC_CONFIG_TILE(TMM3, 16, 32); TC_CONFIG_TILE(TMM4, 16, 64); TC_CONFIG_TILE(TMM5, 16, 64); TC_CONFIG_TILE(TMM6, 16, 64); TC_CONFIG_TILE(TMM7, 16, 64); _tile_loadconfig(&tc); } is_first_time = false; } // we need an extra 16 * 4B (TILE_N * int32_t) for each NB/KB block for compensation. // See the notes `s8s8 igemm compensation in avx512-vnni` for detail. template int get_tile_size() { int tile_size = TILE_N * sizeof(TB); if (do_compensate::value) { tile_size += TILE_N * sizeof(int32_t); } if (std::is_same::value || std::is_same::value) { tile_size += TILE_N * 4; } if (std::is_same::value) { tile_size += TILE_N * 2; } return tile_size; } template int get_row_size(int K) { int KB = K / BLOCK_K; int row_size = KB * sizeof(TB); if (do_compensate::value) { row_size += KB * sizeof(int32_t); } if (std::is_same::value || std::is_same::value) { row_size += KB * 4; } if (std::is_same::value) { row_size += KB * 2; } return row_size; } // vectorized dtype conversion inline float FP16_TO_FP32(ggml_half val) { __m256i v = _mm256_setr_epi16( val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); __m512 o = _mm512_cvtph_ps(v); return _mm512_cvtss_f32(o); } inline __m512 FP16_TO_FP32_VEC(ggml_half val) { __m256i v = _mm256_set1_epi16(val); return _mm512_cvtph_ps(v); } // horizontal reduce inline float _mm512_reduce_max_ps(const __m512 x) { __m512 v = x; __m512 v1 = _mm512_shuffle_f32x4(v, v, 0x4E); v = _mm512_max_ps(v, v1); v1 = _mm512_shuffle_f32x4(v, v, 0xB1); v = _mm512_max_ps(v, v1); v1 = _mm512_shuffle_ps(v, v, 0x4E); v = _mm512_max_ps(v, v1); v1 = _mm512_shuffle_ps(v, v, 0xB1); v = _mm512_max_ps(v, v1); return _mm512_cvtss_f32(v); } // transpose utils #define SHUFFLE_EPI32(a, b, mask) \ _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b), mask)) inline void transpose_8x8_32bit(__m256i * v, __m256i * v1) { // unpacking and 32-bit elements v1[0] = _mm256_unpacklo_epi32(v[0], v[1]); v1[1] = _mm256_unpackhi_epi32(v[0], v[1]); v1[2] = _mm256_unpacklo_epi32(v[2], v[3]); v1[3] = _mm256_unpackhi_epi32(v[2], v[3]); v1[4] = _mm256_unpacklo_epi32(v[4], v[5]); v1[5] = _mm256_unpackhi_epi32(v[4], v[5]); v1[6] = _mm256_unpacklo_epi32(v[6], v[7]); v1[7] = _mm256_unpackhi_epi32(v[6], v[7]); // shuffling the 32-bit elements v[0] = SHUFFLE_EPI32(v1[0], v1[2], 0x44); v[1] = SHUFFLE_EPI32(v1[0], v1[2], 0xee); v[2] = SHUFFLE_EPI32(v1[4], v1[6], 0x44); v[3] = SHUFFLE_EPI32(v1[4], v1[6], 0xee); v[4] = SHUFFLE_EPI32(v1[1], v1[3], 0x44); v[5] = SHUFFLE_EPI32(v1[1], v1[3], 0xee); v[6] = SHUFFLE_EPI32(v1[5], v1[7], 0x44); v[7] = SHUFFLE_EPI32(v1[5], v1[7], 0xee); // shuffling 128-bit elements v1[0] = _mm256_permute2f128_si256(v[2], v[0], 0x02); v1[1] = _mm256_permute2f128_si256(v[3], v[1], 0x02); v1[2] = _mm256_permute2f128_si256(v[6], v[4], 0x02); v1[3] = _mm256_permute2f128_si256(v[7], v[5], 
0x02); v1[4] = _mm256_permute2f128_si256(v[2], v[0], 0x13); v1[5] = _mm256_permute2f128_si256(v[3], v[1], 0x13); v1[6] = _mm256_permute2f128_si256(v[6], v[4], 0x13); v1[7] = _mm256_permute2f128_si256(v[7], v[5], 0x13); } inline void transpose_16x4_32bit(__m512i * r, __m512i * d) { static const __m512i index1 = _mm512_set_epi32( 0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00); d[0] = _mm512_permutexvar_epi32(index1, r[0]); d[1] = _mm512_permutexvar_epi32(index1, r[1]); d[2] = _mm512_permutexvar_epi32(index1, r[2]); d[3] = _mm512_permutexvar_epi32(index1, r[3]); r[0] = _mm512_shuffle_i32x4(d[0], d[1], 0x44); r[1] = _mm512_shuffle_i32x4(d[0], d[1], 0xee); r[2] = _mm512_shuffle_i32x4(d[2], d[3], 0x44); r[3] = _mm512_shuffle_i32x4(d[2], d[3], 0xee); d[0] = _mm512_shuffle_i32x4(r[0], r[2], 0x88); d[1] = _mm512_shuffle_i32x4(r[0], r[2], 0xdd); d[2] = _mm512_shuffle_i32x4(r[1], r[3], 0x88); d[3] = _mm512_shuffle_i32x4(r[1], r[3], 0xdd); } inline void transpose_16x16_32bit(__m512i * v) { __m512i v1[16]; v1[0] = _mm512_unpacklo_epi32(v[0], v[1]); v1[1] = _mm512_unpackhi_epi32(v[0], v[1]); v1[2] = _mm512_unpacklo_epi32(v[2], v[3]); v1[3] = _mm512_unpackhi_epi32(v[2], v[3]); v1[4] = _mm512_unpacklo_epi32(v[4], v[5]); v1[5] = _mm512_unpackhi_epi32(v[4], v[5]); v1[6] = _mm512_unpacklo_epi32(v[6], v[7]); v1[7] = _mm512_unpackhi_epi32(v[6], v[7]); v1[8] = _mm512_unpacklo_epi32(v[8], v[9]); v1[9] = _mm512_unpackhi_epi32(v[8], v[9]); v1[10] = _mm512_unpacklo_epi32(v[10], v[11]); v1[11] = _mm512_unpackhi_epi32(v[10], v[11]); v1[12] = _mm512_unpacklo_epi32(v[12], v[13]); v1[13] = _mm512_unpackhi_epi32(v[12], v[13]); v1[14] = _mm512_unpacklo_epi32(v[14], v[15]); v1[15] = _mm512_unpackhi_epi32(v[14], v[15]); v[0] = _mm512_unpacklo_epi64(v1[0], v1[2]); v[1] = _mm512_unpackhi_epi64(v1[0], v1[2]); v[2] = _mm512_unpacklo_epi64(v1[1], v1[3]); v[3] = _mm512_unpackhi_epi64(v1[1], v1[3]); v[4] = _mm512_unpacklo_epi64(v1[4], v1[6]); v[5] = _mm512_unpackhi_epi64(v1[4], v1[6]); v[6] = _mm512_unpacklo_epi64(v1[5], v1[7]); v[7] = _mm512_unpackhi_epi64(v1[5], v1[7]); v[8] = _mm512_unpacklo_epi64(v1[8], v1[10]); v[9] = _mm512_unpackhi_epi64(v1[8], v1[10]); v[10] = _mm512_unpacklo_epi64(v1[9], v1[11]); v[11] = _mm512_unpackhi_epi64(v1[9], v1[11]); v[12] = _mm512_unpacklo_epi64(v1[12], v1[14]); v[13] = _mm512_unpackhi_epi64(v1[12], v1[14]); v[14] = _mm512_unpacklo_epi64(v1[13], v1[15]); v[15] = _mm512_unpackhi_epi64(v1[13], v1[15]); v1[0] = _mm512_shuffle_i32x4(v[0], v[4], 0x88); v1[1] = _mm512_shuffle_i32x4(v[1], v[5], 0x88); v1[2] = _mm512_shuffle_i32x4(v[2], v[6], 0x88); v1[3] = _mm512_shuffle_i32x4(v[3], v[7], 0x88); v1[4] = _mm512_shuffle_i32x4(v[0], v[4], 0xdd); v1[5] = _mm512_shuffle_i32x4(v[1], v[5], 0xdd); v1[6] = _mm512_shuffle_i32x4(v[2], v[6], 0xdd); v1[7] = _mm512_shuffle_i32x4(v[3], v[7], 0xdd); v1[8] = _mm512_shuffle_i32x4(v[8], v[12], 0x88); v1[9] = _mm512_shuffle_i32x4(v[9], v[13], 0x88); v1[10] = _mm512_shuffle_i32x4(v[10], v[14], 0x88); v1[11] = _mm512_shuffle_i32x4(v[11], v[15], 0x88); v1[12] = _mm512_shuffle_i32x4(v[8], v[12], 0xdd); v1[13] = _mm512_shuffle_i32x4(v[9], v[13], 0xdd); v1[14] = _mm512_shuffle_i32x4(v[10], v[14], 0xdd); v1[15] = _mm512_shuffle_i32x4(v[11], v[15], 0xdd); v[0] = _mm512_shuffle_i32x4(v1[0], v1[8], 0x88); v[1] = _mm512_shuffle_i32x4(v1[1], v1[9], 0x88); v[2] = _mm512_shuffle_i32x4(v1[2], v1[10], 0x88); v[3] = _mm512_shuffle_i32x4(v1[3], v1[11], 0x88); v[4] = _mm512_shuffle_i32x4(v1[4], v1[12], 0x88); v[5] = _mm512_shuffle_i32x4(v1[5], 
v1[13], 0x88); v[6] = _mm512_shuffle_i32x4(v1[6], v1[14], 0x88); v[7] = _mm512_shuffle_i32x4(v1[7], v1[15], 0x88); v[8] = _mm512_shuffle_i32x4(v1[0], v1[8], 0xdd); v[9] = _mm512_shuffle_i32x4(v1[1], v1[9], 0xdd); v[10] = _mm512_shuffle_i32x4(v1[2], v1[10], 0xdd); v[11] = _mm512_shuffle_i32x4(v1[3], v1[11], 0xdd); v[12] = _mm512_shuffle_i32x4(v1[4], v1[12], 0xdd); v[13] = _mm512_shuffle_i32x4(v1[5], v1[13], 0xdd); v[14] = _mm512_shuffle_i32x4(v1[6], v1[14], 0xdd); v[15] = _mm512_shuffle_i32x4(v1[7], v1[15], 0xdd); } void quantize_row_q8_K_vnni(const float * RESTRICT x, void * RESTRICT vy, int64_t k) { assert(k % QK_K == 0); const int KB = k / QK_K; constexpr int kVecs = QK_K / 16; block_q8_K * y = reinterpret_cast(vy); // hold 16 float vecs from x __m512 v[kVecs]; // hold the quants vecs __m512i vq[kVecs / 4]; // hold the packed quants vecs __m512i vq_packed[kVecs / 4]; const __m512 signBit = _mm512_set1_ps(-0.f); for (int i = 0; i < KB; ++i) { // Compute max(abs(e)) for the block __m512 vamax = _mm512_set1_ps(0.f); for (int j = 0; j < kVecs; ++j) { v[j] = _mm512_loadu_ps(x); x += 16; vamax = _mm512_max_ps(vamax, _mm512_andnot_ps(signBit, v[j])); } const float amax = _mm512_reduce_max_ps(vamax); // Quantize these floats const float iscale = 127.f / amax; y[i].d = GGML_CPU_FP32_TO_FP16(1 / iscale); const float id = ( amax != 0.0f ) ? iscale : 0.f; const __m512 vscale = _mm512_set1_ps(id); // Apply multiplier and round to nearest integer for (int j = 0; j < kVecs; ++j) { v[j] = _mm512_mul_ps(v[j], vscale); v[j] = _mm512_roundscale_ps(v[j], (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); } // Pack to epi8 vecs for (int j = 0; j < kVecs / 4; ++j) { __m128i q8_0 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 0])); __m128i q8_1 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 1])); __m128i q8_2 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 2])); __m128i q8_3 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 3])); __m256i q8_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(q8_0), (q8_1), 1); __m256i q8_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(q8_2), (q8_3), 1); vq[j] = _mm512_inserti32x8(_mm512_castsi256_si512(q8_01), q8_23, 1); _mm512_storeu_si512((__m512i *)(y[i].qs + j * 64), vq[j]); } // Compute the bsums with vnni transpose_16x4_32bit(vq, vq_packed); const __m512i one = _mm512_set1_epi8(1); __m512i sum = _mm512_setzero_si512(); for (int k = 0; k < 4; ++k) { sum = _mm512_dpbusd_epi32(sum, one, vq_packed[k]); } _mm256_storeu_si256((__m256i *)(y[i].bsums), _mm512_cvtepi32_epi16(sum)); } } // quantize A from float to `vec_dot_type` template inline void from_float(const float * x, char * vy, int64_t k); template <> inline void from_float(const float * x, char * vy, int64_t k) { quantize_row_q8_0(x, (block_q8_0 *)vy, k); } template <> inline void from_float(const float * x, char * vy, int64_t k) { quantize_row_q8_1(x, (block_q8_1 *)vy, k); } template <> inline void from_float(const float * x, char * vy, int64_t k) { #if 1 // TODO: this is reference impl! 
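    // Note: quantize_row_q8_K_vnni() above fills the same block_q8_K fields (d, qs, bsums)
    // entirely in AVX-512 registers, deriving bsums via _mm512_dpbusd_epi32 after a 16x4
    // transpose; flipping this `#if 1` to `#if 0` would select it instead of the reference path.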
quantize_row_q8_K_ref(x, (block_q8_K *)vy, k); #else quantize_row_q8_K_vnni(x, vy, k); #endif } // load A from memory to array when nrows can not fill in whole tile void unpack_A(int8_t * RESTRICT tile, const block_q8_0 * RESTRICT A, int lda, int nr) { assert(nr != TILE_M); for (int m = 0; m < nr; ++m) { const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs)); _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v); } } void unpack_A(int8_t * RESTRICT tile, const block_q8_1 * RESTRICT A, int lda, int nr) { assert(nr != TILE_M); for (int m = 0; m < nr; ++m) { const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs)); _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v); } } template void unpack_A(int8_t * RESTRICT tile, const block_q8_K * RESTRICT A, int lda, int k, int nr) { assert(nr <= TILE_M); for (int m = 0; m < nr; ++m) { const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs + k * 32)); _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v); } } template <> void unpack_A(int8_t * RESTRICT tile, const block_q8_K * RESTRICT A, int lda, int k, int nr) { assert(nr <= TILE_M); // zero padding k from 16 to 32, so that we don't have to re-config amx const __m128i zero = _mm_setzero_si128(); for (int m = 0; m < nr; ++m) { const __m128i v = _mm_loadu_si128((const __m128i *)(A[m * lda].qs + k * 16)); const __m256i r = _mm256_insertf128_si256(_mm256_castsi128_si256(v), zero, 1); _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), r); } } #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); const __m256i lowMask = _mm256_set1_epi8(0xF); return _mm256_and_si256(lowMask, bytes); } // used for block_q4_K inline __m512i bytes_from_nibbles_64(const uint8_t * rsi) { const __m256i tmp = _mm256_loadu_si256((const __m256i *)rsi); const __m256i lowMask = _mm256_set1_epi8(0xF); const __m256i q4l = _mm256_and_si256(tmp, lowMask); const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(tmp, 4), lowMask); return _mm512_inserti32x8(_mm512_castsi256_si512(q4l), q4h, 1); } // used for block_q5_K inline __m512i bytes_from_nibbles_64(const uint8_t * qs, const uint8_t * qh, int k) { const __m256i lowMask = _mm256_set1_epi8(0xF); __m256i hmask = _mm256_set1_epi8(1); hmask = _mm256_slli_epi16(hmask, k); const __m256i q5bits = _mm256_loadu_si256((const __m256i *)qs); const __m256i hbits = _mm256_loadu_si256((const __m256i *)qh); const __m256i q5l_0 = _mm256_and_si256(q5bits, lowMask); const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), k + 0), 4); const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0); hmask = _mm256_slli_epi16(hmask, 1); const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), lowMask); const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), k + 1), 4); const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1); return _mm512_inserti32x8(_mm512_castsi256_si512(q5_0), q5_1, 1); } // used for block_q6_K inline void bytes_from_nibbles_128(__m512i& r0, __m512i& r1, const uint8_t * qs, const uint8_t * qh) { const __m256i m4 = _mm256_set1_epi8(0xF); const __m256i m2 = _mm256_set1_epi8(0x3); const __m256i q6bits1 = _mm256_loadu_si256((const __m256i *)qs); const __m256i q6bits2 = _mm256_loadu_si256((const __m256i *)(qs + 32)); const __m256i q6bitsH = _mm256_loadu_si256((const 
__m256i *)qh); const __m256i q6h_0 = _mm256_slli_epi16(_mm256_and_si256( q6bitsH, m2), 4); const __m256i q6h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 2), m2), 4); const __m256i q6h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 4), m2), 4); const __m256i q6h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 6), m2), 4); const __m256i q6_0 = _mm256_or_si256(_mm256_and_si256(q6bits1, m4), q6h_0); const __m256i q6_1 = _mm256_or_si256(_mm256_and_si256(q6bits2, m4), q6h_1); const __m256i q6_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q6bits1, 4), m4), q6h_2); const __m256i q6_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q6bits2, 4), m4), q6h_3); r0 = _mm512_inserti32x8(_mm512_castsi256_si512(q6_0), q6_1, 1); r1 = _mm512_inserti32x8(_mm512_castsi256_si512(q6_2), q6_3, 1); } inline __m512i packNibbles(__m512i r0, __m512i r1) { return _mm512_or_si512(r0, _mm512_slli_epi16(r1, 4)); } template inline void pack_qs(void * RESTRICT packed_B, const TB * RESTRICT B, int KB) { int8_t tmp[8 * 64]; __m256i v[8], v2[8]; for (int n = 0; n < 8; ++n) { v[n] = bytes_from_nibbles_32(B[n * KB].qs); } transpose_8x8_32bit(v, v2); for (int n = 0; n < 8; ++n) { _mm256_storeu_si256((__m256i *)(tmp + n * 64), v2[n]); } for (int n = 0; n < 8; ++n) { v[n] = bytes_from_nibbles_32(B[(n + 8) * KB].qs); } transpose_8x8_32bit(v, v2); for (int n = 0; n < 8; ++n) { _mm256_storeu_si256((__m256i *)(tmp + n * 64 + 32), v2[n]); } // pack again with 128 to fully utilize vector length for (int n = 0; n < 8; n += 2) { __m512i r0 = _mm512_loadu_si512((const __m512i *)(tmp + n * 64)); __m512i r1 = _mm512_loadu_si512((const __m512i *)(tmp + n * 64 + 64)); __m512i r1r0 = packNibbles(r0, r1); _mm512_storeu_si512((__m512i *)((char *)packed_B + n * 32), r1r0); } } template <> inline void pack_qs(void * RESTRICT packed_B, const block_q8_0 * RESTRICT B, int KB) { __m256i v[8], v2[8]; for (int n = 0; n < 8; ++n) { v[n] = _mm256_loadu_si256((const __m256i *)(B[n * KB].qs)); } transpose_8x8_32bit(v, v2); for (int n = 0; n < 8; ++n) { _mm256_storeu_si256((__m256i *)((char *)packed_B + n * 64), v2[n]); } for (int n = 0; n < 8; ++n) { v[n] = _mm256_loadu_si256((const __m256i *)(B[(n + 8) * KB].qs)); } transpose_8x8_32bit(v, v2); for (int n = 0; n < 8; ++n) { _mm256_storeu_si256((__m256i *)((char *)packed_B + n * 64 + 32), v2[n]); } } template <> inline void pack_qs(void * RESTRICT packed_B, const block_q4_K * RESTRICT B, int KB) { __m512i v[16]; // QK_K 256 with 8 groups, handle 2 groups at a time char * pb = (char *)packed_B; for (int k = 0; k < QK_K / 64; ++k) { // pack 2 groups { n, g, k} to {g, k/4, 4n} // e.g. {16, 2, 32} to {2, 8, 64} for (int n = 0; n < TILE_N; ++n) { v[n] = bytes_from_nibbles_64(B[n * KB].qs + k * 32); } transpose_16x16_32bit(v); // pack again with 128 to fully utilize vector length for (int n = 0; n < TILE_N; n += 2) { _mm512_storeu_si512((__m512i *)pb, packNibbles(v[n], v[n + 1])); pb += 64; } } } template <> inline void pack_qs(void * RESTRICT packed_B, const block_q5_K * RESTRICT B, int KB) { __m512i v[16]; const __m512i lowMask = _mm512_set1_epi8(0xF); // QK_K 256 with 8 groups, handle 2 groups at a time char * pb = (char *)packed_B; char * ph = (char *)packed_B + (QK_K / 2) * TILE_N; for (int k = 0; k < QK_K / 64; ++k) { // pack 2 groups { n, g, k} to {g, k/4, 4n} // e.g. 
{16, 2, 32} to {2, 8, 64} for (int n = 0; n < TILE_N; ++n) { v[n] = bytes_from_nibbles_64(B[n * KB].qs + k * 32, B[n * KB].qh, /* group */2 * k); } transpose_16x16_32bit(v); // 1. pack lower 4bits with 2 groups for (int n = 0; n < TILE_N; n += 2) { // get lower 4 bits const __m512i r0 = _mm512_and_si512(v[n], lowMask); const __m512i r1 = _mm512_and_si512(v[n + 1], lowMask); _mm512_storeu_si512((__m512i *)pb, packNibbles(r0, r1)); pb += 64; } // 2. pack higher 1bit with 2 groups const __m512i hmask = _mm512_set1_epi8(0x10); for (int g = 0; g < 2; ++g) { __m512i hbits = _mm512_setzero_si512(); hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 0], hmask), 4)); hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 1], hmask), 3)); hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 2], hmask), 2)); hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 3], hmask), 1)); hbits = _mm512_add_epi8(hbits, _mm512_and_si512(v[g * 8 + 4], hmask) ); hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 5], hmask), 1)); hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 6], hmask), 2)); hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 7], hmask), 3)); _mm512_storeu_si512((__m512i *)ph, hbits); ph += 64; } } } template <> inline void pack_qs(void * RESTRICT packed_B, const block_q6_K * RESTRICT B, int KB) { __m512i v[32]; const __m512i lowMask = _mm512_set1_epi8(0xF); // QK_K 256 with 8 groups, handle 4 groups at a time char * pb = (char *)packed_B; char * ph = (char *)packed_B + (QK_K / 2) * TILE_N; for (int k = 0; k < QK_K / 128; ++k) { for (int n = 0; n < TILE_N; ++n) { bytes_from_nibbles_128(v[n], v[n + 16], B[n * KB].ql + k * 64, B[n * KB].qh + k * 32); } // top half: group 0,1 or 4,5; bottom half: group 2,3 or 6,7 transpose_16x16_32bit(v); transpose_16x16_32bit(v + 16); // 1. pack lower 4bits with 4 groups for (int n = 0; n < 32; n += 2) { const __m512i r0 = _mm512_and_si512(v[n], lowMask); const __m512i r1 = _mm512_and_si512(v[n + 1], lowMask); _mm512_storeu_si512((__m512i *)pb, packNibbles(r0, r1)); pb += 64; } // 2. 
pack higher 2bit with 4 groups const __m512i hmask = _mm512_set1_epi8(0x30); for (int g = 0; g < 8; ++g) { __m512i hbits = _mm512_setzero_si512(); hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 4 + 0], hmask), 4)); hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 4 + 1], hmask), 2)); hbits = _mm512_add_epi8(hbits, _mm512_and_si512(v[g * 4 + 2], hmask) ); hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 4 + 3], hmask), 2)); _mm512_storeu_si512((__m512i *)ph, hbits); ph += 64; } } } template <> inline void pack_qs(void * RESTRICT packed_B, const block_iq4_xs * RESTRICT B, int KB) { __m512i v[16]; char * pb = (char *)packed_B; for (int k = 0; k < QK_K / 64; ++k) { for (int n = 0; n < TILE_N; ++n) { __m256i r0 = bytes_from_nibbles_32(B[n * KB].qs + k * 32 + 0); __m256i r1 = bytes_from_nibbles_32(B[n * KB].qs + k * 32 + 16); v[n] = _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1); } transpose_16x16_32bit(v); // pack again with 128 to fully utilize vector length for (int n = 0; n < TILE_N; n += 2) { _mm512_storeu_si512((__m512i *)pb, packNibbles(v[n], v[n + 1])); pb += 64; } } } // pack B to vnni formats in 4bits or 8 bits void pack_B(void * RESTRICT packed_B, const block_q4_0 * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); ggml_half * d0 = reinterpret_cast((char *)packed_B + TILE_N * TILE_K / 2); for (int n = 0; n < TILE_N; ++n) { d0[n] = B[n * KB].d; } } void pack_B(void * RESTRICT packed_B, const block_q4_1 * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); ggml_half * d0 = reinterpret_cast((char *)packed_B + TILE_N * TILE_K / 2); ggml_half * m0 = d0 + TILE_N; for (int n = 0; n < TILE_N; ++n) { d0[n] = B[n * KB].d; m0[n] = B[n * KB].m; } } inline void s8s8_compensation(void * RESTRICT packed_B) { // packed_B layout: // quants {TILE_N, TILEK} int8_t // d0 {TILE_N} ggml_half // comp {TILE_N} int32_t const int offset = TILE_N * TILE_K + TILE_N * sizeof(ggml_half); __m512i vcomp = _mm512_setzero_si512(); const __m512i off = _mm512_set1_epi8(static_cast(0x80)); for (int k = 0; k < 8; ++k) { __m512i vb = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + k * 64)); vcomp = _mm512_dpbusd_epi32(vcomp, off, vb); } _mm512_storeu_si512((__m512i *)((char *)(packed_B) + offset), vcomp); } void pack_B(void * RESTRICT packed_B, const block_q8_0 * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); ggml_half * d0 = reinterpret_cast((char *)packed_B + TILE_N * TILE_K); for (int n = 0; n < TILE_N; ++n) { d0[n] = B[n * KB].d; } s8s8_compensation(packed_B); } // convert 8 * {min, scale} from int6 to int8 inline void unpack_mins_and_scales(const uint8_t * scales, uint32_t * utmp) { const uint32_t kmask1 = 0x3f3f3f3f; const uint32_t kmask2 = 0x0f0f0f0f; const uint32_t kmask3 = 0x03030303; memcpy(utmp, scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; } // packed_B layout: // quants {8, TILE_N, 16} uint8 // scales {8, TILE_N} uint8 // mins {8, TILE_N} uint8 // d {TILE_N} ggml_half // dmin {TILE_N} ggml_half void pack_B(void * RESTRICT packed_B, const block_q4_K * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); uint8_t * scales = reinterpret_cast((char *)packed_B + (QK_K / 2) * TILE_N); uint8_t * mins = scales + 8 * TILE_N; ggml_half * d = reinterpret_cast(mins + 8 * TILE_N); ggml_half * dmin = d + TILE_N; union { uint32_t u32[4]; uint8_t u8[16]; } s; for (int n 
= 0; n < TILE_N; ++n) { unpack_mins_and_scales(B[n * KB].scales, s.u32); for (int k = 0; k < 8; ++k) { scales[k * TILE_N + n] = s.u8[k]; mins[(k >> 1) * TILE_N * 2 + n * 2 + (k & 0x1)] = s.u8[k + 8]; } d[n] = B[n * KB].d; dmin[n] = B[n * KB].dmin; } } // packed_B layout: // quants {8, TILE_N, 16} uint8 // qh {8, TILE_N, 4} uint8 // scales {8, TILE_N} uint8 // mins {8, TILE_N} uint8 // d {TILE_N} ggml_half // dmin {TILE_N} ggml_half void pack_B(void * RESTRICT packed_B, const block_q5_K * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); uint8_t * scales = reinterpret_cast((char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N); uint8_t * mins = scales + 8 * TILE_N; ggml_half * d = reinterpret_cast(mins + 8 * TILE_N); ggml_half * dmin = d + TILE_N; union { uint32_t u32[4]; uint8_t u8[16]; } s; for (int n = 0; n < TILE_N; ++n) { unpack_mins_and_scales(B[n * KB].scales, s.u32); for (int k = 0; k < 8; ++k) { scales[k * TILE_N + n] = s.u8[k]; mins[(k >> 1) * TILE_N * 2 + n * 2 + (k & 0x1)] = s.u8[k + 8]; } d[n] = B[n * KB].d; dmin[n] = B[n * KB].dmin; } } // packed_B layout: // quants {16, TILE_N, 8} uint8 // qh {16, TILE_N, 4} uint8 // scales {16, TILE_N} uint8 // d {TILE_N} ggml_half void pack_B(void * RESTRICT packed_B, const block_q6_K * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); uint8_t * scales = reinterpret_cast((char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N); ggml_half * d = reinterpret_cast(scales + 16 * TILE_N); for (int n = 0; n < TILE_N; ++n) { const int8_t * ps = B[n * KB].scales; for (int k = 0; k < 16; ++k) { scales[k * TILE_N + n] = ps[k]; } d[n] = B[n * KB].d; } } // packed_B layout: // quants {8, TILE_N, 16} uint8 // scales {8, TILE_N} int8 // d {TILE_N} ggml_half void pack_B(void * RESTRICT packed_B, const block_iq4_xs * RESTRICT B, int KB) { pack_qs(packed_B, B, KB); int8_t * scales = reinterpret_cast((char *)packed_B + (QK_K / 2) * TILE_N); ggml_half * d = reinterpret_cast(scales + 8 * TILE_N); // pack the scales for (int n = 0; n < TILE_N; ++n) { uint16_t sh = B[n * KB].scales_h; for (int k = 0; k < 8; k += 2) { const int16_t ls1 = ((B[n * KB].scales_l[k / 2] & 0xf) | ((sh << 4) & 0x30)) - 32; const int16_t ls2 = ((B[n * KB].scales_l[k / 2] >> 4) | ((sh << 2) & 0x30)) - 32; scales[(k + 0) * TILE_N + n] = ls1; scales[(k + 1) * TILE_N + n] = ls2; sh >>= 4; } d[n] = B[n * KB].d; } } template> void unpack_B(packed_B_t * RESTRICT tile, const void * RESTRICT packed_B) { GGML_UNUSED(tile); GGML_UNUSED(packed_B); } template <> void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B) { const __m512i off = _mm512_set1_epi8(8); const __m512i lowMask = _mm512_set1_epi8(0xF); for (int n = 0; n < 8; n += 2) { __m512i bytes = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + n * 32)); const __m512i r0 = _mm512_sub_epi8(_mm512_and_si512(bytes, lowMask), off); const __m512i r1 = _mm512_sub_epi8(_mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask), off); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1); } } template <> void unpack_B(uint8_t * RESTRICT tile, const void * RESTRICT packed_B) { const __m512i lowMask = _mm512_set1_epi8(0xF); for (int n = 0; n < 8; n += 2) { __m512i bytes = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + n * 32)); const __m512i r0 = _mm512_and_si512(bytes, lowMask); const __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0); _mm512_storeu_si512((__m512i *)(tile + n 
* 64 + 64), r1); } } // packed_B_t for QKK is int8_t template void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) { const int packed_B_group_size = QK_K / 2 * TILE_N / 8; const char * packed_B_group = (const char *)packed_B + k * packed_B_group_size; const __m512i lowMask = _mm512_set1_epi8(0xF); for (int n = 0; n < 8; n += 2) { __m512i bytes = _mm512_loadu_si512(packed_B_group + n * 32); const __m512i r0 = _mm512_and_si512(bytes, lowMask); const __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1); } } template <> void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) { // lower 4bits, stride 256 bytes const int packed_l4_group_size = QK_K / 2 * TILE_N / 8; const char * pb = (const char *)packed_B + k * packed_l4_group_size; // higher 1bit, stride 64 bytes const int packed_h1_group_size = QK_K / 8 * TILE_N / 8; const char * ph = (const char *)packed_B + (QK_K / 2) * TILE_N + k * packed_h1_group_size; const __m512i hbits = _mm512_loadu_si512(ph); const __m512i lowMask = _mm512_set1_epi8(0xF); __m512i hmask0 = _mm512_set1_epi8(0x1); __m512i hmask1 = _mm512_set1_epi8(0x2); for (int n = 0; n < 8; n += 2) { __m512i bytes = _mm512_loadu_si512(pb + n * 32); __m512i r0 = _mm512_and_si512(bytes, lowMask); __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); __m512i h0 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask0), n), 4); __m512i h1 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), n + 1), 4); hmask0 = _mm512_slli_epi16(hmask0, 2); hmask1 = _mm512_slli_epi16(hmask1, 2); r0 = _mm512_add_epi8(r0, h0); r1 = _mm512_add_epi8(r1, h1); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1); } } template <> void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) { // lower 4bits, stride 128 bytes const int packed_l4_group_size = QK_K / 2 * TILE_N / 16; const char * pb = (const char *)packed_B + k * packed_l4_group_size; // higher 2bits, stride 64 bytes const int packed_h2_group_size = QK_K / 4 * TILE_N / 16; const char * ph = (const char *)packed_B + (QK_K / 2) * TILE_N + k * packed_h2_group_size; const __m512i hbits = _mm512_loadu_si512(ph); const __m512i off = _mm512_set1_epi8(32); const __m512i lowMask = _mm512_set1_epi8(0xF); __m512i hmask0 = _mm512_set1_epi8(0x3); // 0011 __m512i hmask1 = _mm512_set1_epi8(0xC); // 1100 // notes: skip zero padding from row4 to row7 as we have done so in `unpack_A` __m512i bytes = _mm512_loadu_si512(pb); __m512i r0 = _mm512_and_si512(bytes, lowMask); __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); __m512i h0 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask0), 4); __m512i h1 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask1), 2); _mm512_storeu_si512((__m512i *)(tile + 0), _mm512_sub_epi8(_mm512_add_epi8(r0, h0), off)); _mm512_storeu_si512((__m512i *)(tile + 64), _mm512_sub_epi8(_mm512_add_epi8(r1, h1), off)); hmask0 = _mm512_slli_epi16(hmask0, 4); hmask1 = _mm512_slli_epi16(hmask1, 4); bytes = _mm512_loadu_si512(pb + 64); r0 = _mm512_and_si512(bytes, lowMask); r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); h0 = _mm512_and_si512(hbits, hmask0); h1 = _mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), 2); _mm512_storeu_si512((__m512i *)(tile + 128), _mm512_sub_epi8(_mm512_add_epi8(r0, h0), off)); _mm512_storeu_si512((__m512i 
*)(tile + 192), _mm512_sub_epi8(_mm512_add_epi8(r1, h1), off)); } template <> void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) { static const __m512i values128 = _mm512_set_epi8( 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127, 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127, 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127, 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127 ); const int packed_B_group_size = QK_K / 2 * TILE_N / 8; const char * pb = (const char *)packed_B + k * packed_B_group_size; const __m512i lowMask = _mm512_set1_epi8(0xF); for (int n = 0; n < 8; n += 2) { __m512i bytes = _mm512_loadu_si512(pb + n * 32); const __m512i r0 = _mm512_shuffle_epi8(values128, _mm512_and_si512(bytes, lowMask)); const __m512i r1 = _mm512_shuffle_epi8(values128, _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask)); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0); _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1); } } template struct acc_C {}; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_0 * A, int lda, const void * packed_B, int nr) { const int offset = TILE_N * TILE_K / 2; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset))); for (int m = 0; m < nr; ++m) { const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d)); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_1 * A, int lda, const void * packed_B, int nr) { const int offset = TILE_N * TILE_K / 2; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset))); const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset + TILE_N * sizeof(ggml_half)))); for (int m = 0; m < nr; ++m) { const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d)); const __m512 vs1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].s)); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum); vsum = _mm512_fmadd_ps(vm0, vs1, vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_0 * A, int lda, const void * packed_B, int nr) { const int offset = TILE_N * TILE_K; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset))); for (int m = 0; m < nr; ++m) { const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d)); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT 
tile, const block_q8_K * A, int lda, const void * packed_B, int nr) { const uint8_t * scales = reinterpret_cast((const char *)packed_B + (QK_K / 2) * TILE_N); const uint8_t * mins = scales + 8 * TILE_N; const ggml_half * d0 = reinterpret_cast(mins + 8 * TILE_N); const ggml_half * dmin = d0 + TILE_N; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0)); const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)dmin)); for (int m = 0; m < nr; ++m) { const float d1 = A[m * lda].d; const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0); const __m512 vdm = _mm512_mul_ps(_mm512_set1_ps(-d1), vdmin); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[m * lda].bsums); const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); __m512i acc_m = _mm512_setzero_si512(); for (int k = 0; k < 4; ++k) { __m512i vmask = _mm512_set1_epi32(k); __m512i va = _mm512_permutexvar_epi32(vmask, _mm512_castsi128_si512(q8s)); __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(mins + k * 32))); acc_m = _mm512_dpwssds_epi32(acc_m, va, vb); } vsum = _mm512_fmadd_ps(vtile, vd, vsum); vsum = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc_m), vdm, vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) { const uint8_t * scales = reinterpret_cast((const char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N); const uint8_t * mins = scales + 8 * TILE_N; const ggml_half * d0 = reinterpret_cast(mins + 8 * TILE_N); const ggml_half * dmin = d0 + TILE_N; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0)); const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)dmin)); for (int m = 0; m < nr; ++m) { const float d1 = A[m * lda].d; const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0); const __m512 vdm = _mm512_mul_ps(_mm512_set1_ps(-d1), vdmin); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[m * lda].bsums); const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); __m512i acc_m = _mm512_setzero_si512(); for (int k = 0; k < 4; ++k) { __m512i vmask = _mm512_set1_epi32(k); __m512i va = _mm512_permutexvar_epi32(vmask, _mm512_castsi128_si512(q8s)); __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(mins + k * 32))); acc_m = _mm512_dpwssds_epi32(acc_m, va, vb); } vsum = _mm512_fmadd_ps(vtile, vd, vsum); vsum = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc_m), vdm, vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) { const uint8_t * scales = reinterpret_cast((const char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N); const ggml_half * d0 = reinterpret_cast(scales + 16 * TILE_N); const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0)); for (int m = 0; m < nr; ++m) { const float d1 = A[m * lda].d; 
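// A minimal scalar sketch of the mins compensation performed by the Q4_K/Q5_K
// acc_C specializations above. There a weight element is reconstructed per 32-wide
// group as w = d0*scale[g]*q - dmin*min[g], so the dot product with the int8
// activations splits into the integer tile result (scaled by d1*d0) minus a mins
// term d1*dmin * sum_g min[g]*bsum[g]. All names below are local to this sketch;
// bsums is assumed to already hold per-32 group sums (the kernel folds the per-16
// bsums pairwise with _mm_hadd_epi16 before using them).
#include <cstdint>

static float kquant_dot_sketch(const int8_t * a, const uint8_t * q,
                               const uint8_t * scales, const uint8_t * mins,
                               const int16_t * bsums,
                               float d0, float dmin, float d1) {
    constexpr int GROUPS = 8, GROUP_SIZE = 32;   // QK_K = 256 split into 8 groups of 32
    int32_t sumi = 0;                            // what the int8 tile multiply produces
    int32_t summ = 0;                            // mins correction, folded in afterwards
    for (int g = 0; g < GROUPS; ++g) {
        int32_t s = 0;
        for (int k = 0; k < GROUP_SIZE; ++k) {
            s += (int32_t)a[g*GROUP_SIZE + k] * (int32_t)q[g*GROUP_SIZE + k];
        }
        sumi += s * (int32_t)scales[g];
        summ += (int32_t)bsums[g] * (int32_t)mins[g];
    }
    return d1 * d0 * (float)sumi - d1 * dmin * (float)summ;   // matches vd = d1*d0, vdm = -d1*dmin above
}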
const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } vsum = _mm512_fmadd_ps(vtile, vd, vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template struct acc_C { static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) { const int8_t * scales = reinterpret_cast((const char *)packed_B + (QK_K / 2) * TILE_N); const ggml_half * d0 = reinterpret_cast(scales + 8 * TILE_N); const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0)); for (int m = 0; m < nr; ++m) { const float d1 = A[m * lda].d; const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; if (is_acc) { vsum = _mm512_loadu_ps(C + m * ldc); } else { vsum = _mm512_set1_ps(0.f); } vsum = _mm512_fmadd_ps(vtile, vd, vsum); _mm512_storeu_ps(C + m * ldc, vsum); } } }; template constexpr int get_quants_size(); template <> constexpr int get_quants_size() { return (QK_K / 2) * TILE_N; } template <> constexpr int get_quants_size() { return (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N; } template <> constexpr int get_quants_size() { return (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N; } template <> constexpr int get_quants_size() { return (QK_K / 2) * TILE_N; } // used for QKK format template ::value, int>::type = 0> inline void scale_C(const int32_t * RESTRICT tile, int32_t * RESTRICT sumi, const void * packed_B, int k, int nr) { const uint8_t * scales = reinterpret_cast((const char *)packed_B + get_quants_size()); const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(scales + k * TILE_N))); for (int m = 0; m < nr; ++m) { __m512i vsumi; if (is_acc) { vsumi = _mm512_loadu_si512(sumi + m * TILE_N); } else { vsumi = _mm512_setzero_si512(); } __m512i vtile = _mm512_loadu_si512(tile + m * TILE_N); vsumi = _mm512_add_epi32(vsumi, _mm512_mullo_epi32(vtile, vscale)); _mm512_storeu_si512((__m512i *)(sumi + m * TILE_N), vsumi); } } template struct tinygemm_kernel_avx { static void apply(int K, const TA * RESTRICT A, const TB * RESTRICT B, TC * RESTRICT C, int ldc) { GGML_UNUSED(K); GGML_UNUSED(A); GGML_UNUSED(B); GGML_UNUSED(C); GGML_UNUSED(ldc); } }; template struct tinygemm_kernel_avx { static void apply(int K, const float * RESTRICT A, const ggml_fp16_t * RESTRICT B, float * RESTRICT C, int ldc) { constexpr int ROWS = BLOCK_M; constexpr int COLS = BLOCK_N; assert(BLOCK_K == 16); __m512 va; __m512 vb[COLS]; __m512 vc[ROWS * COLS]; auto loadc = [&](auto idx) { vc[idx] = _mm512_setzero_ps(); }; Unroll{}(loadc); auto compute = [&](auto idx, auto k) { constexpr int row = idx / COLS; constexpr int col = idx % COLS; if constexpr (col == 0) { va = _mm512_loadu_ps(A + row * K + k); } if constexpr (row == 0) { vb[col] = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(B + col * K + k))); } vc[idx] = _mm512_fmadd_ps(va, vb[col], vc[idx]); }; for (int k = 0; k < K; k += 16) { Unroll{}(compute, k); } auto storec = [&](auto idx) { constexpr int row = idx / COLS; constexpr int col = idx % COLS; C[row * ldc + col] = _mm512_reduce_add_ps(vc[idx]); }; Unroll{}(storec); } }; #define LAUNCH_TINYGEMM_KERNEL_AVX(MB_SIZE, NB_SIZE) \ tinygemm_kernel_avx::apply( \ K, (const float *)src1->data + mb_start * K, \ (const type *)src0->data + nb_start * K, \ (float *)dst->data + mb_start 
* ldc + nb_start, ldc); // re-organize in the format {NB, KB, TILE_SIZE}: #define PACKED_INDEX(n, k, KB, tile_size) (n * KB + k) * tile_size template void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, int K) { const int NB = N / TILE_N; const int KB = K / BLOCK_K; const int TILE_SIZE = get_tile_size(); // parallel on NB should be enough parallel_for(NB, [&](int begin, int end) { for (int n = begin; n < end; ++n) { for (int k = 0; k < KB; ++k) { int n0 = n * TILE_N; pack_B((char *)packed_B + PACKED_INDEX(n, k, KB, TILE_SIZE), &B[n0 * KB + k], KB); } } }); } template struct tinygemm_kernel_vnni {}; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; const int TILE_SIZE = TILE_N * sizeof(block_q4_0); const block_q8_0 * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); __m512i va[8]; __m512 vc[COLS]; __m512 vd1; // sum of offsets, shared across COLS // // avx512-vnni does not have `_mm512_dpbssd_epi32`, // need to transfrom ss to us: // a * (b - 8) is equavilent to b * a - 8 * a // s u u u s u s // __m512i vcomp; const __m512i off = _mm512_set1_epi8(8); const __m512i lowMask = _mm512_set1_epi8(0xF); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); auto compute = [&](auto col, auto i) { // load a and compute compensation if constexpr (col == 0) { const int32_t * a_ptr = reinterpret_cast(A[0 * KB + i].qs); vcomp = _mm512_setzero_si512(); for (int k = 0; k < 8; ++k) { va[k] = _mm512_set1_epi32(a_ptr[k]); vcomp = _mm512_dpbusd_epi32(vcomp, off, va[k]); } vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].d)); } // load b __m512i vsum = _mm512_setzero_si512(); const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); for (int k = 0; k < 8; k += 2) { __m512i bytes = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 32)); __m512i vb0 = _mm512_and_si512(bytes, lowMask); vsum = _mm512_dpbusd_epi32(vsum, vb0, va[k + 0]); __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); vsum = _mm512_dpbusd_epi32(vsum, vb1, va[k + 1]); } const int offset = TILE_N * TILE_K / 2; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset))); vsum = _mm512_sub_epi32(vsum, vcomp); vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; const int TILE_SIZE = TILE_N * sizeof(block_q4_1); const block_q8_1 * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); __m512i va[8]; __m512i vb[8]; __m512 vc[COLS]; __m512 vd1, vs1; const __m512i lowMask = _mm512_set1_epi8(0xF); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); auto compute = [&](auto col, auto i) { // load a if constexpr (col == 0) { const int32_t * a_ptr = reinterpret_cast(A[0 * KB + i].qs); for (int k = 0; k < 8; ++k) { va[k] = _mm512_set1_epi32(a_ptr[k]); } vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].d)); vs1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].s)); } // load b const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); 
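// A scalar model of the sign trick used by the Q4_0 VNNI kernel above: AVX512-VNNI
// has no signed x signed dot product (_mm512_dpbssd_epi32 does not exist), so
// sum_k a_k * (b_k - 8) is rewritten as sum_k b_k * a_k - sum_k 8 * a_k and computed
// with two unsigned x signed _mm512_dpbusd_epi32 accumulations (the raw nibble b_k
// and the constant 8 are the unsigned operands). Names below are local to this sketch.
#include <cstdint>

static int32_t q4_0_dot_sketch(const uint8_t * b_nibbles, const int8_t * a, int n) {
    int32_t acc  = 0;   // plays the role of vsum  (dpbusd with the raw nibbles)
    int32_t comp = 0;   // plays the role of vcomp (dpbusd with the constant 8)
    for (int k = 0; k < n; ++k) {
        acc  += (int32_t)b_nibbles[k] * (int32_t)a[k];  // u8 * s8 product
        comp += 8 * (int32_t)a[k];
    }
    return acc - comp;  // == sum_k a[k] * ((int)b_nibbles[k] - 8)
}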
for (int k = 0; k < 8; k += 2) { __m512i bytes = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 32)); vb[k + 0] = _mm512_and_si512(bytes, lowMask); vb[k + 1] = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); } const int offset = TILE_N * TILE_K / 2; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset))); const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset + TILE_N * sizeof(ggml_half)))); __m512i vsum = _mm512_setzero_si512(); for (int k = 0; k < 8; ++k) { vsum = _mm512_dpbusd_epi32(vsum, vb[k], va[k]); } vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]); vc[col] = _mm512_fmadd_ps(vm0, vs1, vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; const int TILE_SIZE = TILE_N * sizeof(block_q8_0) + TILE_N * sizeof(int32_t); const block_q8_0 * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); __m512i va[8]; __m512i vb[8]; __m512 vc[COLS]; __m512 vd1; // Notes: s8s8 igemm compensation in avx512-vnni // change s8s8 to u8s8 with compensate // a * b = (a + 128) * b - 128 * b // s s u s u s // // (128 * b is pre-computed when packing B to vnni formats) // const __m512i off = _mm512_set1_epi8(static_cast(0x80)); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); auto compute = [&](auto col, auto i) { // load a and add offset 128 if constexpr (col == 0) { const int32_t * a_ptr = reinterpret_cast(A[0 * KB + i].qs); for (int k = 0; k < 8; ++k) { va[k] = _mm512_set1_epi32(a_ptr[k]); va[k] = _mm512_add_epi8(va[k], off); } vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].d)); } // load b const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); for (int k = 0; k < 8; ++k) { vb[k] = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 64)); } const int offset = TILE_N * TILE_K; const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset))); const int offset2 = TILE_N * TILE_K + TILE_N * sizeof(ggml_half); const __m512i vcomp = _mm512_loadu_si512((const __m512i *)(b_ptr + offset2)); __m512i vsum = _mm512_setzero_si512(); for (int k = 0; k < 8; ++k) { vsum = _mm512_dpbusd_epi32(vsum, va[k], vb[k]); } vsum = _mm512_sub_epi32(vsum, vcomp); vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; const int TILE_SIZE = TILE_N * sizeof(block_q4_K) + TILE_N * 4; const block_q8_K * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); // a.qs: 8 groups, 32 bytes each group (m256i) __m512i va[8]; // a.bsum: 8 groups, 2 bytes each group (m128i) __m512i va_bsum; __m512 vc[COLS]; __m512 vd1; // packed_B: const int offset_scales = (QK_K / 2) * TILE_N; const int offset_mins = (QK_K / 2) * TILE_N + 8 * TILE_N; const int offset_d0 = (QK_K / 2) * TILE_N + 16 * TILE_N; const int offset_dmin 
= (QK_K / 2) * TILE_N + 16 * TILE_N + TILE_N * sizeof(ggml_half); const __m512i lowMask = _mm512_set1_epi8(0xF); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); // Notes: vnni formats in QK_K // a) quants vnni format // int8 {k/4, n, 4}, viewed as 2d {k/4, 4n}, k = 32 // from {16, 32} to {8, 64} // // b) min vnni format // int16 {k/2, n, 2}, viewed as 2d {k/2, 2n}, k = 8 // from {16, 8} to {4, 32} // auto compute = [&](auto col, auto i) { // load a if constexpr (col == 0) { for (int k_group = 0; k_group < QK_K / 32; ++k_group) { va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32))); } const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums); const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); va_bsum = _mm512_castsi128_si512(q8s); vd1 = _mm512_set1_ps(A[0 * KB + i].d); } // step 1: accumultate the quants __m512i acc = _mm512_setzero_si512(); const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); const char * b_qs = b_ptr; for (int k_group = 0; k_group < QK_K / 32; ++k_group) { __m512i vsum = _mm512_setzero_si512(); for (int k = 0; k < 8; k += 2) { __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 0), va[k_group]); __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 1), va[k_group]); __m512i bytes = _mm512_loadu_si512((const __m512i *)b_qs); __m512i vb0 = _mm512_and_si512(bytes, lowMask); vsum = _mm512_dpbusd_epi32(vsum, vb0, va0); __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); vsum = _mm512_dpbusd_epi32(vsum, vb1, va1); b_qs += 64; } // vacc += scale * (q8 @ q4) const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N))); acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale)); } const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0))); vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]); // step 2: accumulate the mins __m512i acc_m = _mm512_setzero_si512(); for (int k = 0; k < 4; ++k) { __m512i vmask = _mm512_set1_epi32(k); __m512i va = _mm512_permutexvar_epi32(vmask, va_bsum); __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_mins + k * 32))); acc_m = _mm512_dpwssds_epi32(acc_m, va, vb); } const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_dmin))); vc[col] = _mm512_fnmadd_ps(_mm512_cvtepi32_ps(acc_m), _mm512_mul_ps(vdmin, vd1), vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; const int TILE_SIZE = TILE_N * sizeof(block_q5_K) + TILE_N * 4; const block_q8_K * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); // a.qs: 8 groups, 32 bytes each group (m256i) __m512i va[8]; // a.bsum: 8 groups, 2 bytes each group (m128i) __m512i va_bsum; __m512 vc[COLS]; __m512 vd1; // packed_B: const int offset_qh = (QK_K / 2) * TILE_N; const int offset_scales = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N; const int offset_mins = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 8 * TILE_N; const int offset_d0 = (QK_K / 2) * TILE_N + (QK_K / 8) * 
TILE_N + 16 * TILE_N; const int offset_dmin = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 16 * TILE_N + TILE_N * sizeof(ggml_half); const __m512i lowMask = _mm512_set1_epi8(0xF); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); // Q5_K and Q4_K shares the same vnni formats, refer to notes above. auto compute = [&](auto col, auto i) { // load a if constexpr (col == 0) { for (int k_group = 0; k_group < QK_K / 32; ++k_group) { va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32))); } const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums); const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); va_bsum = _mm512_castsi128_si512(q8s); vd1 = _mm512_set1_ps(A[0 * KB + i].d); } // step 1: accumultate the quants __m512i acc = _mm512_setzero_si512(); const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); const char * b_qs = b_ptr; const char * b_qh = b_ptr + offset_qh; for (int k_group = 0; k_group < QK_K / 32; ++k_group) { __m512i vsum = _mm512_setzero_si512(); __m512i hmask0 = _mm512_set1_epi8(0x1); __m512i hmask1 = _mm512_set1_epi8(0x2); __m512i hbits = _mm512_loadu_si512((const __m512i *)(b_qh + k_group * 64)); for (int k = 0; k < 8; k += 2) { __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 0), va[k_group]); __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 1), va[k_group]); __m512i bytes = _mm512_loadu_si512((const __m512i *)b_qs); __m512i vb0 = _mm512_and_si512(bytes, lowMask); __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); __m512i vh0 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask0), k), 4); __m512i vh1 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), k + 1), 4); hmask0 = _mm512_slli_epi16(hmask0, 2); hmask1 = _mm512_slli_epi16(hmask1, 2); vb0 = _mm512_add_epi8(vb0, vh0); vb1 = _mm512_add_epi8(vb1, vh1); vsum = _mm512_dpbusd_epi32(vsum, vb0, va0); vsum = _mm512_dpbusd_epi32(vsum, vb1, va1); b_qs += 64; } // vacc += scale * (q8 @ q5) const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N))); acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale)); } const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0))); vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]); // step 2: accumulate the mins __m512i acc_m = _mm512_setzero_si512(); for (int k = 0; k < 4; ++k) { __m512i vmask = _mm512_set1_epi32(k); __m512i va = _mm512_permutexvar_epi32(vmask, va_bsum); __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_mins + k * 32))); acc_m = _mm512_dpwssds_epi32(acc_m, va, vb); } const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_dmin))); vc[col] = _mm512_fnmadd_ps(_mm512_cvtepi32_ps(acc_m), _mm512_mul_ps(vdmin, vd1), vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; const int TILE_SIZE = TILE_N * sizeof(block_q6_K); const block_q8_K * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); // load the 
256 bytes from A to 4 avx512 vectors __m512i va[4]; __m512 vc[COLS]; __m512 vd1; // packed_B: const int offset_qh = (QK_K / 2) * TILE_N; const int offset_scales = (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N; const int offset_d0 = (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N + 16 * TILE_N; // compensation __m512i vcomp; const __m512i m32s = _mm512_set1_epi32(32); const __m512i lowMask = _mm512_set1_epi8(0xF); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); auto compute = [&](auto col, auto i) { if constexpr (col == 0) { // load a va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0)); va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64)); va[2] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 128)); va[3] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 192)); const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums); vcomp = _mm512_mullo_epi32(_mm512_cvtepi16_epi32(q8sums), m32s); vd1 = _mm512_set1_ps(A[0 * KB + i].d); } // accmulate the quants __m512i acc = _mm512_setzero_si512(); const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); const char * b_qs = b_ptr; const char * b_qh = b_ptr + offset_qh; int mask = 0; for (int k_group = 0; k_group < QK_K / 16; ++k_group) { int r = k_group >> 2; __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]); __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]); __m512i vsum = _mm512_setzero_si512(); __m512i hmask = _mm512_set1_epi8(0x3); __m512i bytes = _mm512_loadu_si512(b_qs); __m512i hbits = _mm512_loadu_si512(b_qh); __m512i vb0 = _mm512_and_si512(bytes, lowMask); __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); __m512i vh0 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask), 4); __m512i vh1 = _mm512_slli_epi16(_mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 2)), 2); vb0 = _mm512_add_epi8(vb0, vh0); vb1 = _mm512_add_epi8(vb1, vh1); vsum = _mm512_dpbusd_epi32(vsum, vb0, va0); vsum = _mm512_dpbusd_epi32(vsum, vb1, va1); b_qs += 64; va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]); va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]); bytes = _mm512_loadu_si512(b_qs); vb0 = _mm512_and_si512(bytes, lowMask); vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask); vh0 = _mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 4)); vh1 = _mm512_srli_epi16(_mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 6)), 2); vb0 = _mm512_add_epi8(vb0, vh0); vb1 = _mm512_add_epi8(vb1, vh1); vsum = _mm512_dpbusd_epi32(vsum, vb0, va0); vsum = _mm512_dpbusd_epi32(vsum, vb1, va1); b_qs += 64; b_qh += 64; // B * A - 32 * A __m512i vmask = _mm512_set1_epi32(k_group); vsum = _mm512_sub_epi32(vsum, _mm512_permutexvar_epi32(vmask, vcomp)); // vacc += scale * (q8 @ q6) const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N))); acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale)); } const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0))); vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](int col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; template struct tinygemm_kernel_vnni { static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { constexpr int COLS = BLOCK_N / 16; 
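// A scalar model of how the Q6_K kernel above rebuilds a 6-bit weight and folds the
// -32 offset into a compensation term. Each weight is stored as a low nibble plus
// two high bits; the real value is (ql | qh<<4) - 32, so
//   sum_k a_k * (q_k - 32)  ==  sum_k a_k * q_k  -  32 * sum_k a_k,
// which is why the kernel subtracts the permuted vcomp (vcomp = 32 * bsums) after
// the dpbusd accumulation. Names below are local to this sketch; qh is assumed to
// already hold the isolated 2-bit values.
#include <cstdint>

static int32_t q6_k_group_dot_sketch(const uint8_t * ql, const uint8_t * qh,
                                     const int8_t * a, int32_t bsum, int n) {
    int32_t acc = 0;
    for (int k = 0; k < n; ++k) {
        const int32_t q = (int32_t)(ql[k] | (qh[k] << 4));  // unsigned 6-bit value, 0..63
        acc += (int32_t)a[k] * q;                           // what dpbusd accumulates
    }
    return acc - 32 * bsum;                                 // fold in the -32 offset afterwards
}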
const int TILE_SIZE = TILE_N * sizeof(block_iq4_xs) + TILE_N * 2; const block_q8_K * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); // load the 256 bytes from A to 4 avx512 vectors __m512i va[4]; __m512 vc[COLS]; __m512 vd1; // packed_B: const int offset_scales = (QK_K / 2) * TILE_N ; const int offset_d0 = (QK_K / 2) * TILE_N + 8 * TILE_N; // compensation __m512i vcomp; const __m256i m128s = _mm256_set1_epi16(128); const __m512i lowMask = _mm512_set1_epi8(0xF); const __m512i values128 = _mm512_set_epi8( 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127, 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127, 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127, 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127 ); const __m512i off = _mm512_set1_epi8(static_cast(0x80)); const __m512i values256 = _mm512_add_epi8(values128, off); auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); auto compute = [&](auto col, auto i) { if constexpr (col == 0) { // load a va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0)); va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64)); va[2] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 128)); va[3] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 192)); // compensation: 128 * A const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums); vcomp = _mm512_castsi256_si512(_mm256_madd_epi16(q8sums, m128s)); vd1 = _mm512_set1_ps(A[0 * KB + i].d); } // accmulate the quants __m512i acc = _mm512_setzero_si512(); const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE); const char * b_qs = b_ptr; int mask = 0; for (int k_group = 0; k_group < QK_K / 32; ++k_group) { int r = k_group >> 1; __m512i vmask = _mm512_set1_epi32(k_group); __m512i vsum = _mm512_setzero_si512(); for (int k = 0; k < 8; k += 2) { __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]); __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]); __m512i bytes = _mm512_loadu_si512(b_qs); __m512i vb0 = _mm512_shuffle_epi8(values256, _mm512_and_si512(bytes, lowMask)); __m512i vb1 = _mm512_shuffle_epi8(values256, _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask)); vsum = _mm512_dpbusd_epi32(vsum, vb0, va0); vsum = _mm512_dpbusd_epi32(vsum, vb1, va1); b_qs += 64; } // (B + 128) * A - 128 * A vsum = _mm512_sub_epi32(vsum, _mm512_permutexvar_epi32(vmask, vcomp)); // vacc += scale * (q8 @ q4) const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N))); acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale)); } const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0))); vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]); }; for (int i = 0; i < KB; ++i) { Unroll{}(compute, i); } //store to C auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); } }; #define LAUNCH_TINYGEMM_KERNEL_VNNI(NB_SIZE) \ tinygemm_kernel_vnni::apply( \ KB, (const char *)wdata + 0 * row_size_A, \ (const char *)src0->data + PACKED_INDEX(nb * kTilesN, 0, KB, TILE_SIZE), \ (float *) dst->data + 0 * N + nb_start, ldc) template ::value, int>::type = 0> void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const void * RESTRICT _B, TC * RESTRICT C, int ldc) { using packed_B_t = packed_B_type; 
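// A scalar model of the IQ4_XS path above: each 4-bit code is mapped through a
// non-linear signed codebook (the values128 constant) with _mm512_shuffle_epi8,
// then biased by +128 so the looked-up byte is unsigned and can feed
// _mm512_dpbusd_epi32; the extra 128 * sum(a) is subtracted afterwards, i.e.
// (B + 128)·A - 128·A == B·A. The codebook below is copied from values128;
// the other names are local to this sketch.
#include <cstdint>

static const int8_t kvalues_iq4_sketch[16] = {
    -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113
};

static int32_t iq4_dot_sketch(const uint8_t * codes, const int8_t * a, int32_t bsum, int n) {
    int32_t acc = 0;
    for (int k = 0; k < n; ++k) {
        const int32_t b_biased = (int32_t)kvalues_iq4_sketch[codes[k] & 0xF] + 128;  // 0..241, fits in u8
        acc += b_biased * (int32_t)a[k];                                             // u8 * s8 accumulation
    }
    return acc - 128 * bsum;  // undo the +128 bias using the precomputed sum of a
}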
const int TILE_SIZE = get_tile_size(); const bool need_unpack = do_unpack::value; GGML_ASSERT(M <= 2 * TILE_M && N == 2 * TILE_N); const TA * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); const int m0 = std::min(M, TILE_M); const int m1 = std::max(M - TILE_M, 0); const int lda = KB * sizeof(TA); //const int ldb = KB * sizeof(TB); static thread_local packed_B_t Tile0[TILE_N * TILE_K]; static thread_local packed_B_t Tile1[TILE_N * TILE_K]; static thread_local int8_t Tile23[TILE_M * TILE_K]; static thread_local int32_t TileC0[TILE_M * TILE_N * 4]; static thread_local int32_t TileC1[TILE_M * TILE_N * 4]; // double buffering C to interleave avx512 and amx int32_t * C_cur = TileC0; int32_t * C_pre = TileC1; auto Tile4 = [&](int32_t * base) { return base; }; auto Tile5 = [&](int32_t * base) { return base + TILE_M * TILE_N; }; auto Tile6 = [&](int32_t * base) { return base + 2 * TILE_M * TILE_N; }; auto Tile7 = [&](int32_t * base) { return base + 3 * TILE_M * TILE_N; }; if (M == 2 * TILE_M) { // i = 0 const char * B_blk0 = B + PACKED_INDEX(0, 0, KB, TILE_SIZE); const char * B_blk1 = B + PACKED_INDEX(1, 0, KB, TILE_SIZE); if (need_unpack) { unpack_B(Tile0, B_blk0); _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK); } else { _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK); } _tile_zero(TMM4); _tile_loadd(TMM2, A[0].qs, lda); _tile_dpbssd(TMM4, TMM2, TMM0); _tile_stored(TMM4, Tile4(C_pre), TILE_N * sizeof(int32_t)); _tile_zero(TMM5); _tile_loadd(TMM3, A[TILE_M * KB + 0].qs, lda); _tile_dpbssd(TMM5, TMM3, TMM0); _tile_stored(TMM5, Tile5(C_pre), TILE_N * sizeof(int32_t)); if (need_unpack) { unpack_B(Tile1, B_blk0); _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK); } else { _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK); } _tile_zero(TMM6); _tile_dpbssd(TMM6, TMM2, TMM1); _tile_stored(TMM6, Tile6(C_pre), TILE_N * sizeof(int32_t)); _tile_zero(TMM7); _tile_dpbssd(TMM7, TMM3, TMM1); _tile_stored(TMM7, Tile7(C_pre), TILE_N * sizeof(int32_t)); for (int i = 1; i < KB; ++i) { // index of previous iter const int ii = i - 1; const char * B_blk0 = B + PACKED_INDEX(0, i, KB, TILE_SIZE); const char * B_blk1 = B + PACKED_INDEX(1, i, KB, TILE_SIZE); GGML_DISPATCH_BOOL(ii > 0, is_acc, [&] { if (need_unpack) { unpack_B(Tile0, B_blk0); _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK); } else { _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK); } _tile_zero(TMM4); _tile_loadd(TMM2, A[i].qs, lda); acc_C::apply(C, ldc, Tile4(C_pre), &A[ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M); _tile_dpbssd(TMM4, TMM2, TMM0); _tile_stored(TMM4, Tile4(C_cur), TILE_N * sizeof(int32_t)); _tile_zero(TMM5); _tile_loadd(TMM3, A[TILE_M * KB + i].qs, lda); acc_C::apply(C + TILE_M * ldc, ldc, Tile5(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M); _tile_dpbssd(TMM5, TMM3, TMM0); _tile_stored(TMM5, Tile5(C_cur), TILE_N * sizeof(int32_t)); if (need_unpack) { unpack_B(Tile1, B_blk1); _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK); } else { _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK); } _tile_zero(TMM6); acc_C::apply(C + TILE_N, ldc, Tile6(C_pre), &A[ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M); _tile_dpbssd(TMM6, TMM2, TMM1); _tile_stored(TMM6, Tile6(C_cur), TILE_N * sizeof(int32_t)); _tile_zero(TMM7); acc_C::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M); _tile_dpbssd(TMM7, TMM3, TMM1); _tile_stored(TMM7, Tile7(C_cur), TILE_N * sizeof(int32_t)); std::swap(C_cur, C_pre); }); } // final accumulation { int ii = KB - 
1; acc_C::apply(C, ldc, Tile4(C_pre), &A[ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M); acc_C::apply(C + TILE_M * ldc, ldc, Tile5(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M); acc_C::apply(C + TILE_N, ldc, Tile6(C_pre), &A[ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M); acc_C::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M); } } else { for (int i = 0; i < KB; ++i) { _tile_zero(TMM4); _tile_zero(TMM6); if (m1 != 0) { _tile_zero(TMM5); _tile_zero(TMM7); } const char * B_blk0 = B + PACKED_INDEX(0, i, KB, TILE_SIZE); const char * B_blk1 = B + PACKED_INDEX(1, i, KB, TILE_SIZE); if (need_unpack) { unpack_B(Tile0, B_blk0); _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK); } else { _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK); } if (need_unpack) { unpack_B(Tile1, B_blk1); _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK); } else { _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK); } if (m0 == TILE_M) { _tile_loadd(TMM2, A[i].qs, lda); } else { unpack_A(Tile23, &A[i], KB, m0); _tile_loadd(TMM2, Tile23, TILE_K); } _tile_dpbssd(TMM4, TMM2, TMM0); _tile_dpbssd(TMM6, TMM2, TMM1); _tile_stored(TMM4, Tile4(C_cur), TILE_N * sizeof(int32_t)); _tile_stored(TMM6, Tile6(C_cur), TILE_N * sizeof(int32_t)); GGML_DISPATCH_BOOL(i > 0, is_acc, [&] { acc_C::apply(C, ldc, Tile4(C_cur), &A[i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m0); acc_C::apply(C + TILE_N, ldc, Tile6(C_cur), &A[i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m0); }); if (m1 != 0) { unpack_A(Tile23, &A[TILE_M * KB + i], KB, m1); _tile_loadd(TMM3, Tile23, TILE_K); _tile_dpbssd(TMM5, TMM3, TMM0); _tile_dpbssd(TMM7, TMM3, TMM1); _tile_stored(TMM5, Tile5(C_cur), TILE_N * sizeof(int32_t)); _tile_stored(TMM7, Tile7(C_cur), TILE_N * sizeof(int32_t)); GGML_DISPATCH_BOOL(i > 0, is_acc, [&] { acc_C::apply(C + TILE_M * ldc, ldc, Tile5(C_cur), &A[TILE_M * KB + i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m1); acc_C::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_cur), &A[TILE_M * KB + i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m1); }); } } } return; } template ::value, int>::type = 0> void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) { static_assert(std::is_same::value); const int TILE_SIZE = get_tile_size(); GGML_ASSERT(M <= 2 * TILE_M && N == 2 * TILE_N); const TA * RESTRICT A = static_cast(_A); const char * RESTRICT B = static_cast(_B); const int m0 = std::min(M, TILE_M); const int m1 = std::max(M - TILE_M, 0); //const int lda = KB * sizeof(TA); static thread_local int8_t Tile0[TILE_N * TILE_K]; static thread_local int8_t Tile1[TILE_N * TILE_K]; static thread_local int8_t Tile23[TILE_M * TILE_K]; // mat mul result for each group static thread_local int32_t Tile4[TILE_M * TILE_N]; static thread_local int32_t Tile5[TILE_M * TILE_N]; static thread_local int32_t Tile6[TILE_M * TILE_N]; static thread_local int32_t Tile7[TILE_M * TILE_N]; // sum of each QK_K block, contains 8 groups, int32 static thread_local int32_t Sumi4[TILE_M * TILE_N]; static thread_local int32_t Sumi5[TILE_M * TILE_N]; static thread_local int32_t Sumi6[TILE_M * TILE_N]; static thread_local int32_t Sumi7[TILE_M * TILE_N]; const int k_group_size = std::is_same::value ? 
16 : 32; for (int i = 0; i < KB; ++i) { // step 1: accumulate the quants across 8 groups, each group with 32 for (int k = 0; k < QK_K / k_group_size; ++k) { GGML_DISPATCH_BOOL(k > 0, is_acc, [&] { _tile_zero(TMM4); _tile_zero(TMM6); unpack_B(Tile0, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k); _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK); unpack_B(Tile1, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k); _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK); unpack_A(Tile23, &A[i], KB, k, m0); _tile_loadd(TMM2, Tile23, TILE_K); _tile_dpbssd(TMM4, TMM2, TMM0); _tile_dpbssd(TMM6, TMM2, TMM1); _tile_stored(TMM4, Tile4, TILE_N * sizeof(int32_t)); _tile_stored(TMM6, Tile6, TILE_N * sizeof(int32_t)); scale_C(Tile4, Sumi4, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k, m0); scale_C(Tile6, Sumi6, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k, m0); if (m1 != 0) { _tile_zero(TMM5); _tile_zero(TMM7); unpack_A(Tile23, &A[TILE_M * KB + i], KB, k, m1); _tile_loadd(TMM3, Tile23, TILE_K); _tile_dpbssd(TMM5, TMM3, TMM0); _tile_dpbssd(TMM7, TMM3, TMM1); _tile_stored(TMM5, Tile5, TILE_N * sizeof(int32_t)); _tile_stored(TMM7, Tile7, TILE_N * sizeof(int32_t)); scale_C(Tile5, Sumi5, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k, m1); scale_C(Tile7, Sumi7, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k, m1); } }); } // step 2: accmulate the mins GGML_DISPATCH_BOOL(i > 0, is_acc, [&] { acc_C::apply(C, ldc, Sumi4, &A[i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m0); acc_C::apply(C + TILE_N, ldc, Sumi6, &A[i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m0); if (m1 != 0) { acc_C::apply(C + TILE_M * ldc, ldc, Sumi5, &A[TILE_M * KB + i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m1); acc_C::apply(C + TILE_M * ldc + TILE_N, ldc, Sumi7, &A[TILE_M * KB + i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m1); } }); } return; } } // anonymous namespace // get the packed tensor size for quantized weights size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor) { const enum ggml_type TYPE = tensor->type; const int K = tensor->ne[0]; // ne0: in_features const int N = tensor->ne[1]; // ne1: out_features auto get_tensor_size = [&] { size_t row_size_B{0}; GGML_DISPATCH_QTYPES(TYPE, [&] { row_size_B = get_row_size(K); }); return N * row_size_B; }; if (qtype_has_amx_kernels(TYPE)) { return get_tensor_size(); } else { // for f16, bf16 we don't do packing return ggml_nbytes(tensor); } } // pack weight to vnni format void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(offset == 0 && size == ggml_nbytes(tensor)); // only full tensor conversion is supported for now const enum ggml_type TYPE = tensor->type; const int K = tensor->ne[0]; // ne0: in_features const int N = tensor->ne[1]; // ne1: out_features GGML_DISPATCH_QTYPES(TYPE, [&] { convert_B_packed_format((void *)((char *)tensor->data + offset), (const type *)data, N, K); }); } size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst) { struct ggml_tensor * src0 = dst->src[0]; const enum ggml_type TYPE = src0->type; const bool is_floating_type = TYPE == GGML_TYPE_F16; if (is_floating_type) { return 0; } const int M = dst->ne[1]; const int K = src0->ne[0]; size_t desired_wsize = 0; GGML_DISPATCH_QTYPES(TYPE, [&] { const size_t row_size_A = K / blck_size * sizeof(vec_dot_type); desired_wsize = M * row_size_A; }); return desired_wsize; } // NB: mixed dtype gemm with Advanced Matrix Extensions (Intel AMX) // // src0: weight in shape of {N, K}, quantized // src1: input in shape of {M, K}, float32 // dst: output in shape of {M, 
N}, float32 // // the function performs: dst = src1 @ src0.T // void ggml_backend_amx_mul_mat(const ggml_compute_params * params, struct ggml_tensor * dst) { struct ggml_tensor * src0 = dst->src[0]; struct ggml_tensor * src1 = dst->src[1]; const enum ggml_type TYPE = src0->type; // f16 only has avx512 kernels for now, // amx kernels will be added once 6th gen xeon is released. const bool is_floating_type = TYPE == GGML_TYPE_F16; const int M = dst->ne[1]; const int N = dst->ne[0]; const int K = src0->ne[0]; const int ldc = dst->nb[1] / dst->nb[0]; if (is_floating_type) { constexpr int BLOCK_M = 4; constexpr int BLOCK_N = 6; const int MB = div_up(M, BLOCK_M); const int NB = div_up(N, BLOCK_N); parallel_for_ggml(params, MB * NB, [&](int begin, int end) { GGML_DISPATCH_FLOATING_TYPES(TYPE, [&] { for (int i = begin; i < end; ++i) { int mb = i / NB; int nb = i % NB; int mb_start = mb * BLOCK_M; int mb_size = std::min(BLOCK_M, M - mb_start); int nb_start = nb * BLOCK_N; int nb_size = std::min(BLOCK_N, N - nb_start); switch (mb_size << 4 | nb_size) { case 0x12: LAUNCH_TINYGEMM_KERNEL_AVX(1, 2); break; case 0x14: LAUNCH_TINYGEMM_KERNEL_AVX(1, 4); break; case 0x16: LAUNCH_TINYGEMM_KERNEL_AVX(1, 6); break; case 0x22: LAUNCH_TINYGEMM_KERNEL_AVX(2, 2); break; case 0x24: LAUNCH_TINYGEMM_KERNEL_AVX(2, 4); break; case 0x26: LAUNCH_TINYGEMM_KERNEL_AVX(2, 6); break; case 0x32: LAUNCH_TINYGEMM_KERNEL_AVX(3, 2); break; case 0x34: LAUNCH_TINYGEMM_KERNEL_AVX(3, 4); break; case 0x36: LAUNCH_TINYGEMM_KERNEL_AVX(3, 6); break; case 0x42: LAUNCH_TINYGEMM_KERNEL_AVX(4, 2); break; case 0x44: LAUNCH_TINYGEMM_KERNEL_AVX(4, 4); break; case 0x46: LAUNCH_TINYGEMM_KERNEL_AVX(4, 6); break; default: fprintf(stderr, "Unexpected block size!\n"); } } }); }); return; } // pointer to work space, used convert A from float to quantized type void * wdata = params->wdata; //TODO: performance improvement: merge quant A if (params->ith == 0) { GGML_DISPATCH_QTYPES(TYPE, [&] { const size_t row_size_A = K / blck_size * sizeof(vec_dot_type); const size_t desired_wsize = M * row_size_A; if (params->wsize < desired_wsize) { GGML_ABORT("insufficient work space size"); } // Q4_0, Q4_1, Q8_0 handles 1 TILE_K per blck_size // Q4_K, Q5_K, Q6_K, IQ4_XS handles 8 TILE_K per blck_size GGML_ASSERT(TILE_K == blck_size || TILE_K * 8 == blck_size); const float * A_data = static_cast(src1->data); for (int m = 0; m < M; ++m) { from_float(A_data + m * K, (char *)wdata + m * row_size_A, K); } }); } ggml_barrier(params->threadpool); if (M == 1) { // MB = 1 and handle 8 tiles in each block constexpr int kTilesN = 4; constexpr int BLOCK_N = TILE_N * kTilesN; const int NB = div_up(N, BLOCK_N); parallel_for_ggml(params, NB, [&](int begin, int end) { GGML_DISPATCH_QTYPES(TYPE, [&] { const int KB = K / blck_size; const int TILE_SIZE = get_tile_size(); const int row_size_A = KB * sizeof(vec_dot_type); for (int i = begin; i < end; ++i) { int nb = i; int nb_start = nb * BLOCK_N; int nb_size = std::min(BLOCK_N, N - nb_start); // 32, 64, 96 switch (nb_size) { //case 160: LAUNCH_TINYGEMM_KERNEL_VNNI(160); break; case 128: LAUNCH_TINYGEMM_KERNEL_VNNI(128); break; case 96: LAUNCH_TINYGEMM_KERNEL_VNNI(96); break; case 64: LAUNCH_TINYGEMM_KERNEL_VNNI(64); break; case 32: LAUNCH_TINYGEMM_KERNEL_VNNI(32); break; default: fprintf(stderr, "Unexpected n block size!\n"); } } }); }); return; } // handle 4 tiles at a tile constexpr int BLOCK_M = TILE_M * 2; constexpr int BLOCK_N = TILE_N * 2; const int MB = div_up(M, BLOCK_M); const int NB = div_up(N, BLOCK_N); 
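// A minimal sketch of the blocking scheme used by the mul_mat launcher above: the
// output is walked as a 2-D grid of (BLOCK_M x BLOCK_N) blocks, the grid is
// flattened to MB*NB work items for the thread pool, and (mb, nb) is recovered with
// a divide/modulo. The float path additionally packs the residual block shape into
// one byte (mb_size << 4 | nb_size) so a switch can select a fixed-shape kernel.
// The helper names below are local to this sketch, not part of ggml.
#include <algorithm>
#include <cstdio>

static inline int div_up_sketch(int x, int y) { return (x + y - 1) / y; }

static void launch_blocks_sketch(int M, int N, int BLOCK_M, int BLOCK_N) {
    const int MB = div_up_sketch(M, BLOCK_M);
    const int NB = div_up_sketch(N, BLOCK_N);
    for (int i = 0; i < MB * NB; ++i) {          // the real code splits this range across threads
        const int mb = i / NB, nb = i % NB;
        const int mb_start = mb * BLOCK_M, mb_size = std::min(BLOCK_M, M - mb_start);
        const int nb_start = nb * BLOCK_N, nb_size = std::min(BLOCK_N, N - nb_start);
        switch (mb_size << 4 | nb_size) {        // e.g. 0x24 encodes a 2-row x 4-col residual block
            // case 0x24: tinygemm<2, 4>(...); break;   // fixed-shape kernel instantiations go here
            default: std::printf("block (%d x %d) at (%d, %d)\n", mb_size, nb_size, mb_start, nb_start);
        }
    }
}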
parallel_for_ggml(params, MB * NB, [&](int begin, int end) { // init tile config for each thread ggml_tile_config_init(); GGML_DISPATCH_QTYPES(TYPE, [&] { const int KB = K / blck_size; const int TILE_SIZE = get_tile_size(); const int row_size_A = KB * sizeof(vec_dot_type); for (int i = begin; i < end; ++i) { int mb = i / NB; int nb = i % NB; int mb_start = mb * BLOCK_M; int mb_size = std::min(BLOCK_M, M - mb_start); int nb_start = nb * BLOCK_N; int nb_size = BLOCK_N; tinygemm_kernel_amx( mb_size, nb_size, KB, (const char *)wdata + mb_start * row_size_A, (const char *)src0->data + PACKED_INDEX(nb * 2, 0, KB, TILE_SIZE), (float *) dst->data + mb_start * N + nb_start, ldc); } }); }); } #endif // if defined(__AMX_INT8__) && defined(__AVX512VNNI__) ggml-org-ggml-3678254/src/ggml-cpu/amx/mmq.h000066400000000000000000000006151512524704700204200ustar00rootroot00000000000000#pragma once #include "common.h" size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst); size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor); void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cpu/arch-fallback.h000066400000000000000000000370431512524704700215200ustar00rootroot00000000000000#pragma once // Rename `_generic` functions if no native implementation is available. // This effectively selects the generic implementation. #if defined(GGML_CPU_GENERIC) // quants.c #define quantize_row_q8_0_generic quantize_row_q8_0 #define quantize_row_q8_1_generic quantize_row_q8_1 #define quantize_row_q8_K_generic quantize_row_q8_K #define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 #define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 #define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 #define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 #define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K #define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K #define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K #define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K #define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K #define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K #define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K #define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K #define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K #define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K #define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K #define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 #define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic 
ggml_gemv_q4_0_4x8_q8_0 #define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 #define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #elif defined(__aarch64__) || defined(__arm__) || defined(_M_ARM) || defined(_M_ARM64) // repack.cpp #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #elif defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || defined(_M_X64) // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 #define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #elif defined(__POWERPC__) || defined(__powerpc__) // ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 // quants.c #define quantize_row_q8_K_generic quantize_row_q8_K #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 #define 
ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 #define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #elif defined(__loongarch64) // quants.c #define quantize_row_q8_K_generic quantize_row_q8_K #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 #define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 #define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #elif defined(__riscv) // quants.c #define quantize_row_q8_K_generic quantize_row_q8_K #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K #define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K #define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K #define ggml_vec_dot_iq3_xxs_q8_K_generic 
ggml_vec_dot_iq3_xxs_q8_K #define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K #define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K #define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 #define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 #define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #elif defined(__s390x__) // quants.c #define quantize_row_q8_K_generic quantize_row_q8_K #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K #define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K #define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K #define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K #define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K #define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K #define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 #define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 
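// A minimal self-contained model of the rename-to-generic fallback implemented by
// the #define blocks in this header: the portable implementation is always written
// as <name>_generic, and on targets with no hand-written kernel the macro renames
// that definition to the public <name>, so no runtime dispatch is needed.
// foo/foo_generic and HAVE_NATIVE_FOO are hypothetical stand-ins for this sketch.
#include <cstdio>

#if !defined(HAVE_NATIVE_FOO)           // hypothetical feature gate, analogous to the arch checks above
#define foo_generic foo                 // the generic definition below now *is* foo()
#endif

static void foo_generic(int x) {        // portable reference implementation
    std::printf("generic foo(%d)\n", x);
}

#if defined(HAVE_NATIVE_FOO)
static void foo(int x) {                // optimized per-arch version, only when available
    std::printf("native foo(%d)\n", x);
}
#endif

int main() {
    foo(42);                            // resolves to whichever definition was selected
    return 0;
}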
#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #elif defined(__wasm__) // quants.c #define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K #define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K #define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K #define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K #define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K #define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K #define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 #define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 #define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 #define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 #define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 #define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 #define ggml_gemv_q4_K_8x4_q8_K_generic ggml_gemv_q4_K_8x4_q8_K #define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 #define ggml_gemv_iq4_nl_8x8_q8_0_generic ggml_gemv_iq4_nl_8x8_q8_0 #define ggml_gemv_q8_0_4x4_q8_0_generic ggml_gemv_q8_0_4x4_q8_0 #define ggml_gemv_q8_0_4x8_q8_0_generic ggml_gemv_q8_0_4x8_q8_0 #define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 #define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 #define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 #define ggml_gemm_q4_K_8x4_q8_K_generic ggml_gemm_q4_K_8x4_q8_K #define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 #define ggml_gemm_iq4_nl_8x8_q8_0_generic ggml_gemm_iq4_nl_8x8_q8_0 #define ggml_gemm_q8_0_4x4_q8_0_generic ggml_gemm_q8_0_4x4_q8_0 #define ggml_gemm_q8_0_4x8_q8_0_generic ggml_gemm_q8_0_4x8_q8_0 #endif ggml-org-ggml-3678254/src/ggml-cpu/arch/000077500000000000000000000000001512524704700176035ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/arm/000077500000000000000000000000001512524704700203625ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/arm/cpu-feats.cpp000066400000000000000000000045231512524704700227610ustar00rootroot00000000000000#include "ggml-backend-impl.h" #if 
defined(__aarch64__)

#if defined(__linux__)
#include <sys/auxv.h>
#elif defined(__APPLE__)
#include <sys/sysctl.h>
#endif

#if !defined(HWCAP2_SVE2)
#define HWCAP2_SVE2 (1 << 1)
#endif

#if !defined(HWCAP2_I8MM)
#define HWCAP2_I8MM (1 << 13)
#endif

#if !defined(HWCAP2_SME)
#define HWCAP2_SME (1 << 23)
#endif

struct aarch64_features {
    // has_neon not needed, aarch64 has NEON guaranteed
    bool has_dotprod = false;
    bool has_fp16_va = false;
    bool has_sve = false;
    bool has_sve2 = false;
    bool has_i8mm = false;
    bool has_sme = false;

    aarch64_features() {
#if defined(__linux__)
        uint32_t hwcap = getauxval(AT_HWCAP);
        uint32_t hwcap2 = getauxval(AT_HWCAP2);

        has_dotprod = !!(hwcap & HWCAP_ASIMDDP);
        has_fp16_va = !!(hwcap & HWCAP_FPHP);
        has_sve = !!(hwcap & HWCAP_SVE);
        has_sve2 = !!(hwcap2 & HWCAP2_SVE2);
        has_i8mm = !!(hwcap2 & HWCAP2_I8MM);
        has_sme = !!(hwcap2 & HWCAP2_SME);
#elif defined(__APPLE__)
        int oldp = 0;
        size_t size = sizeof(oldp);

        if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) == 0) {
            has_dotprod = static_cast<bool>(oldp);
        }
        if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) == 0) {
            has_i8mm = static_cast<bool>(oldp);
        }
        if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) == 0) {
            has_sme = static_cast<bool>(oldp);
        }
        // Apple apparently does not implement SVE yet
#endif
    }
};

static int ggml_backend_cpu_aarch64_score() {
    int score = 1;
    aarch64_features af;

#ifdef GGML_USE_DOTPROD
    if (!af.has_dotprod) { return 0; }
    score += 1<<1;
#endif
#ifdef GGML_USE_FP16_VECTOR_ARITHMETIC
    if (!af.has_fp16_va) { return 0; }
    score += 1<<2;
#endif
#ifdef GGML_USE_SVE
    if (!af.has_sve) { return 0; }
    score += 1<<3;
#endif
#ifdef GGML_USE_MATMUL_INT8
    if (!af.has_i8mm) { return 0; }
    score += 1<<4;
#endif
#ifdef GGML_USE_SVE2
    if (!af.has_sve2) { return 0; }
    score += 1<<5;
#endif
#ifdef GGML_USE_SME
    if (!af.has_sme) { return 0; }
    score += 1<<6;
#endif

    return score;
}

GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_aarch64_score)

#endif // defined(__aarch64__)

ggml-org-ggml-3678254/src/ggml-cpu/arch/arm/quants.c

#define GGML_COMMON_IMPL_C
#include "ggml-common.h"
#include "ggml-quants.h"
#include "ggml-impl.h"
#include "ggml-cpu.h"
#include "simd-mappings.h"

#include "../../quants.h"
#include "../../ggml-cpu-impl.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#define GROUP_MAX_EPS 1e-15f
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
#define GROUP_MAX_EPS_IQ2_S 1e-8f
#define GROUP_MAX_EPS_IQ1_M 1e-7f
#define GROUP_MAX_EPS_IQ1_S 1e-12f

#define UNUSED GGML_UNUSED

#if defined(__ARM_NEON)

#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif

void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0 * GGML_RESTRICT y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++)
srcv[j] = vld1q_f32(x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); const float amax = vmaxvq_f32(amaxv[0]); const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const float32x4_t v = vmulq_n_f32(srcv[j], id); const int32x4_t vi = vcvtnq_s32_f32(v); y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); } } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined(__ARM_NEON) for (int i = 0; i < nb; i++) { float32x4_t srcv [8]; float32x4_t asrcv[8]; float32x4_t amaxv[8]; for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); const float amax = vmaxvq_f32(amaxv[0]); const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); int32x4_t accv = vdupq_n_s32(0); for (int j = 0; j < 8; j++) { const float32x4_t v = vmulq_n_f32(srcv[j], id); const int32x4_t vi = vcvtnq_s32_f32(v); y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); accv = vaddq_s32(accv, vi); } y[i].s = GGML_CPU_FP32_TO_FP16(d * vaddvq_s32(accv)); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } // placeholder implementation for Apple targets void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_K_ref(x, y, k); } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); #if defined(__ARM_FEATURE_MATMUL_INT8) assert((nrc == 2) || (nrc == 1)); #else assert(nrc == 1); #endif UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_0 * GGML_RESTRICT vx0 = vx; const block_q4_0 * GGML_RESTRICT vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); const block_q8_0 * GGML_RESTRICT vy0 = vy; const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); for (int i = 0; i < nb; i++) { const block_q4_0 * GGML_RESTRICT b_x0 = &vx0[i]; const block_q4_0 * GGML_RESTRICT b_x1 = &vx1[i]; const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; const uint8x16_t m4b = vdupq_n_u8(0x0F); const int8x16_t s8b = vdupq_n_s8(0x8); const uint8x16_t v0_0 = 
vld1q_u8(b_x0->qs); const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); // 4-bit -> 8-bit const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); // sub 8 const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); // load y const int8x16_t y0_l = vld1q_s8(b_y0->qs); const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); float32_t _scale[4] = { GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) }; float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); } float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); vst1_f32(s, vget_low_f32 (sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); return; } #endif int ib = 0; float sumf = 0; #if defined(__ARM_FEATURE_SVE) svfloat32_t sumv0 = svdup_n_f32(0.0f); svfloat32_t sumv1 = svdup_n_f32(0.0f); const int vector_length = ggml_cpu_get_sve_cnt()*8; // VLA Implementation using switch case switch (vector_length) { case 128: { // predicate for activating higher lanes for 4 float32 elements const svbool_t ph4 = svptrue_pat_b32(SV_VL4); for (; ib + 1 < nb; ib += 2) { const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; // load x const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); // 4-bit -> 8-bit const svint8_t qx0l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx0r, 0x0F)); const svint8_t qx0h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx0r, 0x04)); const svint8_t qx1l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx1r, 0x0F)); const svint8_t qx1h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx1r, 0x04)); // sub 8 const svint8_t qx0ls = svsub_n_s8_x(svptrue_b8(), qx0h, 8); const svint8_t qx0hs = svsub_n_s8_x(svptrue_b8(), qx0l, 8); const svint8_t qx1ls = svsub_n_s8_x(svptrue_b8(), qx1h, 8); const svint8_t qx1hs = svsub_n_s8_x(svptrue_b8(), qx1l, 
8); // load y const svint8_t qy0h = svld1_s8(svptrue_b8(), y0->qs); const svint8_t qy0l = svld1_s8(svptrue_b8(), y0->qs + 16); const svint8_t qy1h = svld1_s8(svptrue_b8(), y1->qs); const svint8_t qy1l = svld1_s8(svptrue_b8(), y1->qs + 16); // dot product sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4, svdot_s32(svdup_n_s32(0), qx0ls, qy0l), svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4, svdot_s32(svdup_n_s32(0), qx1ls, qy1l), svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); } break; case 256: { // predicate for activating higher lanes for 16 int8 elements const svbool_t ph16 = svptrue_pat_b8(SV_VL16); // predicate for activating lower lanes for 16 int8 elements const svbool_t pl16 = svnot_b_z(svptrue_b8(), ph16); for (; ib + 1 < nb; ib += 2) { const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; // load x const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); // 4-bit -> 8-bit const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); // sub 8 const svint8_t qx0s = svsub_n_s8_x(svptrue_b8(), qx0, 8); const svint8_t qx1s = svsub_n_s8_x(svptrue_b8(), qx1, 8); // load y const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); // dot product sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); } break; case 512: { // predicate for activating higher lanes for 32 int8 elements const svbool_t ph32 = svptrue_pat_b8(SV_VL32); // predicate for activating higher lanes for 16 int8 elements const svbool_t ph16 = svptrue_pat_b8(SV_VL16); // predicate for activating lower lanes for 16 int8 elements from first 32 int8 activated lanes const svbool_t pl16 = svnot_b_z(ph32, ph16); for (; ib + 1 < nb; ib += 2) { const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; // load x const svuint8_t qx0r = svld1rq_u8(ph32, x0->qs); const svuint8_t qx1r = svld1rq_u8(ph32, x1->qs); // 4-bit -> 8-bit const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); // sub 8 const svint8_t qx0s = svsub_n_s8_x(ph32, qx0, 8); const svint8_t qx1s = svsub_n_s8_x(ph32, qx1, 8); // load y const svint8_t qy0 = svld1_s8(ph32, y0->qs); const svint8_t qy1 = svld1_s8(ph32, y1->qs); // dot product sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32, svdot_s32(svdup_n_s32(0), qx0s, qy0)), 
GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32, svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1)); } break; default: assert(false && "Unsupported vector length"); break; } #elif defined(__ARM_NEON) float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); for (; ib + 1 < nb; ib += 2) { const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; const uint8x16_t m4b = vdupq_n_u8(0x0F); const int8x16_t s8b = vdupq_n_s8(0x8); const uint8x16_t v0_0 = vld1q_u8(x0->qs); const uint8x16_t v0_1 = vld1q_u8(x1->qs); // 4-bit -> 8-bit const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); // sub 8 const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); // load y const int8x16_t v1_0l = vld1q_s8(y0->qs); const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); // dot product into int32x4_t const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F) - 8; const int v1 = (x[ib].qs[j] >> 4) - 8; sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); #if defined(__ARM_FEATURE_MATMUL_INT8) assert((nrc == 2) || (nrc == 1)); #else assert(nrc == 1); #endif UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_1 * GGML_RESTRICT vx0 = vx; const block_q4_1 * GGML_RESTRICT vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx); const block_q8_1 * GGML_RESTRICT vy0 = vy; const block_q8_1 * GGML_RESTRICT vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t summs0 = vdupq_n_f32(0.0f); for (int i = 0; i < nb; i++) { const block_q4_1 * GGML_RESTRICT b_x0 = &vx0[i]; const block_q4_1 * GGML_RESTRICT b_x1 = &vx1[i]; const block_q8_1 * GGML_RESTRICT b_y0 = &vy0[i]; const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i]; float32_t summs_t[4] = { GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y0->s), 
GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y0->s), GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y1->s), GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y1->s) }; summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); const uint8x16_t m4b = vdupq_n_u8(0x0F); const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); // 4-bit -> 8-bit const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); // load y const int8x16_t y0_l = vld1q_s8(b_y0->qs); const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); // mmla into int32x4_t float32_t _scale[4] = { GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) }; float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); } float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); sumv2 = vaddq_f32(sumv2, summs0); vst1_f32(s, vget_low_f32 (sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); return; } #endif int ib = 0; float sumf = 0; #if defined(__ARM_NEON) float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); float summs = 0; for (; ib + 1 < nb; ib += 2) { const block_q4_1 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q4_1 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; summs += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s) + GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s); const uint8x16_t m4b = vdupq_n_u8(0x0F); const uint8x16_t v0_0 = vld1q_u8(x0->qs); const uint8x16_t v0_1 = vld1q_u8(x1->qs); // 4-bit -> 8-bit const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); // load y const int8x16_t v1_0l = vld1q_s8(y0->qs); const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); // dot product into int32x4_t const int32x4_t 
p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F); const int v1 = (x[ib].qs[j] >> 4); sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; } void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_MXFP4 == 0); static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); const block_mxfp4 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK_MXFP4; int ib = 0; float sumf = 0; #if defined __ARM_NEON const int8x16_t values = vld1q_s8(kvalues_mxfp4); const uint8x16_t m4b = vdupq_n_u8(0x0f); uint8x16x2_t q4bits; int8x16x4_t q4b; int8x16x4_t q8b; int32x4_t prod_1; int32x4_t prod_2; for (; ib + 1 < nb; ib += 2) { q4bits.val[0] = vld1q_u8(x[ib + 0].qs); q4bits.val[1] = vld1q_u8(x[ib + 1].qs); q8b.val[0] = vld1q_s8(y[ib + 0].qs); q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16); q8b.val[2] = vld1q_s8(y[ib + 1].qs); q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16); q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); sumf += GGML_E8M0_TO_FP32_HALF(x[ib + 0].e) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + GGML_E8M0_TO_FP32_HALF(x[ib + 1].e) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); } #endif for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e); int sumi1 = 0; int sumi2 = 0; for (int j = 0; j < QK_MXFP4/2; ++j) { sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__ARM_NEON) float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); uint32_t qh0; uint32_t qh1; uint64_t tmp0[4]; uint64_t tmp1[4]; for (; ib + 1 < nb; ib += 2) { const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; const block_q5_0 * 
GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; const uint8x16_t m4b = vdupq_n_u8(0x0F); // extract the 5th bit via lookup table ((!b) << 4) memcpy(&qh0, x0->qh, sizeof(qh0)); memcpy(&qh1, x1->qh, sizeof(qh1)); tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; tmp0[3] = table_b2b_1[(qh0 >> 24) ]; tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; tmp1[3] = table_b2b_1[(qh1 >> 24) ]; const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); const uint8x16_t v0_0 = vld1q_u8(x0->qs); const uint8x16_t v0_1 = vld1q_u8(x1->qs); // 4-bit -> 8-bit int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); // load y const int8x16_t v1_0l = vld1q_s8(y0->qs); const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); #endif for (; ib < nb; ++ib) { uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); sumi0 += (x0 * y[ib].qs[j]); sumi1 += (x1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined(__ARM_NEON) float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); float summs0 = 0.0f; float summs1 = 0.0f; uint32_t qh0; uint32_t qh1; uint64_t tmp0[4]; uint64_t tmp1[4]; for (; ib + 1 < nb; ib += 2) { const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; const block_q8_1 * GGML_RESTRICT y1 = 
&y[ib + 1]; const uint8x16_t m4b = vdupq_n_u8(0x0F); summs0 += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s); summs1 += GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s); // extract the 5th bit via lookup table ((b) << 4) memcpy(&qh0, x0->qh, sizeof(qh0)); memcpy(&qh1, x1->qh, sizeof(qh1)); tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; tmp0[3] = table_b2b_0[(qh0 >> 24) ]; tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; tmp1[3] = table_b2b_0[(qh1 >> 24) ]; const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); const uint8x16_t v0_0 = vld1q_u8(x0->qs); const uint8x16_t v0_1 = vld1q_u8(x1->qs); // 4-bit -> 8-bit const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); // add high bit const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0); const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); // load y const int8x16_t v1_0l = vld1q_s8(y0->qs); const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); const int8x16_t v1_1l = vld1q_s8(y1->qs); const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; #endif for (; ib < nb; ++ib) { uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; sumi0 += (x0 * y[ib].qs[j]); sumi1 += (x1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); #if defined(__ARM_FEATURE_MATMUL_INT8) assert((nrc == 2) || (nrc == 1)); #else assert(nrc == 1); #endif UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q8_0 * GGML_RESTRICT vx0 = vx; const block_q8_0 * GGML_RESTRICT vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx); const block_q8_0 * GGML_RESTRICT vy0 = vy; const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); for (int i = 
0; i < nb; i++) { const block_q8_0 * GGML_RESTRICT b_x0 = &vx0[i]; const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; const block_q8_0 * GGML_RESTRICT b_x1 = &vx1[i]; const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; const int8x16_t x0_l = vld1q_s8(b_x0->qs); const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); const int8x16_t x1_l = vld1q_s8(b_x1->qs); const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); // load y const int8x16_t y0_l = vld1q_s8(b_y0->qs); const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); float32_t _scale[4] = { GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) }; float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), l1, r1)), l2, r2)), l3, r3))), scale); } float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); vst1_f32(s, vget_low_f32 (sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); return; } #endif int ib = 0; float sumf = 0; #if defined(__ARM_FEATURE_SVE) svfloat32_t sumv0 = svdup_n_f32(0.0f); svfloat32_t sumv1 = svdup_n_f32(0.0f); const int vector_length = ggml_cpu_get_sve_cnt()*8; //VLA Implemenation for SVE switch (vector_length) { case 128: { // predicate for activating lanes for 16 Int8 elements const svbool_t ph16 = svptrue_pat_b8 (SV_VL16); const svbool_t pl16 = svptrue_pat_b32(SV_VL4); for (; ib + 1 < nb; ib += 2) { const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; // load x const svint8_t qx0_0 = svld1_s8(ph16, x0->qs); const svint8_t qx0_1 = svld1_s8(ph16, x0->qs+16); const svint8_t qx1_0 = svld1_s8(ph16, x1->qs); const svint8_t qx1_1 = svld1_s8(ph16, x1->qs+16); // load y const svint8_t qy0_0 = svld1_s8(ph16, y0->qs); const svint8_t qy0_1 = svld1_s8(ph16, y0->qs+16); const svint8_t qy1_0 = svld1_s8(ph16, y1->qs); const svint8_t qy1_1 = svld1_s8(ph16, y1->qs+16); sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16, svdot_s32(svdup_n_s32(0), qx0_0, qy0_0), svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(pl16, sumv1, svcvt_f32_s32_x(pl16, svadd_x(pl16, svdot_s32(svdup_n_s32(0), qx1_0, qy1_0), svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), 
GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1)); } break; case 256: { //printf("sve256"); for (; ib + 1 < nb; ib += 2) { const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; // load x const svint8_t qx0 = svld1_s8(svptrue_b8(), x0->qs); const svint8_t qx1 = svld1_s8(svptrue_b8(), x1->qs); // load y const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); } break; case 512: { // predicate for activating high 256 bit const svbool_t ph32 = svptrue_pat_b8(SV_VL32); // predicate for activating low 256 bit const svbool_t pl32 = svnot_b_z(svptrue_b8(), ph32); // predicate for activating high lanes for 8 float32 elements const svbool_t ph8 = svptrue_pat_b32(SV_VL8); // predicate for activating low lanes for 8 float32 elements const svbool_t pl8 = svnot_b_z(svptrue_b32(), ph8); svfloat32_t sumv00 = svdup_n_f32(0.0f); for (; ib + 1 < nb; ib += 2) { const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; //load 32 int8_t in first half of vector and put another 32 int8_t in second vector lower bits // and add them to make one 64 element vector // load x const svint8_t qx_32 = svld1_s8(ph32, x0->qs); svint8_t qx_64 = svld1_s8(pl32, x0->qs + 2); qx_64 = svadd_s8_x(svptrue_b8(), qx_32, qx_64); // load y const svint8_t qy_32 = svld1_s8(ph32, y0->qs); svint8_t qy_64 = svld1_s8(pl32, y0->qs + 2); qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64); // scale creation const float32_t deq1 = GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d); const float32_t deq2 = GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d); // duplicate deq1 in first half of vector and deq2 in second half of vector const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2); const svfloat32_t sumvt = svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx_64, qy_64)); sumv00 = svmla_f32_m(svptrue_b32(), sumv00, sumvt, temp); } sumf = svaddv_f32(svptrue_b32(), sumv00); break; } default: assert(false && "Unsupported vector length"); break; } #elif defined(__ARM_NEON) float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); for (; ib + 1 < nb; ib += 2) { const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; const int8x16_t x0_0 = vld1q_s8(x0->qs); const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); const int8x16_t x1_0 = vld1q_s8(x1->qs); const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); // load y const int8x16_t y0_0 = vld1q_s8(y0->qs); const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); const int8x16_t y1_0 = vld1q_s8(y1->qs); const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( 
ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); #endif for (; ib < nb; ++ib) { int sumi = 0; for (int j = 0; j < qk; j++) { sumi += x[ib].qs[j]*y[ib].qs[j]; } sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; } void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_tq1_0 * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_NEON) float sumf = 0.0f; uint8_t k_shift[16] = {1, 1, 1, 1, 3, 3, 3, 3, 9, 9, 9, 9, 27, 27, 27, 27}; const uint8x16_t shift = vld1q_u8(k_shift); for (int i = 0; i < nb; ++i) { #if defined(__ARM_FEATURE_DOTPROD) int32x4_t sumi0 = vdupq_n_s32(0); int32x4_t sumi1 = vdupq_n_s32(0); #else int16x8_t sumi0 = vdupq_n_s16(0); int16x8_t sumi1 = vdupq_n_s16(0); #endif // first 32 bytes of 5 elements { uint8x16_t qx0 = vld1q_u8(x[i].qs + 0); uint8x16_t qx1 = vld1q_u8(x[i].qs + 16); uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(3)); uint8x16_t qx3 = vmulq_u8(qx1, vdupq_n_u8(3)); uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(9)); uint8x16_t qx5 = vmulq_u8(qx1, vdupq_n_u8(9)); uint8x16_t qx6 = vmulq_u8(qx0, vdupq_n_u8(27)); uint8x16_t qx7 = vmulq_u8(qx1, vdupq_n_u8(27)); uint8x16_t qx8 = vmulq_u8(qx0, vdupq_n_u8(81)); uint8x16_t qx9 = vmulq_u8(qx1, vdupq_n_u8(81)); // multiply by 3 and keep the 2 bits above 8 bits int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6)); int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6)); int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6)); int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6)); int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6)); int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6)); int8x16_t sqx6 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx6, vshrq_n_u8(qx6, 1)), 6)); int8x16_t sqx7 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx7, vshrq_n_u8(qx7, 1)), 6)); int8x16_t sqx8 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx8, vshrq_n_u8(qx8, 1)), 6)); int8x16_t sqx9 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx9, vshrq_n_u8(qx9, 1)), 6)); const int8x16_t qy0 = vld1q_s8(y[i].qs + 0); const int8x16_t qy1 = vld1q_s8(y[i].qs + 16); const int8x16_t qy2 = vld1q_s8(y[i].qs + 32); const int8x16_t qy3 = vld1q_s8(y[i].qs + 48); const int8x16_t qy4 = vld1q_s8(y[i].qs + 64); const int8x16_t qy5 = vld1q_s8(y[i].qs + 80); const int8x16_t qy6 = vld1q_s8(y[i].qs + 96); const int8x16_t qy7 = vld1q_s8(y[i].qs + 112); const int8x16_t qy8 = vld1q_s8(y[i].qs + 128); const int8x16_t qy9 = vld1q_s8(y[i].qs + 144); #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vdotq_s32(sumi0, sqx0, qy0); sumi1 = vdotq_s32(sumi1, sqx1, qy1); sumi0 = vdotq_s32(sumi0, sqx2, qy2); sumi1 = vdotq_s32(sumi1, sqx3, qy3); sumi0 = vdotq_s32(sumi0, sqx4, qy4); sumi1 = vdotq_s32(sumi1, sqx5, qy5); sumi0 = vdotq_s32(sumi0, sqx6, qy6); sumi1 = vdotq_s32(sumi1, sqx7, qy7); 
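            // What the vhaddq/vshrq pairs above compute: tq1_0 packs five ternary digits
            // per byte in base 3, so for a byte q the expression (q + (q >> 1)) >> 6 is
            // approximately (3*q) >> 8, i.e. the current base-3 digit in 0..2 after the
            // earlier wrap-around multiplications by 3, 9, 27 and 81 rotated it to the top.
            // The digits are stored with a +1 bias; subtracting the q8 block sums (bsums)
            // further below shifts the accumulated result back to the signed -1..1 range.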
sumi0 = vdotq_s32(sumi0, sqx8, qy8); sumi1 = vdotq_s32(sumi1, sqx9, qy9); #else sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx8), vget_low_s8(qy8)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx8), vget_high_s8(qy8)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx9), vget_low_s8(qy9)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx9), vget_high_s8(qy9)); #endif } // last 16 bytes of 5-element, along with the 4 bytes of 4 elements { uint8x16_t qx0 = vld1q_u8(x[i].qs + 32); uint8x16_t qx1 = vmulq_u8(qx0, vdupq_n_u8(3)); uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(9)); uint8x16_t qx3 = vmulq_u8(qx0, vdupq_n_u8(27)); uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(81)); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned uint8x16_t qx5 = vreinterpretq_u8_u32(vdupq_n_u32(qh)); qx5 = vmulq_u8(qx5, shift); // multiply by 3 and keep the 2 bits above 8 bits int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6)); int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6)); int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6)); int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6)); int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6)); int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6)); const int8x16_t qy0 = vld1q_s8(y[i].qs + 160); const int8x16_t qy1 = vld1q_s8(y[i].qs + 176); const int8x16_t qy2 = vld1q_s8(y[i].qs + 192); const int8x16_t qy3 = vld1q_s8(y[i].qs + 208); const int8x16_t qy4 = vld1q_s8(y[i].qs + 224); const int8x16_t qy5 = vld1q_s8(y[i].qs + 240); #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vdotq_s32(sumi0, sqx0, qy0); sumi1 = vdotq_s32(sumi1, sqx1, qy1); sumi0 = vdotq_s32(sumi0, sqx2, qy2); sumi1 = vdotq_s32(sumi1, sqx3, qy3); sumi0 = vdotq_s32(sumi0, sqx4, qy4); sumi1 = vdotq_s32(sumi1, sqx5, qy5); #else sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), 
vget_high_s8(qy4)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); #endif } const int16x8_t ysum0 = vld1q_s16(y[i].bsums); const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vaddq_s32(sumi0, sumi1); sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); sumf += d * (float) vaddvq_s32(sumi0); #else sumi0 = vaddq_s16(sumi0, sumi1); sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); sumf += d * (float) vaddlvq_s16(sumi0); #endif } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_tq2_0 * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_NEON) float sumf = 0.0f; const uint8x16_t m3 = vdupq_n_u8(3); for (int i = 0; i < nb; ++i) { #if defined(__ARM_FEATURE_DOTPROD) int32x4_t sumi0 = vdupq_n_s32(0); int32x4_t sumi1 = vdupq_n_s32(0); #else int16x8_t sumi0 = vdupq_n_s16(0); int16x8_t sumi1 = vdupq_n_s16(0); #endif for (size_t j = 0; j < sizeof(x->qs); j += 32) { uint8x16_t qx0 = vld1q_u8(x[i].qs + j); uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16); uint8x16_t qx2 = vshrq_n_u8(qx0, 2); uint8x16_t qx3 = vshrq_n_u8(qx1, 2); uint8x16_t qx4 = vshrq_n_u8(qx0, 4); uint8x16_t qx5 = vshrq_n_u8(qx1, 4); uint8x16_t qx6 = vshrq_n_u8(qx0, 6); uint8x16_t qx7 = vshrq_n_u8(qx1, 6); int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3)); int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3)); int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3)); int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3)); int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3)); int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3)); int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3)); int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3)); const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0); const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16); const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32); const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48); const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64); const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80); const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96); const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112); #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vdotq_s32(sumi0, sqx0, qy0); sumi1 = vdotq_s32(sumi1, sqx1, qy1); sumi0 = vdotq_s32(sumi0, sqx2, qy2); sumi1 = vdotq_s32(sumi1, sqx3, qy3); sumi0 = vdotq_s32(sumi0, sqx4, qy4); sumi1 = vdotq_s32(sumi1, sqx5, qy5); sumi0 = vdotq_s32(sumi0, sqx6, qy6); sumi1 = vdotq_s32(sumi1, sqx7, qy7); #else sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), 
vget_high_s8(qy4)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); #endif } const int16x8_t ysum0 = vld1q_s16(y[i].bsums); const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vaddq_s32(sumi0, sumi1); sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); sumf += d * (float) vaddvq_s32(sumi0); #else sumi0 = vaddq_s16(sumi0, sumi1); sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); sumf += d * (float) vaddlvq_s16(sumi0); #endif } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #ifdef __ARM_FEATURE_SVE const int vector_length = svcntb()*8; const svuint8_t m3s = svdup_n_u8(0x3); const svuint32_t m4s = svdup_n_u32(0xF); const svint32_t vzero_sv = svdup_n_s32(0); svfloat32_t acc_sum = svdup_n_f32(0); svbool_t pred_s32 = svptrue_pat_b32(SV_VL4); switch (vector_length) { case 128: for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); svfloat32_t d_broad = svdup_n_f32((float32_t)d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8_sv = y[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc); const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+4); const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums); svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+4); const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2)); mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+8); const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+12); const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums+8); q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+12); svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2)); svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1)); acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad); svint32_t sumi1 = svdup_n_s32(0); { const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2); svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s)); 
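            // Layout reminder for q2_K super-blocks: 256 quants stored 2 bits each in
            // x[i].qs, grouped into 16 sub-blocks of 16 values. Each byte of x[i].scales
            // carries a 4-bit sub-block scale in the low nibble and a 4-bit minimum in the
            // high nibble, which is why the code masks with 0xF for the scales and shifts
            // right by 4 for the mins. The min contribution is folded in through the q8
            // block sums (bsums) scaled by -d*dmin, while the 2-bit quants are combined
            // with the per-sub-block scales via the dot products below.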
svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s)); sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0)); const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2+16); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3)); const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+4), m4s)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3)); //------------------------------- q2 += 32; const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+8), m4s)); const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0)); const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2+16); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), 
svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3)); const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+12), m4s)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2)); q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s)); q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3)); } acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad); } *s = svaddv_f32(svptrue_b32(), acc_sum); break; case 256: case 512: for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); svfloat32_t d_broad = svdup_n_f32((float32_t)d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8_sv = y[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); sc += 8; const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s)); const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4)); svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums); const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s)); const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4)); svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums+8); svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2))); acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad); svint32_t sumi1 = svdup_n_s32(0); { const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s)); svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, 
svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); q2 += 32; const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s)); q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7)); sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); } acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad); } *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum); break; default: assert(false && "Unsupported vector length"); break; } #elif __ARM_NEON const uint8x16_t m3 = vdupq_n_u8(0x3); const uint8x16_t m4 = vdupq_n_u8(0xF); const int32x4_t vzero = vdupq_n_s32(0); ggml_int8x16x2_t q2bytes; uint8_t aux[16]; float sum = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const uint8_t * GGML_RESTRICT sc 
= x[i].scales; const uint8x16_t mins_and_scales = vld1q_u8(sc); const uint8x16_t scales = vandq_u8(mins_and_scales, m4); vst1q_u8(aux, scales); const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])), vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])), vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1]))); sum += dmin * vaddvq_s32(vaddq_s32(s0, s1)); int isum = 0; int is = 0; // We use this macro instead of a function call because for some reason // the code runs 2-3% slower, even if the function is declared inline #define MULTIPLY_ACCUM_WITH_SCALE(index)\ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)]; #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\ MULTIPLY_ACCUM_WITH_SCALE((index)); for (int j = 0; j < QK_K/128; ++j) { const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32; ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); MULTIPLY_ACCUM_WITH_SCALE(0); SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); is += 8; } sum += d * isum; } *s = sum; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_FEATURE_SVE) uint32_t aux[3]; uint32_t utmp[4]; const int8_t m32 = 32; const int vector_length = svcntb()*8; const svuint8_t m3b_sv = svdup_n_u8(0x3); const svint32_t vzero_sv = svdup_n_s32(0); const svuint8_t m0_sv = svdup_n_u8(1); const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1); const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2); const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3); float sum = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3_sv = x[i].qs; const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask; const int8_t * GGML_RESTRICT q8_sv = y[i].qs; // Set up scales memcpy(aux, x[i].scales, 12); utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); int8_t * scale = 
(int8_t *)utmp; for (int j = 0; j < 16; ++j) scale[j] -= m32; switch (vector_length) { case 128: { svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv); svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv+16); svuint8_t q3h_sv; svint32_t sumi1_1 = svdup_n_s32(0); svint8_t q3bytes_sv; for (int j = 0; j < QK_K/128; ++j) { const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16; const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16; svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); scale += 4; q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); q3h_sv = 
svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1); q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); if (j == 0) { qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4); qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4); } scale += 4; } sum += d * (svaddv_s32(svptrue_b32(), sumi1_1)); } break; case 256: case 512: { svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv); svuint8_t q3h_sv; svint32_t sumi1_1 = svdup_n_s32(0); svint8_t q3bytes_sv; for (int j = 0; j < QK_K/128; ++j) { const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); q3_sv += 32; svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2); q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1); q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); scale += 4; q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv); q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1); q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); if (j == 0) { qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4); } scale += 4; } sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1)); } break; default: assert(false && "Unsupported vector length"); break; } } *s = sum; #elif __ARM_NEON uint32_t aux[3]; uint32_t utmp[4]; const uint8x16_t m3b = vdupq_n_u8(0x3); const 
int32x4_t vzero = vdupq_n_s32(0); const uint8x16_t m0 = vdupq_n_u8(1); const uint8x16_t m1 = vshlq_n_u8(m0, 1); const uint8x16_t m2 = vshlq_n_u8(m0, 2); const uint8x16_t m3 = vshlq_n_u8(m0, 3); const int8_t m32 = 32; ggml_int8x16x4_t q3bytes; float sum = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].hmask; const int8_t * GGML_RESTRICT q8 = y[i].qs; ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); ggml_uint8x16x4_t q3h; int32_t isum = 0; // Set up scales memcpy(aux, x[i].scales, 12); utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); int8_t * scale = (int8_t *)utmp; for (int j = 0; j < 16; ++j) scale[j] -= m32; for (int j = 0; j < QK_K/128; ++j) { const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32; const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64; const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64; q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1); q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1); q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0])); q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1])); q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; scale += 4; q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); q3h.val[1] = vbicq_u8(m2, qhbits.val[1]); q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1); q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1); q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0])); q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1])); q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; scale += 4; if (j == 0) { qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4); qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4); } } sum += d * isum; } *s = sum; #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); 
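// no SVE/NEON support at compile time: fall back to the generic scalar kernel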
ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } #ifdef __ARM_FEATURE_SVE static inline svuint32_t ggml_decode_q4scales_and_mins_for_mmla(const uint32_t * vx_scales) { const svbool_t pg_all = svptrue_pat_b32(SV_VL4); const svbool_t pg_false = svpfalse_b(); // 0x0000 const svbool_t pg_lo_8 = svwhilelt_b8_s32(0, 8); // 0x00ff const svbool_t pg_odd = svzip1_b32(pg_false, pg_lo_8); svuint32_t vutmp_hi, vutmp_lo; svuint32_t vx01 = svld1_u32(pg_lo_8, vx_scales); vutmp_hi = svzip1_u32(vx01, vx01); vutmp_hi = svlsr_n_u32_m(pg_odd, vutmp_hi, 2); vutmp_hi = svreinterpret_u32_u64(svand_n_u64_x(pg_all, svreinterpret_u64_u32(vutmp_hi), UINT64_C(0x303030303f3f3f3f))); const svuint32_t vx2 = svdup_u32(vx_scales[2]); vutmp_lo = svlsr_u32_x(pg_all, vx2, svreinterpret_u32_s32(svindex_s32(-2, 2))); vutmp_lo = svand_n_u32_z(pg_odd, vutmp_lo, UINT32_C(0x0f0f0f0f)); svuint32_t vutmp = svorr_u32_z(pg_all, vutmp_hi, vutmp_lo); return vutmp; } #endif void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); #ifdef __ARM_FEATURE_MATMUL_INT8 assert((nrc == 2) || (nrc == 1)); #else assert(nrc == 1); #endif UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #ifdef __ARM_FEATURE_SVE const int vector_length = ggml_cpu_get_sve_cnt()*8; #endif #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { svbool_t pg32_2 = svptrue_pat_b32(SV_VL2); const block_q4_K * GGML_RESTRICT vx0 = vx; const block_q8_K * GGML_RESTRICT vy0 = vy; const block_q4_K * GGML_RESTRICT vx1 = (const block_q4_K *) ((const uint8_t*)vx + bx); const block_q8_K * GGML_RESTRICT vy1 = (const block_q8_K *) ((const uint8_t*)vy + by); union { uint32_t u32[8]; uint64_t u64[4]; } new_utmp; svfloat32_t sumf1 = svdup_n_f32(0); switch (vector_length) { case 128: { svbool_t pg_false = svpfalse_b(); svbool_t pg_lo_8 = svwhilelt_b8_s32(0, 8); svbool_t vmins_mask1= svzip1_b32(pg_lo_8, pg_false); svbool_t vmins_mask2 = svzip1_b32(pg_false, pg_lo_8); svbool_t pg128_all = svptrue_pat_b8(SV_VL16); for (int i = 0; i < nb; ++i) { svfloat32_t vy_d = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)); svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); svfloat32_t svsuper_block_scales = svmul_f32_x(pg128_all, vy_d, vx_d); svfloat32_t vx_dmins = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].dmin)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].dmin))); svfloat32_t vy_dmins = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)); svfloat32_t svdmins = svmul_n_f32_x(pg128_all, svmul_f32_x(pg128_all, vy_dmins, vx_dmins), -1); const uint8_t * GGML_RESTRICT q4_0 = vx0[i].qs; const int8_t * GGML_RESTRICT q8_0 = vy0[i].qs; const uint8_t * GGML_RESTRICT q4_1 = vx1[i].qs; const int8_t * GGML_RESTRICT q8_1 = vy1[i].qs; svint16_t lo = svld1_s16(pg128_all, vy0[i].bsums + 0); svint16_t hi = svld1_s16(pg128_all, vy0[i].bsums + 8); svint16_t sum_tmp1 = svuzp1_s16(lo, hi); svint16_t sum_tmp2 = svuzp2_s16(lo, hi); svint16_t svq8sums_0 = svadd_s16_x(pg128_all, sum_tmp1, sum_tmp2); lo = svld1_s16(pg128_all, vy1[i].bsums + 0); hi = svld1_s16(pg128_all, vy1[i].bsums + 8); sum_tmp1 = svuzp1(lo, hi); sum_tmp2 = 
svuzp2(lo, hi); svint16_t svq8sums_1 = svadd_s16_x(pg128_all, sum_tmp1, sum_tmp2); svuint32_t decoded_scales0 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx0[i].scales); svuint32_t decoded_scales1 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx1[i].scales); svuint32x2_t decoded_scales = svcreate2_u32(decoded_scales0, decoded_scales1); svst2_u32(pg128_all, new_utmp.u32, decoded_scales); svint16_t svmins8_0 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u32(svuzp1_u32(svld1_u32(vmins_mask1, new_utmp.u32+4), svdup_n_u32(0))))); svint16_t svmins8_1 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u32(svuzp2_u32(svld1_u32(vmins_mask2, new_utmp.u32+4), svdup_n_u32(0))))); svint32_t svsumfs_tmp1 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_0, svmins8_0)); svint32_t svsumfs_tmp2 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_0, svmins8_1)); svint32_t svsumfs_tmp3 = svtrn1_s32(svsumfs_tmp1, svsumfs_tmp2); svint32_t svsumfs_tmp4 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_1, svmins8_0)); svint32_t svsumfs_tmp5 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_1, svmins8_1)); svint32_t svsumfs_tmp6 = svtrn1_s32(svsumfs_tmp4, svsumfs_tmp5); svint32_t svsumfs_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(svsumfs_tmp3), svreinterpret_s64_s32(svsumfs_tmp6))); svint32_t svsumfs_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(svsumfs_tmp3), svreinterpret_s64_s32(svsumfs_tmp6))); svint32_t svsumfs_tmp = svadd_s32_x(pg128_all, svsumfs_tmp7, svsumfs_tmp8); svint32_t svscales, sumi1, sumi2; svint32_t acc_sumif1 = svdup_n_s32(0); svint32_t acc_sumif2 = svdup_n_s32(0); svint8_t q4bytes_0_l, q4bytes_0_h, q4bytes_1_l, q4bytes_1_h, l0, l1, l2, l3, q8bytes_0_h, q8bytes_0_l, q8bytes_1_h, q8bytes_1_l, r0, r1, r2, r3; #pragma GCC unroll 1 for (int j = 0; j < QK_K/64; ++j) { q4bytes_0_l = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0), 0xf)); q4bytes_1_l = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1), 0xf)); q4bytes_0_h = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0+16), 0xf)); q4bytes_1_h = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1+16), 0xf)); l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); l2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); l3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); q8bytes_0_h = svld1_s8(pg128_all, q8_0); q8bytes_1_h = svld1_s8(pg128_all, q8_1); q8bytes_0_l = svld1_s8(pg128_all, q8_0+16); q8bytes_1_l = svld1_s8(pg128_all, q8_1+16); r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); sumi1 = svmmla_s32(svmmla_s32(svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), r2, l2), r3, l3); svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg128_all, svlsl_n_u32_x(pg128_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 
8*(4-2*(j%2)-1)), 24)); acc_sumif1 = svmla_s32_x(pg128_all, acc_sumif1, svscales, sumi1); q4bytes_0_l = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0), 4)); q4bytes_1_l = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1), 4)); q4bytes_0_h = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0+16), 4)); q4bytes_1_h = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1+16), 4)); l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); l2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); l3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); q8bytes_0_h = svld1_s8(pg128_all, q8_0+32); q8bytes_1_h = svld1_s8(pg128_all, q8_1+32); q8bytes_0_l = svld1_s8(pg128_all, q8_0+48); q8bytes_1_l = svld1_s8(pg128_all, q8_1+48); r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); sumi2 = svmmla_s32(svmmla_s32(svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), r2, l2), r3, l3); svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg128_all, svlsl_n_u32_x(pg128_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-2)), 24)); acc_sumif2 = svmla_s32_x(pg128_all, acc_sumif2, svscales, sumi2); q4_0 += 32; q4_1 += 32; q8_0 += 64; q8_1 += 64; } sumf1 = svmla_f32_x(pg128_all, svmla_f32_x(pg128_all, sumf1, svcvt_f32_x(pg128_all, svadd_s32_x(pg128_all, acc_sumif1, acc_sumif2)), svsuper_block_scales), svdmins, svcvt_f32_s32_x(pg128_all, svsumfs_tmp)); } //end of for nb } // end of case 128 break; case 256: case 512: { const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); const svbool_t pg256_all = svptrue_pat_b8(SV_ALL); for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4_0 = vx0[i].qs; const int8_t * GGML_RESTRICT q8_0 = vy0[i].qs; const uint8_t * GGML_RESTRICT q4_1 = vx1[i].qs; const int8_t * GGML_RESTRICT q8_1 = vy1[i].qs; svint32_t svscales, sumi1, sumi2; svint32_t acc_sumif1 = svdup_n_s32(0); svint32_t acc_sumif2 = svdup_n_s32(0); svint8_t l0, l1, l2, l3, r0, r1, r2, r3; svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); svfloat64_t vy_d_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d))); svfloat32_t vy_d = svreinterpret_f32_f64(svuzp1_f64(vy_d_tmp, vy_d_tmp)); svfloat32_t svsuper_block_scales = svmul_f32_z(pg32_4, vy_d, vx_d); svfloat32_t vx_dmins = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].dmin)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].dmin))); svfloat64_t vy_dmins_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d))); svfloat32_t vy_dmins = svreinterpret_f32_f64(svuzp1_f64(vy_dmins_tmp, vy_dmins_tmp)); svfloat32_t svdmins = svmul_n_f32_x(pg32_4, svmul_f32_x(pg32_4, vx_dmins, vy_dmins), -1); svint16_t rc1 = svuzp1_s16(svld1_s16(pg256_all, vy0[i].bsums), svld1_s16(pg256_all, vy1[i].bsums)); 
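// rc2 takes the odd bsum entries of both rows; adding rc1 and rc2 gives the
// pairwise block sums that feed the mins (dmin) correction below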
svint16_t rc2 = svuzp2_s16(svld1_s16(pg256_all, vy0[i].bsums), svld1_s16(pg256_all, vy1[i].bsums)); svint16_t svq8sums = svadd_s16_x(pg256_all, rc1, rc2); svuint32_t decoded_scales0 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx0[i].scales); svuint32_t decoded_scales1 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx1[i].scales); svuint32x2_t decoded_scales = svcreate2_u32(decoded_scales0, decoded_scales1); svst2_u32(pg8_16, new_utmp.u32, decoded_scales); svint16_t new_svq8sums_0 = svreinterpret_s16_u64(svtrn1_u64(svreinterpret_u64_s16(svq8sums), svreinterpret_u64_s16(svq8sums))); svint16_t new_svq8sums_1 = svreinterpret_s16_u64(svtrn2_u64(svreinterpret_u64_s16(svq8sums), svreinterpret_u64_s16(svq8sums))); svuint64_t new_mins_0 = svdup_u64(new_utmp.u64[2]); svuint64_t new_mins_1 = svdup_u64(new_utmp.u64[3]); svint16_t new_svmins8_0 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u64(new_mins_0))); svint16_t new_svmins8_1 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u64(new_mins_1))); svint64_t dot_prod_0 = svdot_s64(svdup_s64(0), new_svmins8_0, new_svq8sums_0); svint64_t dot_prod_1 = svdot_s64(dot_prod_0, new_svmins8_1, new_svq8sums_1); svfloat32_t converted_dot_prod_1 = svcvt_f32_s64_x(pg256_all, dot_prod_1); svfloat32_t svsumfs_tmp = svuzp1_f32(converted_dot_prod_1, converted_dot_prod_1); #pragma GCC unroll 1 for (int j = 0; j < QK_K/64; ++j) { svuint8_t q4bytes_0 = svand_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_0), 0xf); svuint8_t q4bytes_1 = svand_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_1), 0xf); svuint8_t q4bytes_2 = svlsr_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_0), 4); svuint8_t q4bytes_3 = svlsr_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_1), 4); l0 = svreinterpret_s8_u64(svzip1_u64(svreinterpret_u64_u8(q4bytes_0), svreinterpret_u64_u8(q4bytes_1))); l1 = svreinterpret_s8_u64(svzip2_u64(svreinterpret_u64_u8(q4bytes_0), svreinterpret_u64_u8(q4bytes_1))); l2 = svreinterpret_s8_u64(svzip1_u64(svreinterpret_u64_u8(q4bytes_2), svreinterpret_u64_u8(q4bytes_3))); l3 = svreinterpret_s8_u64(svzip2_u64(svreinterpret_u64_u8(q4bytes_2), svreinterpret_u64_u8(q4bytes_3))); svint8_t q8bytes_0 = svld1_s8(pg256_all, q8_0); svint8_t q8bytes_1 = svld1_s8(pg256_all, q8_1); svint8_t q8bytes_2 = svld1_s8(pg256_all, q8_0+32); svint8_t q8bytes_3 = svld1_s8(pg256_all, q8_1+32); r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_2), svreinterpret_s64_s8(q8bytes_3))); r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_2), svreinterpret_s64_s8(q8bytes_3))); sumi1 = svmmla(svmmla(svdup_n_s32(0), r0, l0), r1, l1); svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg256_all, svlsl_n_u32_x(pg256_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-1)), 24)); acc_sumif1 = svmla_s32_x(pg256_all, acc_sumif1, svscales, sumi1); sumi2 = svmmla(svmmla(svdup_n_s32(0), r2, l2), r3, l3); svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg256_all, svlsl_n_u32_x(pg256_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-2)), 24)); acc_sumif2 = svmla_s32_x(pg256_all, acc_sumif2, svscales, sumi2); q4_0 += 32; q4_1 += 32; q8_0 += 64; q8_1 += 64; } svint32_t acc_sumif = svadd_s32_x(pg256_all, acc_sumif1, acc_sumif2); svint32_t swap_acc_sumif = svext_s32(acc_sumif, acc_sumif, 4); acc_sumif = svadd_s32_x(pg32_4, acc_sumif, 
swap_acc_sumif); sumf1 = svmla_f32_x(pg32_4, svmla_f32_x(pg32_4, sumf1, svcvt_f32_x(pg32_4, acc_sumif), svsuper_block_scales), svdmins, svsumfs_tmp); } // end of for nb } // end of case 256-512 break; default: assert(false && "Unsupported vector length"); break; } svst1_f32(pg32_2, s, sumf1); svst1_f32(pg32_2, s + bs, svreinterpret_f32_u8(svext_u8(svreinterpret_u8_f32(sumf1), svdup_n_u8(0), 8))); return; } #elif defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_K * GGML_RESTRICT x0 = x; const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); const block_q8_K * GGML_RESTRICT y0 = y; const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); const uint8x16_t m4b = vdupq_n_u8(0x0f); float32x4_t vfsum = vdupq_n_f32(0.0f); for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { const uint8_t * GGML_RESTRICT qx0 = x0->qs; const uint8_t * GGML_RESTRICT qx1 = x1->qs; const int8_t * GGML_RESTRICT qy0 = y0->qs; const int8_t * GGML_RESTRICT qy1 = y1->qs; // decode scales and mins int8_t x0_scales[8], x1_scales[8]; int16x8_t x0_mins, x1_mins; { uint32_t scales_mins[3]; memcpy(scales_mins, x0->scales, 12); const uint32_t mins_0_3 = scales_mins[1] & kmask1; const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); const uint32x2_t mins = {mins_0_3, mins_4_7}; x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); uint32_t scales[2]; scales[0] = scales_mins[0] & kmask1; // scales 0~3 scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 memcpy(x0_scales, scales, 8); } { uint32_t scales_mins[3]; memcpy(scales_mins, x1->scales, 12); const uint32_t mins_0_3 = scales_mins[1] & kmask1; const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); const uint32x2_t mins = {mins_0_3, mins_4_7}; x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); uint32_t scales[2]; scales[0] = scales_mins[0] & kmask1; // scales 0~3 scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 memcpy(x1_scales, scales, 8); } int32x4_t visum = {0}; // process 64 data points per iteration, totally 256 data points for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) { const int8x16x4_t vy0 = vld1q_s8_x4(qy0); const int8x16x4_t vy1 = vld1q_s8_x4(qy1); int8x16_t vx0[4], vx1[4]; { const uint8x16x2_t vv = vld1q_u8_x2(qx0); vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); } { const uint8x16x2_t vv = vld1q_u8_x2(qx1); vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); } // process 32 data points (share same block scale) per iteration for (int k = 0; k < 2; ++k) { const int blk = j * 2 + k; const int32x4_t block_scale = { x0_scales[blk], x0_scales[blk], x1_scales[blk], x1_scales[blk], }; int32x4_t vr = {0}; for (int l = 0; l < 2; ++l) { const int idx = k * 2 + l; const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]); const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]); const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]); const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]); const 
int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64)); const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64)); const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64)); const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64)); vr = vmmlaq_s32(vr, vx_l, vy_l); vr = vmmlaq_s32(vr, vx_h, vy_h); } // apply block scale, will NOT overflow // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits visum = vmlaq_s32(visum, vr, block_scale); } } // adjust bias, apply superblock scale { int32_t bias[4]; // no obvious uplift from sve sdot-16, just use neon mul add const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8)); const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), vld1q_s16(y1->bsums+8)); bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)), vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins)))); bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)), vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins)))); bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)), vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins)))); bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)), vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); const float32x4_t dmins = { GGML_CPU_FP16_TO_FP32(x0->dmin) * y0->d, GGML_CPU_FP16_TO_FP32(x0->dmin) * y1->d, GGML_CPU_FP16_TO_FP32(x1->dmin) * y0->d, GGML_CPU_FP16_TO_FP32(x1->dmin) * y1->d, }; vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); const float32x4_t superblock_scale = { GGML_CPU_FP16_TO_FP32(x0->d) * y0->d, GGML_CPU_FP16_TO_FP32(x0->d) * y1->d, GGML_CPU_FP16_TO_FP32(x1->d) * y0->d, GGML_CPU_FP16_TO_FP32(x1->d) * y1->d, }; vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); } } // vfsum = ABCD -> ACBD // AC -> s, BD -> (s+bs) vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); vst1_f32(s, vget_low_f32 (vfsum)); vst1_f32(s + bs, vget_high_f32(vfsum)); return; } #endif #ifdef __ARM_FEATURE_SVE float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); memcpy(utmp, x[i].scales, K_SCALE_SIZE); uint32x2_t mins8 = { 0 }; mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[0] &= kmask1; const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); sumf -= dmin * vaddvq_s32(prod); const uint8_t * scales = (const uint8_t *)utmp; const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const svuint8_t m4b = svdup_n_u8(0xf); const svint32_t mzero = svdup_n_s32(0); svint32_t sumi1 = svdup_n_s32(0); svint32_t sumi1_1 = svdup_n_s32(0); svint32_t sumi1_2 = svdup_n_s32(0); svint32_t sumi2 = svdup_n_s32(0); svint32_t sumi2_1 = svdup_n_s32(0); svint32_t sumi2_2 = svdup_n_s32(0); switch (vector_length) { case 128: { for (int j = 0; j < QK_K/64; ++j) { svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); svint8_t 
q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b)); q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4)); q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); q4 += 32; } sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); } break; case 256: case 512: { for (int j = 0; j < QK_K/64; ++j) { const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32; svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); } sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); } break; default: assert(false && "Unsupported vector length"); break; } } *s = sumf; #elif defined __ARM_NEON const uint8x16_t m4b = vdupq_n_u8(0xf); const int32x4_t mzero = vdupq_n_s32(0); ggml_int8x16x2_t q4bytes; ggml_int8x16x2_t q8bytes; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); memcpy(utmp, x[i].scales, 12); uint32x2_t mins8 = { 0 }; mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[0] &= kmask1; const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); sumf -= dmin * vaddvq_s32(prod); const uint8_t * scales = (const uint8_t *)utmp; const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t sumi1 = 0; int32_t sumi2 = 0; for (int j = 0; j < QK_K/64; ++j) { const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); sumi1 += vaddvq_s32(p1) * scales[2*j+0]; q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = 
vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); sumi2 += vaddvq_s32(p2) * scales[2*j+1]; } sumf += d * (sumi1 + sumi2); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #ifdef __ARM_NEON const uint8x16_t m4b = vdupq_n_u8(0xf); const uint8x16_t mone = vdupq_n_u8(1); const uint8x16_t mtwo = vdupq_n_u8(2); const int32x4_t mzero = vdupq_n_s32(0); ggml_int8x16x4_t q5bytes; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8); const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); int32_t sumi_mins = vaddvq_s32(prod); const uint8_t * scales = (const uint8_t *)utmp; const uint8_t * GGML_RESTRICT q5 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); ggml_uint8x16x4_t q5h; int32_t sumi = 0; for (int j = 0; j < QK_K/64; ++j) { const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32; const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3); q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3); qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2); qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2); q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0])); q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1])); q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2])); q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3])); sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++; sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++; } sumf += d * sumi - dmin * sumi_mins; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); 
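// note: q5_K has no SVE or int8-MMLA specialization in this file; without NEON
// the generic kernel below is used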
UNUSED(utmp); ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); #ifdef __ARM_FEATURE_MATMUL_INT8 assert((nrc == 2) || (nrc == 1)); #else assert(nrc == 1); #endif UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #ifdef __ARM_FEATURE_SVE const int vector_length = ggml_cpu_get_sve_cnt()*8; #endif #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const svbool_t pg32_2 = svptrue_pat_b32(SV_VL2); svfloat32_t sum = svdup_n_f32(0); const block_q6_K * GGML_RESTRICT vx0 = vx; const block_q8_K * GGML_RESTRICT vy0 = vy; const block_q6_K * GGML_RESTRICT vx1 = (const block_q6_K *) ((const uint8_t*)vx + bx); const block_q8_K * GGML_RESTRICT vy1 = (const block_q8_K *) ((const uint8_t*)vy + by); switch (vector_length) { case 128: { const svbool_t pg128_all = svptrue_pat_b8(SV_ALL); for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT ql0 = vx0[i].ql; const uint8_t * GGML_RESTRICT qh0 = vx0[i].qh; const uint8_t * GGML_RESTRICT ql1 = vx1[i].ql; const uint8_t * GGML_RESTRICT qh1 = vx1[i].qh; const int8_t * GGML_RESTRICT q80 = vy0[i].qs; const int8_t * GGML_RESTRICT q81 = vy1[i].qs; const int8_t * GGML_RESTRICT scale0 = vx0[i].scales; const int8_t * GGML_RESTRICT scale1 = vx1[i].scales; svfloat32_t vy_d = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)); svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); svfloat32_t svsuper_block_scales = svmul_f32_x(pg128_all, vy_d, vx_d); // process q8sum summation 128 bit route const svint16_t q8sums_01 = svld1_s16(pg128_all, vy0[i].bsums); const svint16_t q8sums_02 = svld1_s16(pg128_all, vy0[i].bsums + 8); const svint16_t q8sums_11 = svld1_s16(pg128_all, vy1[i].bsums); const svint16_t q8sums_12 = svld1_s16(pg128_all, vy1[i].bsums + 8); const svint64x2_t q6scales_0_tmp = svld2_s64(pg128_all, (const int64_t *)scale0); const svint16_t q6scales_01 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_0_tmp, 0))); const svint16_t q6scales_02 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_0_tmp, 1))); const svint64x2_t q6scales_1_tmp = svld2_s64(pg128_all, (const int64_t *)scale1); const svint16_t q6scales_11 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_1_tmp, 0))); const svint16_t q6scales_12 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_1_tmp, 1))); const svint64_t prod = svdup_n_s64(0); svint32_t isum_tmp1 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_01, q6scales_01), q8sums_02, q6scales_02)); svint32_t isum_tmp2 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_01, q6scales_11), q8sums_02, q6scales_12)); svint32_t isum_tmp3 = svtrn1_s32(isum_tmp1, isum_tmp2); svint32_t isum_tmp4 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_11, q6scales_01), q8sums_12, q6scales_02)); svint32_t isum_tmp5 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_11, q6scales_11), q8sums_12, q6scales_12)); svint32_t isum_tmp6 = svtrn1_s32(isum_tmp4, isum_tmp5); svint32_t isum_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(isum_tmp3), svreinterpret_s64_s32(isum_tmp6))); svint32_t isum_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(isum_tmp3), svreinterpret_s64_s32(isum_tmp6))); 
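// isum_tmp7/isum_tmp8 recombine the transposed bsums*scales dot products; the
// result is applied below with a factor of -32 to account for the 32 that is
// subtracted from every 6-bit quant on dequantization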
svint32_t svisum_mins = svadd_s32_x(pg128_all, isum_tmp7, isum_tmp8); // process mmla svint8_t l0, l1, r0, r1; svint32_t isum_tmp = svdup_n_s32(0); for (int j = 0; j < QK_K/128; ++j) { for (int k = 0; k < 8; ++k) { svuint8_t qhbits_0 = svld1_u8(pg128_all, qh0+16*(k%2)); svuint8_t qhbits_1 = svld1_u8(pg128_all, qh1+16*(k%2)); svuint8_t q6bits_0 = svld1_u8(pg128_all, ql0+16*(k%4)); svuint8_t q6bits_1 = svld1_u8(pg128_all, ql1+16*(k%4)); const int ql_pos = (k/4)*4; svuint8_t q6bytes_0_lo = (ql_pos < 4) ? svand_n_u8_x(pg128_all, q6bits_0, 0xf) : svlsr_n_u8_x(pg128_all, q6bits_0, 4); svuint8_t q6bytes_1_lo = (ql_pos < 4) ? svand_n_u8_x(pg128_all, q6bits_1, 0xf) : svlsr_n_u8_x(pg128_all, q6bits_1, 4); const int qh_pos = (k/2)*2; svuint8_t q6bytes_0_hi = svand_n_u8_x(pg128_all, qhbits_0, 0x3 << qh_pos); svuint8_t q6bytes_1_hi = svand_n_u8_x(pg128_all, qhbits_1, 0x3 << qh_pos); svint8_t q6bytes_0, q6bytes_1; if (qh_pos <= 4) { q6bytes_0 = svreinterpret_s8_u8(svmla_n_u8_x(pg128_all, q6bytes_0_lo, q6bytes_0_hi, 1 << (4 - qh_pos))); q6bytes_1 = svreinterpret_s8_u8(svmla_n_u8_x(pg128_all, q6bytes_1_lo, q6bytes_1_hi, 1 << (4 - qh_pos))); } else { q6bytes_0 = svreinterpret_s8_u8(svorr_u8_x(pg128_all, q6bytes_0_lo, svlsr_n_u8_x(pg128_all, q6bytes_0_hi, (qh_pos - 4)))); q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg128_all, q6bytes_1_lo, svlsr_n_u8_x(pg128_all, q6bytes_1_hi, (qh_pos - 4)))); } svint8_t q8bytes_0 = svld1_s8(pg128_all, q80+16*(k%8)); svint8_t q8bytes_1 = svld1_s8(pg128_all, q81+16*(k%8)); l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); svint32_t svscale = svzip1_s32(svdup_n_s32(scale0[k]), svdup_n_s32(scale1[k])); isum_tmp = svmla_s32_x(pg128_all, isum_tmp, svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), svscale); } qh0 += 32; qh1 += 32; ql0 += 64; ql1 += 64; q80 += 128; q81 += 128; scale0 += 8; scale1 += 8; } sum = svmla_f32_x(pg128_all, sum, svcvt_f32_x(pg128_all, svmla_s32_x(pg128_all, isum_tmp, svisum_mins, svdup_n_s32(-32))), svsuper_block_scales); } } // end of case 128 break; case 256: case 512: { const svbool_t pg256_all = svptrue_pat_b8(SV_ALL); const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT ql0 = vx0[i].ql; const uint8_t * GGML_RESTRICT qh0 = vx0[i].qh; const uint8_t * GGML_RESTRICT ql1 = vx1[i].ql; const uint8_t * GGML_RESTRICT qh1 = vx1[i].qh; const int8_t * GGML_RESTRICT q80 = vy0[i].qs; const int8_t * GGML_RESTRICT q81 = vy1[i].qs; const int8_t * GGML_RESTRICT scale0 = vx0[i].scales; const int8_t * GGML_RESTRICT scale1 = vx1[i].scales; svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); svfloat64_t vy_d_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d))); svfloat32_t vy_d = svreinterpret_f32_f64(svuzp1_f64(vy_d_tmp, vy_d_tmp)); svfloat32_t svsuper_block_scales = svmul_f32_x(pg32_4, vy_d, vx_d); // process q8sum summation 256 bit route const svint16_t q8sums_0 = svld1_s16(pg256_all, vy0[i].bsums); const svint16_t q8sums_1 = svld1_s16(pg256_all, vy1[i].bsums); const svint16_t q6scales_0 = svunpklo_s16(svld1_s8(pg256_all, scale0)); const svint16_t 
q6scales_1 = svunpklo_s16(svld1_s8(pg256_all, scale1)); const svint64_t prod = svdup_n_s64(0); svint32_t isum_tmp1 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_0, q6scales_0)); svint32_t isum_tmp2 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_0, q6scales_1)); svint32_t isum_tmp3 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_1, q6scales_0)); svint32_t isum_tmp4 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_1, q6scales_1)); svint32_t isum_tmp5 = svtrn1_s32(isum_tmp1, isum_tmp2); svint32_t isum_tmp6 = svtrn1_s32(isum_tmp3, isum_tmp4); svint32_t isum_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(isum_tmp5), svreinterpret_s64_s32(isum_tmp6))); svint32_t isum_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(isum_tmp5), svreinterpret_s64_s32(isum_tmp6))); svint32_t isum_tmp9 = svadd_s32_x(pg256_all, isum_tmp7, isum_tmp8); svint32_t isum_tmp10 = svreinterpret_s32_u8(svext_u8(svreinterpret_u8_s32(isum_tmp9), svreinterpret_u8_s32(isum_tmp9), 16)); svint32_t svisum_mins = svadd_s32_z(pg32_4, isum_tmp9, isum_tmp10); // process mmla svint8_t l0, l1, r0, r1; svint32_t isum_tmp = svdup_n_s32(0); for (int j = 0; j < QK_K/128; ++j) { for (int k = 0; k < 8; k+=2) { // process 2 block svuint8_t qhbits_0 = svld1_u8(pg256_all, qh0); svuint8_t qhbits_1 = svld1_u8(pg256_all, qh1); svuint8_t q6bits_0 = svld1_u8(pg256_all, ql0+32*((k%4)/2)); svuint8_t q6bits_1 = svld1_u8(pg256_all, ql1+32*((k%4)/2)); const int ql_pos = (k/4)*4; svuint8_t q6bytes_0_lo = (ql_pos < 4) ? svand_n_u8_x(pg256_all, q6bits_0, 0xf) : svlsr_n_u8_x(pg256_all, q6bits_0, 4); svuint8_t q6bytes_1_lo = (ql_pos < 4) ? svand_n_u8_x(pg256_all, q6bits_1, 0xf) : svlsr_n_u8_x(pg256_all, q6bits_1, 4); const int qh_pos = (k/2)*2; svuint8_t q6bytes_0_hi = svand_n_u8_x(pg256_all, qhbits_0, 0x3 << qh_pos); svuint8_t q6bytes_1_hi = svand_n_u8_x(pg256_all, qhbits_1, 0x3 << qh_pos); svint8_t q6bytes_0, q6bytes_1; if (qh_pos <= 4) { q6bytes_0 = svreinterpret_s8_u8(svmla_n_u8_x(pg256_all, q6bytes_0_lo, q6bytes_0_hi, 1 << (4 - qh_pos))); q6bytes_1 = svreinterpret_s8_u8(svmla_n_u8_x(pg256_all, q6bytes_1_lo, q6bytes_1_hi, 1 << (4 - qh_pos))); } else { q6bytes_0 = svreinterpret_s8_u8(svorr_u8_x(pg256_all, q6bytes_0_lo, svlsr_n_u8_x(pg256_all, q6bytes_0_hi, (qh_pos - 4)))); q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg256_all, q6bytes_1_lo, svlsr_n_u8_x(pg256_all, q6bytes_1_hi, (qh_pos - 4)))); } svint8_t q8bytes_0 = svld1_s8(pg256_all, q80+32*(k/2)); svint8_t q8bytes_1 = svld1_s8(pg256_all, q81+32*(k/2)); l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); svint32_t svscale0 = svzip1_s32(svdup_n_s32(scale0[k]), svdup_n_s32(scale1[k])); svint32_t svscale1 = svzip1_s32(svdup_n_s32(scale0[k+1]), svdup_n_s32(scale1[k+1])); isum_tmp = svmla_s32_x(pg256_all, isum_tmp, svmmla_s32(svdup_n_s32(0), r0, l0), svscale0); isum_tmp = svmla_s32_x(pg256_all, isum_tmp, svmmla_s32(svdup_n_s32(0), r1, l1), svscale1); } qh0 += 32; qh1 += 32; ql0 += 64; ql1 += 64; q80 += 128; q81 += 128; scale0 += 8; scale1 += 8; } // end of for svint32_t swap_isum_tmp = svext_s32(isum_tmp, isum_tmp, 4); isum_tmp = svadd_s32_x(pg32_4, isum_tmp, swap_isum_tmp); sum = svmla_f32_x(pg32_4, sum, svcvt_f32_x(pg32_4, 
svmla_s32_x(pg32_4, isum_tmp, svisum_mins, svdup_n_s32(-32))), svsuper_block_scales); } } // end of case 256 break; default: assert(false && "Unsupported vector length"); break; } // end of switch svst1_f32(pg32_2, s, sum); svst1_f32(pg32_2, s + bs, svreinterpret_f32_u8(svext_u8(svreinterpret_u8_f32(sum), svdup_n_u8(0), 8))); return; } #elif defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q6_K * GGML_RESTRICT x0 = x; const block_q6_K * GGML_RESTRICT x1 = (const block_q6_K *) ((const uint8_t *)vx + bx); const block_q8_K * GGML_RESTRICT y0 = y; const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); float32x4_t vfsum = vdupq_n_f32(0.0f); for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { const uint8_t * GGML_RESTRICT ql0 = x0->ql; const uint8_t * GGML_RESTRICT ql1 = x1->ql; const uint8_t * GGML_RESTRICT qh0 = x0->qh; const uint8_t * GGML_RESTRICT qh1 = x1->qh; const int8_t * GGML_RESTRICT qy0 = y0->qs; const int8_t * GGML_RESTRICT qy1 = y1->qs; const uint8x16_t mone = vdupq_n_u8(0x30); const uint8x16_t m4b = vdupq_n_u8(0x0f); int32x4_t visum = vdupq_n_s32(0); // process 8 blocks per iteration, totally 16 blocks for (int j = 0; j < 2; ++j, qh0 += 32, ql0 += 64, qh1 += 32, ql1 += 64) { int8x16_t vx0[8], vx1[8]; // de-quantize vx0[8] { const uint8x16x2_t qh_bits = vld1q_u8_x2(qh0); const uint8x16x4_t ql_bits = vld1q_u8_x4(ql0); uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); vx0[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); vx0[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); vx0[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); vx0[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); q6h_0 = vandq_u8(mone, qh_bits.val[0]); q6h_1 = vandq_u8(mone, qh_bits.val[1]); q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); vx0[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); vx0[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); vx0[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); vx0[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); } // de-quantize vx1[8] { const uint8x16x2_t qh_bits = vld1q_u8_x2(qh1); const uint8x16x4_t ql_bits = vld1q_u8_x4(ql1); uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); vx1[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); vx1[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); vx1[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); vx1[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); q6h_0 = vandq_u8(mone, qh_bits.val[0]); q6h_1 = vandq_u8(mone, qh_bits.val[1]); q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); vx1[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); vx1[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); vx1[6] = 
vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); vx1[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); } // process 16 elements (one block with same scale) per iteration // - vx = concat(ql, qh) - 32 // - r1,r2,r3,r4 = smmla(vx, vy) for (int k = 0; k < 8; ++k) { const int blk = j * 8 + k; const int8x16_t vy0 = vld1q_s8(qy0); const int8x16_t vy1 = vld1q_s8(qy1); qy0 += 16; qy1 += 16; const int32x4_t block_scale = { x0->scales[blk], x0->scales[blk], x1->scales[blk], x1->scales[blk], }; // calculate four results at once with outer product const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); int32x4_t vr = vdupq_n_s32(0); vr = vmmlaq_s32(vr, vx_l, vy_l); vr = vmmlaq_s32(vr, vx_h, vy_h); // apply block scale, will NOT overflow // block_scale * sum_256(int6*int8) <= 2^(8+8+6+8) = 30 bits visum = vmlaq_s32(visum, vr, block_scale); } } // adjust bias, apply superblock scale { int32_t bias[4]; // NEON doesn't support int16 dot product, fallback to separated mul and add const int16x8x2_t q8sums0 = vld1q_s16_x2(y0->bsums); const int16x8x2_t q8sums1 = vld1q_s16_x2(y1->bsums); int8x16_t scales_s8 = vld1q_s8(x0->scales); const int16x8x2_t q6scales0 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; scales_s8 = vld1q_s8(x1->scales); const int16x8x2_t q6scales1 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; int32x4_t prod; prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales0.val[0])), vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales0.val[0]))), vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales0.val[1])), vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales0.val[1])))); bias[0] = vaddvq_s32(prod); prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales0.val[0])), vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales0.val[0]))), vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales0.val[1])), vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales0.val[1])))); bias[1] = vaddvq_s32(prod); prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales1.val[0])), vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales1.val[0]))), vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales1.val[1])), vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales1.val[1])))); bias[2] = vaddvq_s32(prod); prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales1.val[0])), vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales1.val[0]))), vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales1.val[1])), vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales1.val[1])))); bias[3] = vaddvq_s32(prod); const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32); const float32x4_t superblock_scale = { GGML_CPU_FP16_TO_FP32(x0->d) * y0->d, GGML_CPU_FP16_TO_FP32(x0->d) * y1->d, GGML_CPU_FP16_TO_FP32(x1->d) * y0->d, GGML_CPU_FP16_TO_FP32(x1->d) * y1->d, }; 
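            // Editorial sketch (not from the original source; d_x, d_y, scale[k],
            // q6[k], q8[k] and bsum[k] are informal names, not identifiers in this
            // file): the two statements that follow finish each of the four
            // row/column combinations as
            //
            //   result = d_x * d_y * ( sum_k scale[k] * dot(q6[k], q8[k])
            //                          - 32 * sum_k scale[k] * bsum[k] )
            //
            // i.e. the implicit -32 offset of Q6_K quants is not subtracted inside
            // the smmla loop above; it is folded into vibias = 32 * (bsums . scales)
            // and removed here, before scaling by the per-superblock d_x * d_y.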
visum = vsubq_s32(visum, vibias); vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); } } // vfsum = ABCD -> ACBD // AC -> s, BD -> (s+bs) vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); vst1_f32(s, vget_low_f32 (vfsum)); vst1_f32(s + bs, vget_high_f32(vfsum)); return; } #endif #ifdef __ARM_FEATURE_SVE float sum = 0; svuint8_t m4b = svdup_n_u8(0xf); svint32_t vzero = svdup_n_s32(0); svuint8_t mone = svdup_n_u8(0x30); svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4; svuint8_t q6h_1, q6h_2, q6h_3, q6h_4; for (int i = 0; i < nb; ++i) { const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const int8_t * GGML_RESTRICT scale = x[i].scales; const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums); const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8); const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale)); const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8)); const svint64_t prod = svdup_n_s64(0); int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1), svdot_s64(prod, q8sums_2, q6scales_2))); int32_t isum = 0; switch (vector_length) { case 128: { const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); svint32_t isum_tmp = svdup_n_s32(0); for (int j = 0; j < QK_K/128; ++j) { svuint8_t qhbits_1 = svld1_u8(pg8_16, qh); svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16); qh += 32; svuint8_t q6bits_1 = svld1_u8(pg8_16, q6); svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16); svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32); svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48); q6 += 64; svint8_t q8bytes_1 = svld1_s8(pg8_16, q8); svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16); svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32); svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48); q8 += 64; q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4)); q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4)); q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2)); q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2)); q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1)); q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2)); q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3)); q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4)); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); scale += 4; q8bytes_1 = svld1_s8(pg8_16, q8); q8bytes_2 = svld1_s8(pg8_16, q8+16); q8bytes_3 = svld1_s8(pg8_16, q8+32); q8bytes_4 = svld1_s8(pg8_16, q8+48); q8 += 64; q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1); q6h_2 = svand_u8_x(pg16_8, mone, qhbits_2); q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2)); q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2)); q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), 
q6h_1)); q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2)); q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3)); q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4)); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); scale += 4; } isum += svaddv_s32(pg32_4, isum_tmp); sum += d_all * y[i].d * (isum - 32 * isum_mins); } break; case 256: case 512: { const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2); const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8); const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32); svint32_t isum_tmp = svdup_n_s32(0); for (int j = 0; j < QK_K/128; j++) { svuint8_t qhbits_1 = svld1_u8(pg8_32, qh); qh += 32; svuint8_t q6bits_1 = svld1_u8(pg8_32, q6); svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32); q6 += 64; svint8_t q8bytes_1 = svld1_s8(pg8_32, q8); svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32); svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64); svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96); q8 += 128; q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4)); q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2)); q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1); q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2)); q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1)); q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2)); q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3)); q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4)); svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale); scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2); scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4); scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6); scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp)); svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp)); svint32_t scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp)); svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp)); isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1); isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale_lane_2); isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3); isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4); scale += 8; } isum += svaddv_s32(pg32_8, isum_tmp); sum += d_all * y[i].d * (isum - 32 * isum_mins); } break; default: assert(false && "Unsupported 
vector length"); break; } } *s = sum; #elif __ARM_NEON float sum = 0; const uint8x16_t m4b = vdupq_n_u8(0xF); const int32x4_t vzero = vdupq_n_s32(0); //const int8x16_t m32s = vdupq_n_s8(32); const uint8x16_t mone = vdupq_n_u8(3); ggml_int8x16x4_t q6bytes; ggml_uint8x16x4_t q6h; for (int i = 0; i < nb; ++i) { const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const int8_t * GGML_RESTRICT scale = x[i].scales; const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); const int8x16_t scales = vld1q_s8(scale); const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}}; const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])), vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); int32_t isum_mins = vaddvq_s32(prod); int32_t isum = 0; for (int j = 0; j < QK_K/128; ++j) { ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32; ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64; ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); shifted = vshrq_n_u8(qhbits.val[1], 2); q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; scale += 4; q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; shifted = vshrq_n_u8(qhbits.val[0], 4); q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); shifted = vshrq_n_u8(qhbits.val[1], 4); q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); shifted = vshrq_n_u8(qhbits.val[0], 6); q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); shifted = vshrq_n_u8(qhbits.val[1], 6); q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); 
//q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; scale += 4; } //sum += isum * d_all * y[i].d; sum += d_all * y[i].d * (isum - 32 * isum_mins); } *s = sum; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } #if defined (__ARM_NEON) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 
-1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, }; #endif void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_NEON) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[4]; const uint8_t * aux8 = (const uint8_t *)aux32; ggml_int8x16x4_t q2u; ggml_int8x16x4_t q2s; ggml_int8x16x4_t q8b; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; float sumf1 = 0, sumf2 = 0; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { q8b = ggml_vld1q_s8_x4(q8); q8 += 64; memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1]))); q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3]))); q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9]))); q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11]))); q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127)))); q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127)))); q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], 
q8b.val[3]); sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); } sumf += d*(sumf1 + sumf2); } *s = 0.25f * sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_NEON) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; ggml_int8x16x4_t q2u; ggml_int8x16x4_t q2s; ggml_int8x16x4_t q8b; int32x4x4_t scales32; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const uint8x8_t scales8 = vld1_u8(x[i].scales); const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf)); const uint8x8_t scales_h = vshr_n_u8(scales8, 4); uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h)); scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1)); const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales)); const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales)); scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1))); scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1))); scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2))); scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2))); int32x4_t sumi = vdupq_n_s32(0); for (int ib64 = 0; ib64 < QK_K/64; ++ib64) { q8b = ggml_vld1q_s8_x4(q8); q8 += 64; q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511)))); q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511)))); q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511)))); q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511)))); q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9)))); q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9)))); q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9)))); q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9)))); q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]); const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]); const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]); const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]); const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4)); sumi = vmlaq_s32(sumi, p, scales32.val[ib64]); q2 += 8; } sumf += d*vaddvq_s32(sumi); } *s = 0.125f * 
sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_NEON) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); const uint8x16_t mask2 = vld1q_u8(k_mask2); const uint8x16_t m1 = vdupq_n_u8(1); const int32x4_t vzero = vdupq_n_s32(0); uint8x16x2_t vs; ggml_int8x16x4_t q2s; ggml_int8x16x4_t q8b; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); const int8_t * GGML_RESTRICT q8 = y[i].qs; int sumi1 = 0, sumi2 = 0; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { q8b = ggml_vld1q_s8_x4(q8); q8 += 64; q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))), vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300))))); q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))), vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300))))); q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))), vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300))))); q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))), vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300))))); qs += 8; vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); vs.val[0] = vceqq_u8(vs.val[0], mask2); vs.val[1] = vceqq_u8(vs.val[1], mask2); q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]); q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]); vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); vs.val[0] = vceqq_u8(vs.val[0], mask2); vs.val[1] = vceqq_u8(vs.val[1], mask2); signs += 4; q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]); q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]); const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]); const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]); const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]); const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]); sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] 
& 0xf)); sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4)); sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf)); sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4)); } sumf += d*(sumi1 + sumi2); } *s = 0.125f * sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__ARM_NEON) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[2]; ggml_int8x16x4_t q3s; ggml_int8x16x4_t q8b; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; float sumf1 = 0, sumf2 = 0; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { q8b = ggml_vld1q_s8_x4(q8); q8 += 64; memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t); const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]); const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]); const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]); const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]); q3 += 16; q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127)))); q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127)))); q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0)); q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1)); q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2)); q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3)); const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28)); sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28)); } sumf += d*(sumf1 + sumf2); } *s = 0.5f * sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / 
QK_K; #if defined(__ARM_NEON) typedef union { uint16x8_t vec_index; uint16_t index[8]; } vec_index_t; static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1}; const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); const uint8x16_t mask2 = vld1q_u8(k_mask2); const int16x8_t hshift = vld1q_s16(k_shift); const uint16x8_t m256 = vdupq_n_u16(256); const uint8x16_t m1 = vdupq_n_u8(1); uint8x16x2_t vs; ggml_int8x16x4_t q3s; ggml_int8x16x4_t q8b; vec_index_t idx; uint32_t scales32[2]; const uint8_t * scales8 = (const uint8_t *)scales32; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(scales32, x[i].scales, 4); scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101; scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101; int sumi1 = 0, sumi2 = 0; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { q8b = ggml_vld1q_s8_x4(q8); q8 += 64; const uint8x16_t idx_l = vld1q_u8(qs); qs += 16; idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256)); const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256)); const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0)); q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1)); vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); signs += 4; q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2)); q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3)); const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); const int32x4_t p2 = 
ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0]; sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4]; } sumf += d*(sumi1 + sumi2); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __ARM_NEON ggml_int8x16x4_t q1b; ggml_int8x16x4_t q8b; float sumf = 0; for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint16_t * qh = x[i].qh; int sumi1 = 0, sumi2 = 0, sumi3 = 0; for (int ib = 0; ib < QK_K/32; ib += 2) { q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700))))); q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700))))); q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700))))); q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700))))); qs += 8; q8b = ggml_vld1q_s8_x4(q8); q8 += 64; const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]); const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]); const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; sumi1 += vaddvq_s32(p1) * ls1; sumi2 += vaddvq_s32(p2) * ls2; sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1) + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? 
-1 : 1); } sumf += y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_m * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; iq1m_scale_t scale; #if defined __ARM_NEON const int32x4_t mask = vdupq_n_s32(0x7); const int32x4_t mone = vdupq_n_s32(1); const int32x4_t mzero = vdupq_n_s32(0); ggml_int8x16x4_t deltas; deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1)); deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1)); deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1)); deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1)); ggml_int8x16x4_t q1b; ggml_int8x16x4_t q8b; uint32_t aux32; const uint8_t * aux8 = (const uint8_t *)&aux32; float sumf = 0; for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint16_t * sc = (const uint16_t *)x[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); int32x4_t sumi1 = mzero; int32x4_t sumi2 = mzero; for (int ib = 0; ib < QK_K/32; ib += 2) { q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700))))); q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700))))); q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700))))); q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))), vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700))))); q8b = ggml_vld1q_s8_x4(q8); q8 += 64; const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1])); const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3])); const int32x4_t p12 = vpaddq_s32(p1, p2); const uint32_t * qh32 = (const uint32_t *)qh; // we are 4-byte aligned, so we can do that aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202); const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1])); const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3])); const int32x4_t p34 = vpaddq_s32(p3, p4); int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9); scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone); sumi1 = vmlaq_s32(sumi1, scales_4, p12); sumi2 = vmlaq_s32(sumi2, scales_4, p34); qs += 8; qh += 4; } sumf += y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2)); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(scale); ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void 
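// Editorial sketch (not part of the original source): per 32-element block, the
// NEON path in ggml_vec_dot_iq4_nl_q8_0 below accumulates the same value as the
// function's own scalar tail loop further down, roughly:
//
//   int sumi1 = 0, sumi2 = 0;
//   for (int j = 0; j < QK4_NL/2; ++j) {
//       sumi1 += q8[j]            * kvalues_iq4nl[q4[j] & 0xf];  // low nibbles
//       sumi2 += q8[j + QK4_NL/2] * kvalues_iq4nl[q4[j] >> 4];   // high nibbles
//   }
//   sumf += fp16_to_fp32(x_d) * fp16_to_fp32(y_d) * (sumi1 + sumi2);
//
// where q4, q8, x_d, y_d stand in for x[ib].qs, y[ib].qs, x[ib].d, y[ib].d; the
// vector code replaces the per-nibble table lookup with vqtbl1q_s8 on the
// kvalues_iq4nl table and processes two such blocks per loop iteration.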
ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK4_NL == 0); static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); const block_iq4_nl * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK4_NL; int ib = 0; float sumf = 0; #if defined __ARM_NEON const int8x16_t values = vld1q_s8(kvalues_iq4nl); const uint8x16_t m4b = vdupq_n_u8(0x0f); uint8x16x2_t q4bits; int8x16x4_t q4b; int8x16x4_t q8b; int32x4_t prod_1, prod_2; for (; ib + 1 < nb; ib += 2) { q4bits.val[0] = vld1q_u8(x[ib + 0].qs); q4bits.val[1] = vld1q_u8(x[ib + 1].qs); q8b.val[0] = vld1q_s8(y[ib + 0].qs); q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16); q8b.val[2] = vld1q_s8(y[ib + 1].qs); q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16); q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); sumf += GGML_CPU_FP16_TO_FP32(x[ib+0].d) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + GGML_CPU_FP16_TO_FP32(x[ib+1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); } #endif for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_K == 0); const block_iq4_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __ARM_NEON const int8x16_t values = vld1q_s8(kvalues_iq4nl); const uint8x16_t m4b = vdupq_n_u8(0x0f); ggml_uint8x16x2_t q4bits; ggml_int8x16x4_t q4b; ggml_int8x16x4_t q8b; int32x4_t prod_1, prod_2; float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { const int8_t * q8 = y[ibl].qs; const uint8_t * q4 = x[ibl].qs; uint16_t h = x[ibl].scales_h; int sumi1 = 0, sumi2 = 0; for (int ib = 0; ib < QK_K/64; ++ib) { q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; q8b = ggml_vld1q_s8_x4(q8); q8 += 64; q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32; int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; h >>= 4; sumi1 += vaddvq_s32(prod_1) * ls1; sumi2 += vaddvq_s32(prod_2) * ls2; } sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * 
y[ibl].d * (sumi1 + sumi2);
    }

    *s = sumf;

#else
    UNUSED(x);
    UNUSED(y);
    UNUSED(nb);
    ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
ggml-org-ggml-3678254/src/ggml-cpu/arch/arm/repack.cpp000066400000000000000000004006331512524704700223410ustar00rootroot00000000000000
#define GGML_COMMON_IMPL_CPP
#define GGML_COMMON_DECL_CPP
#include "ggml-common.h"
#include "ggml-backend-impl.h"
#include "ggml-impl.h"
#include "ggml-cpu.h"
#include "ggml-cpu-impl.h"
#include "simd-mappings.h"
#include "traits.h"
#include <cmath>
#include <cstring>
#include <cassert>
#include <cstdlib> // for qsort
#include <cstdio>  // for GGML_ASSERT

#define GGML_CPU_CLANG_WORKAROUND
#include "../../repack.h"

#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Woverlength-strings"
#endif

#define UNUSED GGML_UNUSED

#if defined(__aarch64__) && defined(__ARM_NEON) && (defined(__ARM_FEATURE_MATMUL_INT8) || defined(__ARM_FEATURE_DOTPROD))
static inline void decode_q4_Kx8_scales_mins(const uint8_t * scales_in, int16x8_t * out_mins, int8_t * out_scales) {
    constexpr uint32_t kmask1 = 0x3f3f3f3f;
    constexpr uint32_t kmask2 = 0x0f0f0f0f;
    constexpr uint32_t kmask3 = 0x03030303;
    constexpr uint8_t scales_size = 12;

    uint32_t sm[3];
    memcpy(sm, scales_in, scales_size);

    const uint32_t mins_0_3 = sm[1] & kmask1;
    const uint32_t mins_4_7 = ((sm[2] >> 4) & kmask2) | (((sm[1] >> 6) & kmask3) << 4);
    const uint32x2_t mins_u32 = { mins_0_3, mins_4_7 };

    *out_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins_u32)));

    uint32_t scales_u32[2];
    scales_u32[0] = sm[0] & kmask1;
    scales_u32[1] = (sm[2] & kmask2) | (((sm[0] >> 6) & kmask3) << 4);
    memcpy(out_scales, scales_u32, 8);
}
#endif

void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;

#if defined(__ARM_NEON)
    float32x4_t srcv[4][8];
    float id[4];

    for (int i = 0; i < nb; i++) {
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int row_iter = 0; row_iter < 4; row_iter++) {
            for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
            for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
            for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
            for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
            for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);

            const float amax = vmaxvq_f32(amaxv[0]);

            const float d = amax / ((1 << 7) - 1);
            id[row_iter] = d ?
1.0f / d : 0.0f; y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < 8; j++) { float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]); int32x4_t vi = vcvtnq_s32_f32(v); y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0); y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1); y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2); y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[1][j], id[1]); vi = vcvtnq_s32_f32(v); y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0); y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1); y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2); y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[2][j], id[2]); vi = vcvtnq_s32_f32(v); y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0); y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1); y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2); y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[3][j], id[3]); vi = vcvtnq_s32_f32(v); y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0); y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1); y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2); y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3); } } #else UNUSED(nb); UNUSED(y); ggml_quantize_mat_q8_0_4x4_generic(x, vy, k); #endif } void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; #if defined(__ARM_NEON) float32x4_t srcv[4][8]; float id[4]; for (int i = 0; i < nb; i++) { float32x4_t asrcv[8]; float32x4_t amaxv[8]; for (int row_iter = 0; row_iter < 4; row_iter++) { for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); const float amax = vmaxvq_f32(amaxv[0]); const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 
1.0f / d : 0.0f; y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < 4; j++) { float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]); int32x4_t vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[1][2 * j], id[1]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[2][2 * j], id[2]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[3][2 * j], id[3]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3); v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]); vi = vcvtnq_s32_f32(v); y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0); y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1); y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2); y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3); } } #else UNUSED(nb); UNUSED(y); ggml_quantize_mat_q8_0_4x8_generic(x, vy, k); #endif } void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; for (int c = 0; c < nc; c += ncols_interleaved) { const block_q8_0 * a_ptr = (const block_q8_0 *) vy; float32x4_t acc = vdupq_n_f32(0); for (int b = 0; b < nb; b++) { int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); int8x16_t a0 = vld1q_s8(a_ptr->qs); int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); int32x4_t ret = vdupq_n_s32(0); ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); a_ptr++; b_ptr++; } vst1q_f32(s, acc); s += ncols_interleaved; } return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemv_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; for (int c = 0; c < nc; c += ncols_interleaved) { const block_q8_0 * a_ptr = (const block_q8_0 *) vy; float32x4_t acc = vdupq_n_f32(0); for (int b = 0; b < nb; b++) { int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); int32x4_t ret0 = vdupq_n_s32(0); int32x4_t ret1 = vdupq_n_s32(0); ret0 = vdotq_s32(ret0, b0 << 4, a0); ret1 = vdotq_s32(ret1, b1 << 4, a0); ret0 = vdotq_s32(ret0, b2 << 4, a1); ret1 = vdotq_s32(ret1, b3 << 4, a1); ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); int32x4_t ret = vpaddq_s32(ret0, ret1); acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); a_ptr++; b_ptr++; } vst1q_f32(s, acc); s += ncols_interleaved; } return; #endif // #if ! 
((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemv_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) #if defined(__ARM_FEATURE_SVE) if (ggml_cpu_get_sve_cnt() == QK8_0) { const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; __asm__ __volatile__( "ptrue p0.b\n" "add %x[b_ptr], %x[b_ptr], #0x10\n" "1:" // Column loop "add x22, %x[a_ptr], #0x2\n" "mov z31.b, #0x0\n" "mov x21, %x[nb]\n" "2:" // Block loop "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n" "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n" "mov z28.s, #0x0\n" "mov z27.s, #0x0\n" "ld1rd { z26.d }, p0/Z, [x22]\n" "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n" "sub x20, x22, #0x2\n" "sub x21, x21, #0x1\n" "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n" "ld1rd { z23.d }, p0/Z, [x22, #8]\n" "lsl z22.b, z30.b, #0x4\n" "lsl z16.b, z29.b, #0x4\n" "and z30.b, z30.b, #0xf0\n" "and z29.b, z29.b, #0xf0\n" "ld1rd { z21.d }, p0/Z, [x22, #16]\n" "ld1rd { z20.d }, p0/Z, [x22, #24]\n" "lsl z19.b, z25.b, #0x4\n" "and z25.b, z25.b, #0xf0\n" "ld1rh { z17.h }, p0/Z, [x20]\n" "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n" "sdot z28.s, z22.b, z26.b\n" "sdot z27.s, z16.b, z26.b\n" "lsl z16.b, z24.b, #0x4\n" "add x22, x22, #0x22\n" "and z24.b, z24.b, #0xf0\n" "add %x[b_ptr], %x[b_ptr], #0x90\n" "fcvt z17.s, p0/m, z17.h\n" "fcvt z18.s, p0/m, z18.h\n" "sdot z28.s, z19.b, z23.b\n" "sdot z27.s, z16.b, z23.b\n" "fmul z18.s, z18.s, z17.s\n" "sdot z28.s, z30.b, z21.b\n" "sdot z27.s, z29.b, z21.b\n" "sdot z28.s, z25.b, z20.b\n" "sdot z27.s, z24.b, z20.b\n" "uzp1 z17.s, z28.s, z27.s\n" "uzp2 z16.s, z28.s, z27.s\n" "add z17.s, z17.s, z16.s\n" "asr z17.s, z17.s, #0x4\n" "scvtf z17.s, p0/m, z17.s\n" "fmla z31.s, p0/M, z17.s, z18.s\n" "cbnz x21, 2b\n" "sub %x[nc], %x[nc], #0x8\n" "st1w { z31.s }, p0, [%x[res_ptr]]\n" "add %x[res_ptr], %x[res_ptr], #0x20\n" "cbnz %x[nc], 1b\n" : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc) : [a_ptr] "r" (a_ptr), [nb] "r" (nb) : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); return; } #endif // #if defined(__ARM_FEATURE_SVE) #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); const block_q8_0 * a_ptr = (const block_q8_0 *) vy; float * res_ptr = s; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); float32x4_t sumf = vdupq_n_f32(0); for (int l = 0; l < nb; l++) { uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); int32x4_t sumi = vdupq_n_s32(0); sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); float32x4_t d = a_d * b_d; sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); } vst1q_f32(res_ptr + x * 4, sumf); } return; #endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) ggml_gemv_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q4_K_8x4_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { constexpr int qk = QK_K; const int nb = n / qk; constexpr int ncols_interleaved = 8; constexpr int blocklen = 8; assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) constexpr int col_groups = ncols_interleaved / 4; // 0123 and 4567 const uint8x16_t m4b = vdupq_n_u8(0x0f); // 1x8 tile = 2 x 4 float32x4_t acc_f32[col_groups]; const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int i = 0; i < col_groups; i++) { acc_f32[i] = vdupq_n_f32(0); } for (int b = 0; b < nb; b++) { float32x4_t q4_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d)); // d0 d1 d2 d3 float32x4_t q4_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d + 4)); // d4 d5 d6 d7 float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d); float32x4_t sb_scale_0123 = vmulq_f32(q4_d_0, q8_d); float32x4_t sb_scale_4567 = vmulq_f32(q4_d_1, q8_d); float32x4_t q4_dmin_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin)); // dmin 0..3 float32x4_t q4_dmin_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin + 4)); // dmin 4..7 float32x4_t sb_min_0123 = vmulq_f32(q4_dmin_0, q8_d); float32x4_t sb_min_4567 = vmulq_f32(q4_dmin_1, q8_d); // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567 int32x4_t bias_acc[2] = { vdupq_n_s32(0), vdupq_n_s32(0) }; int32x4_t acc_lo[col_groups]; int32x4_t acc_hi[col_groups]; // Each bsum is 16 elements, pairwise add leaves us with the 8 bsums of the entire block const int16x8_t bsums = vpaddq_s16(vld1q_s16(q8_ptr[b].bsums), vld1q_s16(q8_ptr[b].bsums + 8)); int16_t bsums_arr[8]; vst1q_s16(bsums_arr, bsums); for (int sb = 0; sb < QK_K / 64; sb++) { for (int i = 0; i < col_groups; i++) { acc_lo[i] = vdupq_n_s32(0); acc_hi[i] = vdupq_n_s32(0); } // Need scales for the low and high nibbles // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total int16x8_t q4sb_mins[2]; int16x8_t q4sb_scales[2]; for (int i = 0; i < 2; i++) { int8_t aux_q4sb[8]; const int offset = sb * 24 + i * 12; decode_q4_Kx8_scales_mins(&q4_ptr[b].scales[offset], &q4sb_mins[i], aux_q4sb); q4sb_scales[i] = vmovl_s8(vld1_s8(aux_q4sb)); } int8x16_t q8_qs[64 / 16]; for (int i = 0; i < 64 / 16; i++) { q8_qs[i] = vld1q_s8(q8_ptr[b].qs + sb * 64 + i * 16); } for (int c = 0; c < col_groups; c++) { uint8x16_t q4_cols[8]; for (int i = 0; i < 8; i++) { q4_cols[i] = vld1q_u8(q4_ptr[b].qs + sb * QK_K + i * 32 + 16 * c); } acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[0], m4b)), q8_qs[0], 0); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[1], m4b)), q8_qs[0], 1); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[2], m4b)), q8_qs[0], 2); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[3], m4b)), q8_qs[0], 3); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[4], m4b)), q8_qs[1], 0); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[5], m4b)), q8_qs[1], 1); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], 
vreinterpretq_s8_u8(vandq_u8(q4_cols[6], m4b)), q8_qs[1], 2); acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[7], m4b)), q8_qs[1], 3); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[0], 4)), q8_qs[2], 0); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[1], 4)), q8_qs[2], 1); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[2], 4)), q8_qs[2], 2); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[3], 4)), q8_qs[2], 3); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[4], 4)), q8_qs[3], 0); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[5], 4)), q8_qs[3], 1); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[6], 4)), q8_qs[3], 2); acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[7], 4)), q8_qs[3], 3); } // Scales // row c0123 blk0 and blk1 const int16x4_t sc_0123_lo = vget_low_s16(q4sb_scales[0]); const int16x4_t sc_0123_hi = vget_low_s16(q4sb_scales[1]); const float32x4_t sumf_0123 = vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_0123_lo), acc_lo[0]), vmulq_s32(vmovl_s16(sc_0123_hi), acc_hi[0]))); acc_f32[0] = vfmaq_f32(acc_f32[0], sb_scale_0123, sumf_0123); // row c4567 blk0 and blk1 const int16x4_t sc_4567_lo = vget_high_s16(q4sb_scales[0]); const int16x4_t sc_4567_hi = vget_high_s16(q4sb_scales[1]); const float32x4_t sumf_4567 = vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_4567_lo), acc_lo[1]), vmulq_s32(vmovl_s16(sc_4567_hi), acc_hi[1]))); acc_f32[1] = vfmaq_f32(acc_f32[1], sb_scale_4567, sumf_4567); // Bias Correction const int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[2 * sb + 0]); const int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[2 * sb + 1]); bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_lo, vget_low_s16(q4sb_mins[0])); bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_hi, vget_low_s16(q4sb_mins[1])); bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_lo, vget_high_s16(q4sb_mins[0])); bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_hi, vget_high_s16(q4sb_mins[1])); } // for sb acc_f32[0] = vmlsq_f32(acc_f32[0], vcvtq_f32_s32(bias_acc[0]), sb_min_0123); acc_f32[1] = vmlsq_f32(acc_f32[1], vcvtq_f32_s32(bias_acc[1]), sb_min_4567); } // for b int base = x * ncols_interleaved; vst1q_f32(s + base, acc_f32[0]); vst1q_f32(s + base + 4, acc_f32[1]); } // for x return; #endif // #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemv_q4_K_8x4_q8_K_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { constexpr int qk = QK_K; const int nb = n / qk; constexpr int ncols_interleaved = 8; constexpr int blocklen = 8; assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) constexpr int col_pairs = ncols_interleaved / 2; const uint8x16_t m4b = vdupq_n_u8(0x0f); // 1x8 tile = 2 x 4 float32x4_t acc_f32[ncols_interleaved / 4]; const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int i = 0; i < ncols_interleaved / 4; i++) { acc_f32[i] = vdupq_n_f32(0); } for (int b = 0; b < nb; b++) { 
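            // Sketch of the per-superblock math this block loop evaluates, assuming the usual
            // q4_K decomposition w = d * scale[sb] * q4 - dmin * min[sb] (scales/mins decoded
            // per subblock by decode_q4_Kx8_scales_mins below); explanatory gloss only:
            //
            //   out[c] += (d[c] * q8.d) * sum_sb( scale[sb][c] * sum_i q4[sb][i] * q8[sb][i] )
            //           - (dmin[c] * q8.d) * sum_sb( min[sb][c] * bsum[sb] )
            //
            // where bsum[sb] is the sum of the 32 q8 quants of subblock sb (formed below by
            // pairwise-adding the 16-wide bsums). The integer dot products accumulate into
            // acc_lo/acc_hi per column pair, and the mins term is subtracted once per
            // superblock through bias_acc at the end of the loop body.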
float32x4_t q4_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d)); // d0 d1 d2 d3 float32x4_t q4_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d + 4)); // d4 d5 d6 d7 float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d); float32x4_t sb_scale_0 = vmulq_f32(q4_d_0, q8_d); float32x4_t sb_scale_1 = vmulq_f32(q4_d_1, q8_d); float32x4_t q4_dmin_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin)); // dmin 0..3 float32x4_t q4_dmin_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin + 4)); // dmin 4..7 float32x4_t sb_min_0 = vmulq_f32(q4_dmin_0, q8_d); float32x4_t sb_min_1 = vmulq_f32(q4_dmin_1, q8_d); // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567 int32x4_t bias_acc[2] = { vdupq_n_s32(0), vdupq_n_s32(0) }; // 2 sb each iteration int32x4_t acc_lo[col_pairs]; int32x4_t acc_hi[col_pairs]; // Each bsum is 16 elements, pairwise add leaves us with the 8 bsums of the entire block const int16x8_t bsums = vpaddq_s16(vld1q_s16(q8_ptr[b].bsums), vld1q_s16(q8_ptr[b].bsums + 8)); int16_t bsums_arr[8]; vst1q_s16(bsums_arr, bsums); for (int sb = 0; sb < QK_K / 64; sb++) { for (int i = 0; i < col_pairs; i++) { acc_lo[i] = vdupq_n_s32(0); acc_hi[i] = vdupq_n_s32(0); } // Need scales for the low and high nibbles // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total int16x8_t q4sb_mins[2]; // int16 as its needed for bias_acc later int16x8_t q4sb_scales[2]; for (int i = 0; i < 2; i++) { int8_t aux_q4sb[8]; const int offset = sb * 24 + i * 12; decode_q4_Kx8_scales_mins(&q4_ptr[b].scales[offset], &q4sb_mins[i], aux_q4sb); q4sb_scales[i] = vmovl_s8(vld1_s8(aux_q4sb)); } const uint8_t * q4_base = q4_ptr[b].qs + sb * QK_K; // Load the 64 quants from q8K duplicated to use vecdots with the interelaved columns // but still need the qs to use the low and hi bits from q4 const int8_t * q8_base = q8_ptr[b].qs + sb * 64; int8x16_t q8_qs[8]; for (int i = 0; i < 8; i++) { q8_qs[i] = (int8x16_t) vld1q_dup_s64((const int64_t *) (q8_base + i * 8)); } // Q4s columns iterated in pairs (01, 23, 45, 67) for (int cp = 0; cp < col_pairs; cp++) { uint8x16_t q4_qs_cp_0 = vld1q_u8(q4_base + 16 * cp); uint8x16_t q4_qs_cp_1 = vld1q_u8(q4_base + 16 * cp + 64); uint8x16_t q4_qs_cp_2 = vld1q_u8(q4_base + 16 * cp + 128); uint8x16_t q4_qs_cp_3 = vld1q_u8(q4_base + 16 * cp + 192); acc_lo[cp] = ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_0, m4b)), q8_qs[0]); // 0 .. 7 acc_lo[cp] = ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_1, m4b)), q8_qs[1]); // 8 ..15 acc_lo[cp] = ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_2, m4b)), q8_qs[2]); // 16..23 acc_lo[cp] = ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_3, m4b)), q8_qs[3]); // 24..31 acc_hi[cp] = ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_0, 4)), q8_qs[4]); // 32..39 acc_hi[cp] = ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_1, 4)), q8_qs[5]); // 40..47 acc_hi[cp] = ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_2, 4)), q8_qs[6]); // 48..55 acc_hi[cp] = ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_3, 4)), q8_qs[7]); // 56..63 } // Iterates over a pair of column pairs (4 columns) to use a single 128 register // p = 0 -> 0123 p2 -> 4567 for (int i = 0, p = 0; p < col_pairs; i++, p += 2) { int16x4_t group_scales_lo = p == 0 ? vget_low_s16(q4sb_scales[0]) : vget_high_s16(q4sb_scales[0]); int16x4_t group_scales_hi = p == 0 ? 
vget_low_s16(q4sb_scales[1]) : vget_high_s16(q4sb_scales[1]); float32x4_t sb_scale = p == 0 ? sb_scale_0 : sb_scale_1; // 0123 or 4567 float32x4_t sumf_0 = vcvtq_f32_s32(vmulq_s32(vmovl_s16(group_scales_lo), vpaddq_s32(acc_lo[p], acc_lo[p + 1]))); acc_f32[i] = vfmaq_f32(acc_f32[i], sb_scale, sumf_0); float32x4_t sumf_1 = vcvtq_f32_s32(vmulq_s32(vmovl_s16(group_scales_hi), vpaddq_s32(acc_hi[p], acc_hi[p + 1]))); acc_f32[i] = vfmaq_f32(acc_f32[i], sb_scale, sumf_1); } // Multiply Acc bsum + mins // Each pair of subblocks share the same bsums // Load scalar bsum → broadcast to a vector (vdupq_n_s16(s)). int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[2 * sb + 0]); int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[2 * sb + 1]); // cols 0-3 bias bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_lo, vget_low_s16(q4sb_mins[0])); bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_hi, vget_low_s16(q4sb_mins[1])); // cols 4-7 bias bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_lo, vget_high_s16(q4sb_mins[0])); bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_hi, vget_high_s16(q4sb_mins[1])); } // for sb acc_f32[0] = vmlsq_f32(acc_f32[0], vcvtq_f32_s32(bias_acc[0]), sb_min_0); acc_f32[1] = vmlsq_f32(acc_f32[1], vcvtq_f32_s32(bias_acc[1]), sb_min_1); } // for b int base = x * ncols_interleaved; vst1q_f32(s + base, acc_f32[0]); vst1q_f32(s + base + 4, acc_f32[1]); } // for x return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemv_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q8_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx; for (int c = 0; c < nc; c += ncols_interleaved) { const block_q8_0 * a_ptr = (const block_q8_0 *) vy; float32x4_t acc = vdupq_n_f32(0); for (int b = 0; b < nb; b++) { int8x16x4_t b_low = vld1q_s8_x4((const int8_t *) b_ptr->qs); int8x16x4_t b_high = vld1q_s8_x4((const int8_t *) b_ptr->qs + 64); float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); int8x16x2_t a = vld1q_s8_x2(a_ptr->qs); float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); int32x4_t ret = vdupq_n_s32(0); ret = vdotq_laneq_s32(ret, b_low.val[0], a.val[0], 0); ret = vdotq_laneq_s32(ret, b_low.val[1], a.val[0], 1); ret = vdotq_laneq_s32(ret, b_low.val[2], a.val[0], 2); ret = vdotq_laneq_s32(ret, b_low.val[3], a.val[0], 3); ret = vdotq_laneq_s32(ret, b_high.val[0], a.val[1], 0); ret = vdotq_laneq_s32(ret, b_high.val[1], a.val[1], 1); ret = vdotq_laneq_s32(ret, b_high.val[2], a.val[1], 2); ret = vdotq_laneq_s32(ret, b_high.val[3], a.val[1], 3); acc = vfmaq_f32(acc, vcvtq_f32_s32(ret), vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); a_ptr++; b_ptr++; } vst1q_f32(s, acc); s += ncols_interleaved; } return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemv_q8_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q8_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert(n % qk == 0); assert(nc 
% ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx; for (int c = 0; c < nc; c += ncols_interleaved) { const block_q8_0 * a_ptr = (const block_q8_0 *) vy; float32x4_t acc = vdupq_n_f32(0); for (int b = 0; b < nb; b++) { int8x16x4_t b_low = vld1q_s8_x4((const int8_t *) b_ptr->qs); int8x16x4_t b_high = vld1q_s8_x4((const int8_t *) b_ptr->qs + 64); float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); int8x8x4_t a_chunks = vld1_s8_x4(a_ptr->qs); int8x16_t a0 = vcombine_s8(a_chunks.val[0], a_chunks.val[0]); int8x16_t a1 = vcombine_s8(a_chunks.val[1], a_chunks.val[1]); int8x16_t a2 = vcombine_s8(a_chunks.val[2], a_chunks.val[2]); int8x16_t a3 = vcombine_s8(a_chunks.val[3], a_chunks.val[3]); float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); int32x4_t ret0 = vdupq_n_s32(0); int32x4_t ret1 = vdupq_n_s32(0); // 0..7 ret0 = vdotq_s32(ret0, b_low.val[0], a0); ret1 = vdotq_s32(ret1, b_low.val[1], a0); // 8..15 ret0 = vdotq_s32(ret0, b_low.val[2], a1); ret1 = vdotq_s32(ret1, b_low.val[3], a1); // 16..23 ret0 = vdotq_s32(ret0, b_high.val[0], a2); ret1 = vdotq_s32(ret1, b_high.val[1], a2); // 24..31 ret0 = vdotq_s32(ret0, b_high.val[2], a3); ret1 = vdotq_s32(ret1, b_high.val[3], a3); int32x4_t ret = vpaddq_s32(ret0, ret1); acc = vfmaq_f32(acc, vcvtq_f32_s32(ret), vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); a_ptr++; b_ptr++; } vst1q_f32(s, acc); s += ncols_interleaved; } return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemv_q8_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; size_t res_stride = bs * sizeof(float); __asm__ __volatile__( "mov x10, %x[nr]\n" "mov x9, #0x88\n" "cmp x10, #0x10\n" "mul x9, %x[nb], x9\n" "blt 4f\n" "1:" // Row loop "add x28, %x[b_ptr], #0x8\n" "mov x27, %x[nc]\n" "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" "2:" // Column loop "add x25, %x[a_ptr], #0x8\n" "movi v15.16b, #0x0\n" "movi v19.16b, #0x0\n" "mov x24, %x[nb]\n" "add x23, x25, x9\n" "movi v18.16b, #0x0\n" "movi v14.16b, #0x0\n" "add x22, x23, x9\n" "movi v11.16b, #0x0\n" "movi v13.16b, #0x0\n" "add x21, x22, x9\n" "movi v23.16b, #0x0\n" "movi v16.16b, #0x0\n" "movi v25.16b, #0x0\n" "movi v7.16b, #0x0\n" "movi v0.16b, #0x0\n" "movi v4.16b, #0x0\n" "movi v5.16b, #0x0\n" "movi v21.16b, #0x0\n" "movi v8.16b, #0x0\n" "movi v1.16b, #0x0\n" "3:" // Block loop "ldr q3, [x28, #0x0]\n" "ldr q31, [x25, #0x0]\n" "movi v28.16b, #0x4\n" "movi v10.4s, #0x0\n" "ldr q22, [x28, #0x10]\n" "ldr q6, [x25, #0x10]\n" "movi v29.4s, #0x0\n" "movi v9.4s, #0x0\n" "ldr q27, [x28, #0x20]\n" "ldr q30, [x28, #0x30]\n" "movi v20.4s, #0x0\n" "movi v24.16b, #0xf0\n" "ldr d2, [x25, #-0x8]\n" "ldr d26, [x23, #-0x8]\n" "sshl v12.16b, v3.16b, v28.16b\n" "sub x20, x28, #0x8\n" "ldr d17, [x20, #0x0]\n" "and v3.16b, v3.16b, v24.16b\n" "subs x24, x24, #0x1\n" "add x28, x28, #0x48\n" ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" "sshl v31.16b, v22.16b, v28.16b\n" "and v22.16b, v22.16b, v24.16b\n" "fcvtl v17.4s, v17.4h\n" "fcvtl v2.4s, v2.4h\n" "fcvtl v26.4s, v26.4h\n" ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" "sshl v6.16b, v27.16b, v28.16b\n" "sshl v28.16b, v30.16b, v28.16b\n" "and v27.16b, v27.16b, v24.16b\n" "and v30.16b, v30.16b, v24.16b\n" "ldr q24, [x25, #0x20]\n" ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" "ldr q24, [x25, #0x30]\n" ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" "ldr q24, [x25, #0x40]\n" ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" "ldr q24, [x25, #0x50]\n" ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" "ldr q24, [x25, #0x60]\n" ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" ".inst 0x4f98eb69 // sdot v9.4s, v27.16b, v24.4b[2]\n" ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" "ldr q24, [x25, #0x70]\n" "add x25, x25, #0x88\n" ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" ".inst 0x4fb8e3dd // sdot v29.4s, 
v30.16b, v24.4b[1]\n" ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" "fmul v24.4s, v17.4s, v2.s[0]\n" "scvtf v10.4s, v10.4s, #0x4\n" "scvtf v29.4s, v29.4s, #0x4\n" "scvtf v9.4s, v9.4s, #0x4\n" "scvtf v20.4s, v20.4s, #0x4\n" "fmla v15.4s, v10.4s, v24.4s\n" "ldr q24, [x23, #0x0]\n" "fmul v10.4s, v17.4s, v2.s[1]\n" "fmla v19.4s, v29.4s, v10.4s\n" "ldr q10, [x23, #0x10]\n" "fmul v29.4s, v17.4s, v2.s[2]\n" "fmul v2.4s, v17.4s, v2.s[3]\n" "fmla v18.4s, v9.4s, v29.4s\n" "movi v9.4s, #0x0\n" "movi v29.4s, #0x0\n" ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" "fmla v14.4s, v20.4s, v2.4s\n" "movi v20.4s, #0x0\n" "movi v2.4s, #0x0\n" ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" "ldr q24, [x23, #0x20]\n" ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" "ldr q10, [x23, #0x30]\n" ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" "ldr q24, [x23, #0x40]\n" ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" "ldr q10, [x23, #0x50]\n" ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" "ldr q24, [x23, #0x60]\n" ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" "ldr q10, [x23, #0x70]\n" "add x23, x23, #0x88\n" ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" "ldr q24, [x22, #0x0]\n" ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" "fmul v10.4s, v17.4s, v26.s[0]\n" "scvtf v9.4s, v9.4s, #0x4\n" "scvtf v29.4s, v29.4s, #0x4\n" "scvtf v20.4s, v20.4s, #0x4\n" "scvtf v2.4s, v2.4s, #0x4\n" "fmla v11.4s, v9.4s, v10.4s\n" "ldr q9, [x22, #0x10]\n" "fmul v10.4s, v17.4s, v26.s[1]\n" "fmla v13.4s, v29.4s, v10.4s\n" "ldr d29, [x22, #-0x8]\n" "fmul v10.4s, v17.4s, v26.s[2]\n" "fmul v26.4s, v17.4s, v26.s[3]\n" "fcvtl v29.4s, v29.4h\n" "fmla v23.4s, v20.4s, v10.4s\n" "movi v20.4s, #0x0\n" "movi v10.4s, #0x0\n" "fmla v16.4s, v2.4s, v26.4s\n" "movi v26.4s, #0x0\n" "movi v2.4s, #0x0\n" ".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n" ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" "ldr q24, [x22, #0x20]\n" ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" ".inst 0x4f89ebfa // sdot 
v26.4s, v31.16b, v9.4b[2]\n" ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" "ldr q9, [x22, #0x30]\n" ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" "ldr q24, [x22, #0x40]\n" ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" "ldr q9, [x22, #0x50]\n" ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" "ldr q24, [x22, #0x60]\n" ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" "ldr q9, [x22, #0x70]\n" "add x22, x22, #0x88\n" ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" "ldr q24, [x21, #0x0]\n" ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" "fmul v9.4s, v17.4s, v29.s[0]\n" "scvtf v20.4s, v20.4s, #0x4\n" "scvtf v10.4s, v10.4s, #0x4\n" "scvtf v26.4s, v26.4s, #0x4\n" "scvtf v2.4s, v2.4s, #0x4\n" "fmla v25.4s, v20.4s, v9.4s\n" "ldr q9, [x21, #0x10]\n" "fmul v20.4s, v17.4s, v29.s[1]\n" "fmla v7.4s, v10.4s, v20.4s\n" "ldr d20, [x21, #-0x8]\n" "fmul v10.4s, v17.4s, v29.s[2]\n" "fmul v29.4s, v17.4s, v29.s[3]\n" "fcvtl v20.4s, v20.4h\n" "fmla v0.4s, v26.4s, v10.4s\n" "movi v26.4s, #0x0\n" "movi v10.4s, #0x0\n" "fmla v4.4s, v2.4s, v29.4s\n" "movi v2.4s, #0x0\n" "movi v29.4s, #0x0\n" ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" "ldr q12, [x21, #0x20]\n" "fmul v24.4s, v17.4s, v20.s[0]\n" ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" "ldr q9, [x21, #0x30]\n" "fmul v31.4s, v17.4s, v20.s[1]\n" ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" "ldr q12, [x21, #0x40]\n" "fmul v6.4s, v17.4s, v20.s[2]\n" "fmul v20.4s, v17.4s, v20.s[3]\n" ".inst 0x4f89e39a // sdot v26.4s, v28.16b, v9.4b[0]\n" ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" "ldr q9, [x21, #0x50]\n" ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" "ldr q12, [x21, #0x60]\n" ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" ".inst 0x4fa9e2ca 
// sdot v10.4s, v22.16b, v9.4b[1]\n" ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" "ldr q17, [x21, #0x70]\n" "add x21, x21, #0x88\n" ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" "scvtf v26.4s, v26.4s, #0x4\n" "scvtf v10.4s, v10.4s, #0x4\n" "fmla v5.4s, v26.4s, v24.4s\n" "scvtf v2.4s, v2.4s, #0x4\n" "scvtf v29.4s, v29.4s, #0x4\n" "fmla v21.4s, v10.4s, v31.4s\n" "fmla v8.4s, v2.4s, v6.4s\n" "fmla v1.4s, v29.4s, v20.4s\n" "bgt 3b\n" "mov x20, %x[res_ptr]\n" "subs x27, x27, #0x4\n" "add %x[res_ptr], %x[res_ptr], #0x10\n" "str q15, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q19, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q18, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q14, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q11, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q13, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q23, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q16, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q25, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q7, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q0, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q4, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q5, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q21, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q8, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q1, [x20, #0x0]\n" "bne 2b\n" "mov x20, #0x4\n" "sub x10, x10, #0x10\n" "cmp x10, #0x10\n" "mov %x[res_ptr], x26\n" "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" "bge 1b\n" "4:" // Row loop skip "cbz x10, 9f\n" "5:" // Row tail: Row loop "add x24, %x[b_ptr], #0x8\n" "mov x23, %x[nc]\n" "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" "6:" // Row tail: Column loop "movi v15.16b, #0x0\n" "movi v19.16b, #0x0\n" "add x25, %x[a_ptr], #0x8\n" "mov x21, %x[nb]\n" "movi v18.16b, #0x0\n" "movi v14.16b, #0x0\n" "7:" // Row tail: Block loop "ldr q7, [x24, #0x0]\n" "ldr q5, [x25, #0x0]\n" "movi v9.16b, #0x4\n" "movi v4.4s, #0x0\n" "ldr q3, [x24, #0x10]\n" "ldr q2, [x25, #0x10]\n" "movi v1.4s, #0x0\n" "movi v0.4s, #0x0\n" "ldr q13, [x24, #0x20]\n" "ldr q31, [x25, #0x20]\n" "movi v30.4s, #0x0\n" "movi v29.16b, #0xf0\n" "ldr q28, [x24, #0x30]\n" "ldr q27, [x25, #0x30]\n" "sshl v20.16b, v7.16b, v9.16b\n" "sub x20, x24, #0x8\n" "ldr q26, [x25, #0x40]\n" "ldr q25, [x25, #0x50]\n" "sshl v17.16b, v3.16b, v9.16b\n" "and v7.16b, v7.16b, v29.16b\n" "ldr q24, [x25, #0x60]\n" "ldr q16, [x25, #0x70]\n" "sshl v22.16b, v13.16b, v9.16b\n" "and v3.16b, v3.16b, v29.16b\n" "ldr d21, [x20, #0x0]\n" "ldr d12, [x25, #-0x8]\n" ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" "sshl v9.16b, v28.16b, v9.16b\n" "subs x21, x21, #0x1\n" "and v13.16b, v13.16b, v29.16b\n" "and v28.16b, v28.16b, v29.16b\n" "add x25, x25, #0x88\n" "add x24, x24, #0x48\n" "fcvtl v21.4s, v21.4h\n" "fcvtl v12.4s, v12.4h\n" ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" 
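        // Note on this row-tail block loop (a reading of the hand-encoded .inst words; the
        // trailing // mnemonics give the decoded sdot instructions): the low nibbles are
        // moved into the top four bits with "sshl ..., #0x4" and the high nibbles are kept
        // there with "and ..., #0xf0", so each int8 operand is the 4-bit quantity scaled by
        // 16. The later "scvtf ..., #0x4" fixed-point converts divide the int32 accumulators
        // by 2^4 to cancel that factor before the fmla with the per-row block scales
        // (the fcvtl'd d values above, combined by the fmul/fmla sequence below).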
".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" "fmul v11.4s, v21.4s, v12.s[0]\n" "fmul v23.4s, v21.4s, v12.s[1]\n" "fmul v17.4s, v21.4s, v12.s[2]\n" ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" "fmul v6.4s, v21.4s, v12.s[3]\n" ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" "scvtf v4.4s, v4.4s, #0x4\n" "scvtf v1.4s, v1.4s, #0x4\n" "scvtf v0.4s, v0.4s, #0x4\n" "fmla v15.4s, v4.4s, v11.4s\n" "scvtf v30.4s, v30.4s, #0x4\n" "fmla v19.4s, v1.4s, v23.4s\n" "fmla v18.4s, v0.4s, v17.4s\n" "fmla v14.4s, v30.4s, v6.4s\n" "bgt 7b\n" "mov x20, %x[res_ptr]\n" "cmp x10, #0x1\n" "str q15, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "cmp x10, #0x2\n" "str q19, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "cmp x10, #0x3\n" "str q18, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "str q14, [x20, #0x0]\n" "8:" // Row tail: Accumulator store skip "subs x23, x23, #0x4\n" "add %x[res_ptr], %x[res_ptr], #0x10\n" "bne 6b\n" "subs x10, x10, #0x4\n" "add %x[a_ptr], %x[a_ptr], x9\n" "mov %x[res_ptr], x22\n" "bgt 5b\n" "9:" // Row tail: Row loop skip : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) ggml_gemm_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; size_t res_stride = bs * sizeof(float); __asm__ __volatile__( "mov x10, %x[nr]\n" "mov x9, #0x88\n" "cmp x10, #0x10\n" "mul x9, %x[nb], x9\n" "blt 4f\n" "1:" // Row loop "add x28, %x[b_ptr], #0x8\n" "mov x27, %x[nc]\n" "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" "2:" // Column loop "add x25, %x[a_ptr], #0x8\n" "movi v2.16b, #0x0\n" "movi v10.16b, #0x0\n" "mov x24, %x[nb]\n" "add x23, x25, x9\n" "movi v12.16b, #0x0\n" "movi v28.16b, #0x0\n" "add x22, x23, x9\n" "movi v11.16b, #0x0\n" "movi v13.16b, #0x0\n" "add x21, x22, x9\n" "movi v22.16b, #0x0\n" "movi v23.16b, #0x0\n" "movi v25.16b, #0x0\n" "movi v5.16b, #0x0\n" "movi v7.16b, #0x0\n" "movi v4.16b, #0x0\n" "movi v6.16b, #0x0\n" "movi v30.16b, #0x0\n" "movi v24.16b, #0x0\n" "movi v14.16b, #0x0\n" "3:" // Block loop "ldr q21, [x28, #0x0]\n" "ldr q16, [x28, #0x10]\n" "movi v1.16b, #0x4\n" "movi v19.4s, #0x0\n" "ldr q27, [x25, #0x0]\n" "ldr q15, [x25, #0x10]\n" "movi v26.4s, #0x0\n" "movi v18.4s, #0x0\n" "ldr q29, [x28, #0x20]\n" "ldr q3, [x28, #0x30]\n" "movi v17.4s, #0x0\n" "movi v0.16b, #0xf0\n" "ldr d20, [x25, #-0x8]\n" "ldr d9, [x23, #-0x8]\n" "sshl v8.16b, v21.16b, v1.16b\n" "sshl v31.16b, v16.16b, v1.16b\n" "and v21.16b, v21.16b, v0.16b\n" "and v16.16b, v16.16b, v0.16b\n" "sub x20, x28, #0x8\n" "subs x24, x24, #0x1\n" "add x28, x28, #0x48\n" ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" "ldr q27, [x25, #0x20]\n" ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n" "sshl v15.16b, v29.16b, v1.16b\n" "sshl v1.16b, v3.16b, v1.16b\n" "and v29.16b, v29.16b, v0.16b\n" "and v3.16b, v3.16b, v0.16b\n" "ldr q0, [x25, #0x30]\n" "fcvtl v20.4s, v20.4h\n" ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n" "fcvtl v9.4s, v9.4h\n" ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" "ldr q27, [x25, #0x40]\n" ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" "ldr q0, [x25, #0x50]\n" ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" "ldr q27, [x25, #0x60]\n" ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n" "ldr q0, [x25, #0x70]\n" "add x25, x25, #0x88\n" ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" "ldr d27, [x20, #0x0]\n" ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" "fcvtl v27.4s, v27.4h\n" "uzp1 v0.2d, v19.2d, v26.2d\n" "uzp2 v26.2d, v19.2d, v26.2d\n" "fmul v19.4s, v27.4s, v20.s[0]\n" "scvtf v0.4s, v0.4s, #0x4\n" "scvtf v26.4s, v26.4s, #0x4\n" "fmla v2.4s, v0.4s, v19.4s\n" "ldr q19, [x23, #0x0]\n" "uzp1 v0.2d, v18.2d, v17.2d\n" "uzp2 v18.2d, v18.2d, v17.2d\n" "fmul v17.4s, v27.4s, v20.s[1]\n" "scvtf v0.4s, v0.4s, #0x4\n" "scvtf v18.4s, v18.4s, #0x4\n" "fmla v10.4s, v26.4s, v17.4s\n" "ldr q17, [x23, #0x10]\n" "fmul v26.4s, v27.4s, v20.s[2]\n" "fmul v20.4s, v27.4s, v20.s[3]\n" "fmla v12.4s, v0.4s, v26.4s\n" "ldr d0, [x22, #-0x8]\n" "ldr d26, [x21, #-0x8]\n" "fcvtl v0.4s, v0.4h\n" "fmla v28.4s, v18.4s, v20.4s\n" "movi v20.4s, #0x0\n" "movi v18.4s, #0x0\n" ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, 
v31.16b\n" "ldr q19, [x23, #0x20]\n" "fcvtl v26.4s, v26.4h\n" ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" "ldr q19, [x23, #0x40]\n" ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" "ldr q19, [x23, #0x60]\n" ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" "uzp1 v19.2d, v20.2d, v18.2d\n" "scvtf v19.4s, v19.4s, #0x4\n" "uzp2 v20.2d, v20.2d, v18.2d\n" "fmul v18.4s, v27.4s, v9.s[0]\n" "scvtf v20.4s, v20.4s, #0x4\n" "fmla v11.4s, v19.4s, v18.4s\n" "ldr q18, [x22, #0x0]\n" "fmul v19.4s, v27.4s, v9.s[1]\n" "fmla v13.4s, v20.4s, v19.4s\n" "movi v19.4s, #0x0\n" "movi v20.4s, #0x0\n" ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" "ldr q17, [x23, #0x30]\n" ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" "ldr q17, [x23, #0x50]\n" ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" "ldr q17, [x23, #0x70]\n" "add x23, x23, #0x88\n" ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" "uzp1 v17.2d, v19.2d, v20.2d\n" "scvtf v17.4s, v17.4s, #0x4\n" "uzp2 v20.2d, v19.2d, v20.2d\n" "fmul v19.4s, v27.4s, v9.s[2]\n" "fmul v9.4s, v27.4s, v9.s[3]\n" "scvtf v20.4s, v20.4s, #0x4\n" "fmla v22.4s, v17.4s, v19.4s\n" "ldr q17, [x22, #0x10]\n" "movi v19.4s, #0x0\n" ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" "fmla v23.4s, v20.4s, v9.4s\n" "movi v20.4s, #0x0\n" "movi v9.4s, #0x0\n" ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" "ldr q18, [x22, #0x20]\n" ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" "ldr q18, [x22, #0x40]\n" ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" "ldr q18, [x22, #0x60]\n" ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" "movi v18.4s, #0x0\n" ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" "ldr q17, [x22, #0x30]\n" ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" "ldr q17, [x22, #0x50]\n" ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" "ldr q17, [x22, #0x70]\n" "add x22, x22, #0x88\n" ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" "uzp1 v17.2d, v19.2d, v20.2d\n" "uzp2 v20.2d, v19.2d, v20.2d\n" "fmul v19.4s, v27.4s, v0.s[0]\n" "scvtf v17.4s, v17.4s, #0x4\n" "scvtf v20.4s, v20.4s, #0x4\n" "fmla v25.4s, v17.4s, v19.4s\n" "ldr q19, [x21, #0x0]\n" "fmul v17.4s, v27.4s, v0.s[1]\n" "fmla v5.4s, v20.4s, v17.4s\n" "ldr q17, [x21, #0x10]\n" "uzp1 v20.2d, v9.2d, v18.2d\n" "uzp2 v9.2d, v9.2d, v18.2d\n" "fmul v18.4s, v27.4s, v0.s[2]\n" "fmul v0.4s, v27.4s, v0.s[3]\n" "scvtf v20.4s, v20.4s, #0x4\n" "scvtf v9.4s, v9.4s, #0x4\n" "fmla v7.4s, v20.4s, v18.4s\n" "movi v20.4s, #0x0\n" "movi v18.4s, #0x0\n" ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" "ldr q19, [x21, #0x20]\n" "fmla v4.4s, v9.4s, v0.4s\n" "movi v9.4s, #0x0\n" "movi v0.4s, #0x0\n" ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" "fmul 
v8.4s, v27.4s, v26.s[0]\n" ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" "ldr q17, [x21, #0x30]\n" ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" "fmul v31.4s, v27.4s, v26.s[1]\n" ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" "ldr q19, [x21, #0x40]\n" ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" "fmul v15.4s, v27.4s, v26.s[2]\n" "fmul v27.4s, v27.4s, v26.s[3]\n" ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" "ldr q1, [x21, #0x50]\n" ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" "ldr q26, [x21, #0x60]\n" ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" "ldr q21, [x21, #0x70]\n" "add x21, x21, #0x88\n" ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" "uzp1 v29.2d, v20.2d, v18.2d\n" "uzp2 v21.2d, v20.2d, v18.2d\n" "scvtf v29.4s, v29.4s, #0x4\n" "uzp1 v18.2d, v9.2d, v0.2d\n" "uzp2 v16.2d, v9.2d, v0.2d\n" "scvtf v21.4s, v21.4s, #0x4\n" "fmla v6.4s, v29.4s, v8.4s\n" "scvtf v18.4s, v18.4s, #0x4\n" "scvtf v16.4s, v16.4s, #0x4\n" "fmla v30.4s, v21.4s, v31.4s\n" "fmla v24.4s, v18.4s, v15.4s\n" "fmla v14.4s, v16.4s, v27.4s\n" "bgt 3b\n" "mov x20, %x[res_ptr]\n" "subs x27, x27, #0x4\n" "add %x[res_ptr], %x[res_ptr], #0x10\n" "str q2, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q10, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q12, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q28, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q11, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q13, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q22, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q23, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q25, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q5, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q7, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q4, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q6, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q30, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q24, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "str q14, [x20, #0x0]\n" "bne 2b\n" "mov x20, #0x4\n" "sub x10, x10, #0x10\n" "cmp x10, #0x10\n" "mov %x[res_ptr], x26\n" "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" "bge 1b\n" "4:" // Row loop skip "cbz x10, 9f\n" "5:" // Row tail: Row loop "add x24, %x[b_ptr], #0x8\n" "mov x23, %x[nc]\n" "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" "6:" // Row tail: Column loop "movi v2.16b, #0x0\n" "movi v10.16b, #0x0\n" "add x25, %x[a_ptr], #0x8\n" "mov x21, %x[nb]\n" "movi v12.16b, #0x0\n" "movi v28.16b, #0x0\n" "7:" // Row tail: Block loop "ldr q6, [x24, #0x0]\n" "ldr q5, [x24, #0x10]\n" "movi v17.16b, #0x4\n" "movi v8.4s, #0x0\n" "ldr q4, [x25, #0x0]\n" "ldr q13, [x25, #0x10]\n" "movi v27.4s, #0x0\n" "movi v0.4s, #0x0\n" "ldr q31, [x24, #0x20]\n" "ldr q14, [x24, #0x30]\n" "movi v29.4s, #0x0\n" "movi v22.16b, #0xf0\n" "ldr q11, [x25, #0x20]\n" "ldr q23, [x25, #0x30]\n" "sshl v21.16b, v6.16b, v17.16b\n" "sshl v16.16b, v5.16b, v17.16b\n" "ldr q20, [x25, #0x40]\n" "ldr q26, [x25, #0x50]\n" "and v6.16b, v6.16b, v22.16b\n" "and v5.16b, v5.16b, v22.16b\n" "ldr q25, [x25, #0x60]\n" "ldr q3, [x25, #0x70]\n" "sshl v19.16b, v31.16b, v17.16b\n" "sshl v18.16b, v14.16b, v17.16b\n" "ldr d17, [x25, #-0x8]\n" ".inst 0x4e95a488 // smmla 
v8.4s, v4.16b, v21.16b\n" ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" "and v31.16b, v31.16b, v22.16b\n" ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" "and v14.16b, v14.16b, v22.16b\n" "sub x20, x24, #0x8\n" "ldr d16, [x20, #0x0]\n" "subs x21, x21, #0x1\n" "add x25, x25, #0x88\n" "fcvtl v17.4s, v17.4h\n" "add x24, x24, #0x48\n" ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" "fcvtl v16.4s, v16.4h\n" ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" "fmul v23.4s, v16.4s, v17.s[0]\n" "fmul v21.4s, v16.4s, v17.s[1]\n" "fmul v1.4s, v16.4s, v17.s[2]\n" "fmul v20.4s, v16.4s, v17.s[3]\n" ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" "uzp1 v19.2d, v8.2d, v27.2d\n" "uzp2 v18.2d, v8.2d, v27.2d\n" "scvtf v19.4s, v19.4s, #0x4\n" "uzp1 v17.2d, v0.2d, v29.2d\n" "uzp2 v16.2d, v0.2d, v29.2d\n" "scvtf v18.4s, v18.4s, #0x4\n" "fmla v2.4s, v19.4s, v23.4s\n" "scvtf v17.4s, v17.4s, #0x4\n" "scvtf v16.4s, v16.4s, #0x4\n" "fmla v10.4s, v18.4s, v21.4s\n" "fmla v12.4s, v17.4s, v1.4s\n" "fmla v28.4s, v16.4s, v20.4s\n" "bgt 7b\n" "mov x20, %x[res_ptr]\n" "cmp x10, #0x1\n" "str q2, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "cmp x10, #0x2\n" "str q10, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "cmp x10, #0x3\n" "str q12, [x20, #0x0]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "str q28, [x20, #0x0]\n" "8:" // Row tail: Accumulator store skip "subs x23, x23, #0x4\n" "add %x[res_ptr], %x[res_ptr], #0x10\n" "bne 6b\n" "subs x10, x10, #0x4\n" "add %x[a_ptr], %x[a_ptr], x9\n" "mov %x[res_ptr], x22\n" "bgt 5b\n" "9:" // Row tail: Row loop skip : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) ggml_gemm_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) if (ggml_cpu_get_sve_cnt() == QK8_0) { const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; size_t res_stride = bs * sizeof(float); __asm__ __volatile__( "mov x20, #0x4\n" "mov x13, %x[nr]\n" "mov z28.s, #-0x4\n" "mov x12, #0x88\n" "ptrue p1.b\n" "whilelt p0.s, XZR, x20\n" "cmp x13, #0x10\n" "mul x12, %x[nb], x12\n" "blt 4f\n" "1:" // Row loop "add x11, %x[b_ptr], #0x10\n" "mov x10, %x[nc]\n" "add x9, %x[res_ptr], %x[res_stride], LSL #4\n" "2:" // Column loop "add x28, %x[a_ptr], #0x8\n" "mov z24.b, #0x0\n" "mov z15.b, #0x0\n" "mov x27, %x[nb]\n" "add x26, x28, x12\n" "mov z12.b, #0x0\n" "mov z0.b, #0x0\n" "add x25, x26, x12\n" "mov z13.b, #0x0\n" "mov z1.b, #0x0\n" "add x24, x25, x12\n" "mov z20.b, #0x0\n" "mov z25.b, #0x0\n" "mov z11.b, #0x0\n" "mov z16.b, #0x0\n" "mov z19.b, #0x0\n" "mov z26.b, #0x0\n" "mov z8.b, #0x0\n" "mov z29.b, #0x0\n" "mov z27.b, #0x0\n" "mov z10.b, #0x0\n" "3:" // Block loop "ld1b { z30.b }, p1/Z, [x11]\n" "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n" "mov z18.s, #0x0\n" "mov z7.s, #0x0\n" "ld1rqb { z3.b }, p1/Z, [x28]\n" "ld1rqb { z5.b }, p1/Z, [x28, #16]\n" "mov z9.s, #0x0\n" "mov z22.s, #0x0\n" "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n" "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n" "sub x20, x11, #0x10\n" "sub x23, x28, #0x8\n" "lsl z31.b, z30.b, #0x4\n" "lsl z6.b, z21.b, #0x4\n" "ld1h { z23.s }, p1/Z, [x20]\n" "sub x22, x26, #0x8\n" "and z30.b, z30.b, #0xf0\n" "and z21.b, z21.b, #0xf0\n" "sub x21, x25, #0x8\n" "sub x20, x24, #0x8\n" "lsl z14.b, z4.b, #0x4\n" "lsl z2.b, z17.b, #0x4\n" "subs x27, x27, #0x1\n" "add x11, x11, #0x90\n" ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n" ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n" "ld1rqb { z3.b }, p1/Z, [x28, #32]\n" "and z4.b, z4.b, #0xf0\n" ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" "ld1rqb { z5.b }, p1/Z, [x28, #48]\n" "and z17.b, z17.b, #0xf0\n" "fcvt z23.s, p1/m, z23.h\n" ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n" ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n" "ld1rqb { z3.b }, p1/Z, [x28, #64]\n" ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" "ld1rqb { z5.b }, p1/Z, [x28, #80]\n" "fscale z23.s, p1/m, z23.s, z28.s\n" ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n" ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n" "ld1rqb { z3.b }, p1/Z, [x28, #96]\n" ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" "ld1rqb { z5.b }, p1/Z, [x28, #112]\n" "add x28, x28, #0x88\n" ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n" ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n" "ld1h { z3.s }, p0/Z, [x23]\n" ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" "fcvt z3.s, p1/m, z3.h\n" "uzp1 z5.d, z18.d, z7.d\n" "uzp2 z18.d, z18.d, z7.d\n" "mov z3.q, z3.q[0]\n" "uzp1 z7.d, z9.d, z22.d\n" "uzp2 z22.d, z9.d, z22.d\n" "fmul z9.s, z23.s, z3.s[0]\n" "scvtf z5.s, p1/m, z5.s\n" "scvtf z18.s, p1/m, z18.s\n" "scvtf z7.s, p1/m, z7.s\n" "scvtf z22.s, p1/m, z22.s\n" "fmla z24.s, p1/M, z5.s, z9.s\n" "ld1rqb { z5.b }, p1/Z, [x26]\n" "fmul z9.s, z23.s, z3.s[1]\n" "fmla z15.s, p1/M, z18.s, z9.s\n" "ld1rqb { z18.b }, p1/Z, [x26, #16]\n" "fmul z9.s, z23.s, z3.s[2]\n" "fmul z3.s, z23.s, z3.s[3]\n" "fmla z12.s, p1/M, z7.s, z9.s\n" "mov z9.s, #0x0\n" "ld1h { z7.s }, p0/Z, [x22]\n" ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" "fmla 
z0.s, p1/M, z22.s, z3.s\n" "mov z22.s, #0x0\n" "ld1h { z3.s }, p0/Z, [x21]\n" ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" "ld1rqb { z5.b }, p1/Z, [x26, #32]\n" "fcvt z7.s, p1/m, z7.h\n" "fcvt z3.s, p1/m, z3.h\n" ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" "ld1rqb { z5.b }, p1/Z, [x26, #64]\n" "mov z7.q, z7.q[0]\n" "mov z3.q, z3.q[0]\n" ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" "ld1rqb { z5.b }, p1/Z, [x26, #96]\n" ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" "uzp1 z5.d, z9.d, z22.d\n" "scvtf z5.s, p1/m, z5.s\n" "uzp2 z22.d, z9.d, z22.d\n" "fmul z9.s, z23.s, z7.s[0]\n" "scvtf z22.s, p1/m, z22.s\n" "fmla z13.s, p1/M, z5.s, z9.s\n" "ld1rqb { z9.b }, p1/Z, [x25]\n" "fmul z5.s, z23.s, z7.s[1]\n" "fmla z1.s, p1/M, z22.s, z5.s\n" "mov z5.s, #0x0\n" "mov z22.s, #0x0\n" ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n" ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n" "ld1rqb { z18.b }, p1/Z, [x26, #48]\n" ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n" ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n" "ld1rqb { z18.b }, p1/Z, [x26, #80]\n" ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n" ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n" "ld1rqb { z18.b }, p1/Z, [x26, #112]\n" "add x26, x26, #0x88\n" ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n" ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n" "uzp1 z18.d, z5.d, z22.d\n" "scvtf z18.s, p1/m, z18.s\n" "uzp2 z22.d, z5.d, z22.d\n" "fmul z5.s, z23.s, z7.s[2]\n" "fmul z7.s, z23.s, z7.s[3]\n" "scvtf z22.s, p1/m, z22.s\n" "fmla z20.s, p1/M, z18.s, z5.s\n" "ld1rqb { z18.b }, p1/Z, [x25, #16]\n" "ld1h { z5.s }, p0/Z, [x20]\n" "fcvt z5.s, p1/m, z5.h\n" "fmla z25.s, p1/M, z22.s, z7.s\n" "mov z22.s, #0x0\n" "mov z7.s, #0x0\n" ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n" ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" "ld1rqb { z9.b }, p1/Z, [x25, #32]\n" "mov z5.q, z5.q[0]\n" ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n" ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" "ld1rqb { z9.b }, p1/Z, [x25, #64]\n" ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n" ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n" "ld1rqb { z9.b }, p1/Z, [x25, #96]\n" ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n" ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n" "uzp1 z9.d, z22.d, z7.d\n" "scvtf z9.s, p1/m, z9.s\n" "uzp2 z22.d, z22.d, z7.d\n" "fmul z7.s, z23.s, z3.s[0]\n" "scvtf z22.s, p1/m, z22.s\n" "fmla z11.s, p1/M, z9.s, z7.s\n" "ld1rqb { z9.b }, p1/Z, [x24]\n" "fmul z7.s, z23.s, z3.s[1]\n" "fmla z16.s, p1/M, z22.s, z7.s\n" "mov z22.s, #0x0\n" "mov z7.s, #0x0\n" ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n" ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n" "ld1rqb { z18.b }, p1/Z, [x25, #48]\n" ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n" ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n" "ld1rqb { z18.b }, p1/Z, [x25, #80]\n" ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n" ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" "ld1rqb { z18.b }, p1/Z, [x25, #112]\n" "add x25, x25, #0x88\n" ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n" ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n" "uzp1 z18.d, z22.d, z7.d\n" "scvtf z18.s, p1/m, z18.s\n" "uzp2 z7.d, z22.d, z7.d\n" "fmul z22.s, z23.s, z3.s[2]\n" "fmul z3.s, z23.s, z3.s[3]\n" "scvtf z7.s, p1/m, z7.s\n" "fmla z19.s, p1/M, z18.s, z22.s\n" "ld1rqb { z18.b }, p1/Z, [x24, #16]\n" "fmul z22.s, z23.s, z5.s[0]\n" "fmla z26.s, p1/M, z7.s, z3.s\n" "mov z3.s, #0x0\n" "mov z7.s, 
#0x0\n" ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n" ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" "ld1rqb { z9.b }, p1/Z, [x24, #32]\n" ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n" ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" "mov z9.s, #0x0\n" ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n" "mov z31.s, #0x0\n" ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n" "ld1rqb { z6.b }, p1/Z, [x24, #48]\n" "ld1rqb { z18.b }, p1/Z, [x24, #64]\n" ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n" "fmul z14.s, z23.s, z5.s[1]\n" ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n" "ld1rqb { z6.b }, p1/Z, [x24, #80]\n" "fmul z2.s, z23.s, z5.s[2]\n" "fmul z23.s, z23.s, z5.s[3]\n" ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n" ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" "ld1rqb { z5.b }, p1/Z, [x24, #96]\n" ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n" ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n" "ld1rqb { z18.b }, p1/Z, [x24, #112]\n" "add x24, x24, #0x88\n" ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n" ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n" ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n" ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n" "uzp1 z18.d, z3.d, z7.d\n" "uzp2 z5.d, z3.d, z7.d\n" "scvtf z18.s, p1/m, z18.s\n" "uzp1 z6.d, z9.d, z31.d\n" "uzp2 z9.d, z9.d, z31.d\n" "scvtf z5.s, p1/m, z5.s\n" "fmla z8.s, p1/M, z18.s, z22.s\n" "scvtf z6.s, p1/m, z6.s\n" "scvtf z9.s, p1/m, z9.s\n" "fmla z29.s, p1/M, z5.s, z14.s\n" "fmla z27.s, p1/M, z6.s, z2.s\n" "fmla z10.s, p1/M, z9.s, z23.s\n" "bgt 3b\n" "mov x20, %x[res_ptr]\n" "subs x10, x10, #0x8\n" "add %x[res_ptr], %x[res_ptr], #0x20\n" "st1w { z24.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z15.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z12.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z0.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z13.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z1.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z20.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z25.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z11.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z16.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z19.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z26.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z8.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z29.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z27.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "st1w { z10.s }, p1, [x20]\n" "bne 2b\n" "mov x20, #0x4\n" "sub x13, x13, #0x10\n" "cmp x13, #0x10\n" "mov %x[res_ptr], x9\n" "madd %x[a_ptr], x20, x12, %x[a_ptr]\n" "bge 1b\n" "4:" // Row loop skip "cbz x13, 9f\n" "5:" // Row tail: Row loop "add x25, %x[b_ptr], #0x10\n" "mov x24, %x[nc]\n" "add x23, %x[res_ptr], %x[res_stride], LSL #2\n" "6:" // Row tail: Column loop "mov z24.b, #0x0\n" "mov z15.b, #0x0\n" "add x28, %x[a_ptr], #0x8\n" "mov x22, %x[nb]\n" "mov z12.b, #0x0\n" "mov z0.b, #0x0\n" "7:" // Row tail: Block loop "ld1b { z3.b }, p1/Z, [x25]\n" "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n" "mov z2.s, #0x0\n" "mov z25.s, #0x0\n" "ld1rqb { z26.b }, p1/Z, [x28]\n" "ld1rqb { z21.b }, p1/Z, [x28, #16]\n" "mov z27.s, #0x0\n" "mov z19.s, #0x0\n" "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n" "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n" "sub x21, x25, #0x10\n" "sub x20, x28, #0x8\n" "lsl z20.b, z3.b, #0x4\n" "lsl z4.b, z6.b, #0x4\n" "ld1rqb { z10.b }, p1/Z, [x28, #32]\n" "ld1rqb { z23.b 
}, p1/Z, [x28, #48]\n" "and z3.b, z3.b, #0xf0\n" "and z6.b, z6.b, #0xf0\n" "ld1rqb { z11.b }, p1/Z, [x28, #64]\n" "ld1rqb { z7.b }, p1/Z, [x28, #80]\n" "lsl z8.b, z29.b, #0x4\n" "lsl z14.b, z16.b, #0x4\n" "ld1rqb { z18.b }, p1/Z, [x28, #96]\n" "ld1rqb { z30.b }, p1/Z, [x28, #112]\n" ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n" ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n" "and z29.b, z29.b, #0xf0\n" "ld1h { z17.s }, p1/Z, [x21]\n" ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n" ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n" "and z16.b, z16.b, #0xf0\n" "ld1h { z4.s }, p0/Z, [x20]\n" "subs x22, x22, #0x1\n" "add x28, x28, #0x88\n" "fcvt z17.s, p1/m, z17.h\n" "add x25, x25, #0x90\n" ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n" ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n" "fcvt z4.s, p1/m, z4.h\n" ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n" ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n" "fscale z17.s, p1/m, z17.s, z28.s\n" "mov z4.q, z4.q[0]\n" ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n" ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n" "fmul z23.s, z17.s, z4.s[0]\n" "fmul z9.s, z17.s, z4.s[1]\n" "fmul z21.s, z17.s, z4.s[2]\n" "fmul z4.s, z17.s, z4.s[3]\n" ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n" ".inst 0x450698f3 // smmla z19.s, z7.b, z6.b\n" ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n" ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n" ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n" ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n" "uzp1 z31.d, z2.d, z25.d\n" "uzp2 z13.d, z2.d, z25.d\n" "scvtf z31.s, p1/m, z31.s\n" "uzp1 z17.d, z27.d, z19.d\n" "uzp2 z18.d, z27.d, z19.d\n" "scvtf z13.s, p1/m, z13.s\n" "fmla z24.s, p1/M, z31.s, z23.s\n" "scvtf z17.s, p1/m, z17.s\n" "scvtf z18.s, p1/m, z18.s\n" "fmla z15.s, p1/M, z13.s, z9.s\n" "fmla z12.s, p1/M, z17.s, z21.s\n" "fmla z0.s, p1/M, z18.s, z4.s\n" "bgt 7b\n" "mov x20, %x[res_ptr]\n" "cmp x13, #0x1\n" "st1w { z24.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "cmp x13, #0x2\n" "st1w { z15.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "cmp x13, #0x3\n" "st1w { z12.s }, p1, [x20]\n" "add x20, x20, %x[res_stride]\n" "ble 8f\n" "st1w { z0.s }, p1, [x20]\n" "8:" // Row tail: Accumulator store skip "subs x24, x24, #0x8\n" "add %x[res_ptr], %x[res_ptr], #0x20\n" "bne 6b\n" "subs x13, x13, #0x4\n" "add %x[a_ptr], %x[a_ptr], x12\n" "mov %x[res_ptr], x23\n" "bgt 5b\n" "9:" // Row tail: Row loop skip : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); return; } #endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) #endif // #if ! ((defined(_MSC_VER)) && ! 
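// Summary of the SVE path above: the kernel tiles the output into 16-row x 8-column
// blocks ("Row loop" / "Column loop"), and the inner "Block loop" widens the packed
// 4-bit weights with LSL #4 / AND #0xF0, accumulates them against the int8 activations
// with SMMLA, converts the fp16 block scales with FCVT (the FSCALE by -4 appears to
// compensate for the nibbles being kept shifted left by four bits), and applies them
// with FMLA. Leftover rows (<16) go through the "Row tail" loops; anything else falls
// through to the generic implementation below.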
defined(__clang__)) && defined(__aarch64__) ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); float32x4_t sumf[4]; for (int m = 0; m < 4; m++) { sumf[m] = vdupq_n_f32(0); } for (int l = 0; l < nb; l++) { float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); int32x4_t sumi_0 = vdupq_n_s32(0); int32x4_t sumi_1 = vdupq_n_s32(0); int32x4_t sumi_2 = vdupq_n_s32(0); int32x4_t sumi_3 = vdupq_n_s32(0); for (int k = 0; k < 4; k++) { int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); } sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); } for (int m = 0; m < 4; m++) { vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); } } } return; #endif // #if ! ((defined(_MSC_VER)) && ! 
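// Summary of the NEON IQ4_NL path above: the 4-bit indices are expanded through the
// kvalues_iq4nl lookup table with vqtbl1q_s8 (low and high nibbles separately), each of
// the four activation rows is accumulated with vdotq_laneq_s32, and the int32 sums are
// then scaled by the product of the per-block fp16 scales (a_d * b_d) before the store.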
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) ggml_gemm_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_K_8x4_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { constexpr int qk = QK_K; const int nb = n / qk; constexpr int ncols_interleaved = 8; constexpr int blocklen = 4; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) constexpr int q8_k_blocklen = 4; constexpr int acc_size = 2 * 4; // 2 row pairs × 4 col pairs const uint8x16_t m4b = vdupq_n_u8(0x0f); // 8 accumulators: 2 row pairs × 4 col pairs float32x4_t acc_f32[acc_size]; for (int y = 0; y < nr / q8_k_blocklen; y++) { const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int i = 0; i < acc_size; i++) { acc_f32[i] = vdupq_n_f32(0); } for (int b = 0; b < nb; b++) { // d4 0 1 2 3, 4 5 6 7 float32x4_t q4_d_0123 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d)); float32x4_t q4_d_4567 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d + 4)); // d8 0 1 2 3 float32x4_t q8_d_0123 = vld1q_f32(q8_ptr[b].d); // mins float32x4_t q4_dmin_0123 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin)); float32x4_t q4_dmin_4567 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin + 4)); // Precomputation of scales and mins float32x4_t sbd_scale_0123[q8_k_blocklen]; float32x4_t sbd_scale_4567[q8_k_blocklen]; float32x4_t sbd_min_0123[q8_k_blocklen]; float32x4_t sbd_min_4567[q8_k_blocklen]; sbd_scale_0123[0] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 0); sbd_scale_4567[0] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 0); sbd_min_0123[0] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 0); sbd_min_4567[0] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 0); sbd_scale_0123[1] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 1); sbd_scale_4567[1] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 1); sbd_min_0123[1] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 1); sbd_min_4567[1] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 1); sbd_scale_0123[2] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 2); sbd_scale_4567[2] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 2); sbd_min_0123[2] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 2); sbd_min_4567[2] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 2); sbd_scale_0123[3] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 3); sbd_scale_4567[3] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 3); sbd_min_0123[3] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 3); sbd_min_4567[3] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 3); // Precomputation of bsums, each vpaddq calcs all the bsums for each row const int16x8_t bsums[q8_k_blocklen] = { vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 0), vld1q_s16(q8_ptr[b].bsums + 16 * 0 + 8)), vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 1), vld1q_s16(q8_ptr[b].bsums + 16 * 1 + 8)), vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 2), vld1q_s16(q8_ptr[b].bsums + 16 * 2 + 8)), vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 3), vld1q_s16(q8_ptr[b].bsums + 16 * 3 + 8)), }; int16_t bsums_arr[QK_K / 64][8]; for (int q8_row = 0; q8_row < 4; q8_row++) { vst1q_s16(bsums_arr[q8_row], bsums[q8_row]); } // interleaved bias_acc: [0]->r0 0123, [1]->r1 0123, .., [4]->r0 4567, [5]->r1 4567 .. 
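// For one output element the Q4_K x Q8_K dot product decomposes roughly as
//   out = (d4 * d8) * sum_sb( scale_sb * sum_j q4_j * q8_j )
//       - (dmin4 * d8) * sum_sb( min_sb * bsum_sb )
// The first term is what acc_lo/acc_hi (weighted by the decoded subblock scales and the
// sbd_scale_* factors) accumulate in the subblock loop below; the second term is gathered
// in bias_acc from the precomputed bsums and per-subblock mins, and is subtracted at the
// end via vmlsq_f32 with the sbd_min_* factors.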
int32x4_t bias_acc[acc_size]; for (int i = 0; i < acc_size; i++) { bias_acc[i] = vdupq_n_s32(0); } for (int sb = 0; sb < QK_K / 64; sb++) { // Int accumulators for qs vecdot (4 row x 2 col quartets) int32x4_t acc_lo[acc_size]; int32x4_t acc_hi[acc_size]; for (int i = 0; i < acc_size; i++) { acc_lo[i] = vdupq_n_s32(0); acc_hi[i] = vdupq_n_s32(0); } // Need scales for the low and high nibbles // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total int16x8_t q4sb_scales[2]; int16x8_t q4sb_mins[2]; for (int i = 0; i < 2; i++) { int8_t aux_q4sb[8]; const int offset = sb * 24 + i * 12; decode_q4_Kx8_scales_mins(&q4_ptr[b].scales[offset], &q4sb_mins[i], aux_q4sb); q4sb_scales[i] = vmovl_s8(vld1_s8(aux_q4sb)); } constexpr int reads_per_sb = 8; // 8 * 16 bytes each => 32 qs * 4 rows for (int k = 0; k < reads_per_sb; k++) { const int8x16_t q8_blk0 = vld1q_s8(q8_ptr[b].qs + sb * 256 + 16 * k); const int8x16_t q8_blk1 = vld1q_s8(q8_ptr[b].qs + sb * 256 + 16 * k + 128); // 0..3 & 32..35 const uint8x16_t q4_0123 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 32 * k); const uint8x16_t q4_4567 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 32 * k + 16); const int8x16_t q4_0123_lo = vreinterpretq_s8_u8(vandq_u8(q4_0123, m4b)); const int8x16_t q4_0123_hi = vreinterpretq_s8_u8(vshrq_n_u8(q4_0123, 4)); acc_lo[0] = vdotq_laneq_s32(acc_lo[0], q4_0123_lo, q8_blk0, 0); // 0..3 r0 c0123 acc_lo[1] = vdotq_laneq_s32(acc_lo[1], q4_0123_lo, q8_blk0, 1); // 0..3 r1 c0123 acc_lo[2] = vdotq_laneq_s32(acc_lo[2], q4_0123_lo, q8_blk0, 2); // 0..3 r2 c0123 acc_lo[3] = vdotq_laneq_s32(acc_lo[3], q4_0123_lo, q8_blk0, 3); // 0..3 r3 c0123 acc_hi[0] = vdotq_laneq_s32(acc_hi[0], q4_0123_hi, q8_blk1, 0); // 32..35 r0 c0123 acc_hi[1] = vdotq_laneq_s32(acc_hi[1], q4_0123_hi, q8_blk1, 1); // 32..35 r1 c0123 acc_hi[2] = vdotq_laneq_s32(acc_hi[2], q4_0123_hi, q8_blk1, 2); // 32..35 r2 c0123 acc_hi[3] = vdotq_laneq_s32(acc_hi[3], q4_0123_hi, q8_blk1, 3); // 32..35 r3 c0123 const int8x16_t q4_4567_lo = vreinterpretq_s8_u8(vandq_u8(q4_4567, m4b)); const int8x16_t q4_4567_hi = vreinterpretq_s8_u8(vshrq_n_u8(q4_4567, 4)); acc_lo[4] = vdotq_laneq_s32(acc_lo[4], q4_4567_lo, q8_blk0, 0); // 0..3 r0 c4567 acc_lo[5] = vdotq_laneq_s32(acc_lo[5], q4_4567_lo, q8_blk0, 1); // 0..3 r1 c4567 acc_lo[6] = vdotq_laneq_s32(acc_lo[6], q4_4567_lo, q8_blk0, 2); // 0..3 r2 c4567 acc_lo[7] = vdotq_laneq_s32(acc_lo[7], q4_4567_lo, q8_blk0, 3); // 0..3 r3 c4567 acc_hi[4] = vdotq_laneq_s32(acc_hi[4], q4_4567_hi, q8_blk1, 0); // 32..35 r0 c4567 acc_hi[5] = vdotq_laneq_s32(acc_hi[5], q4_4567_hi, q8_blk1, 1); // 32..35 r1 c4567 acc_hi[6] = vdotq_laneq_s32(acc_hi[6], q4_4567_hi, q8_blk1, 2); // 32..35 r2 c4567 acc_hi[7] = vdotq_laneq_s32(acc_hi[7], q4_4567_hi, q8_blk1, 3); // 32..35 r3 c4567 } // Scale and bias application // acc is stored interleaved to match output layout const int16x4_t sc_0123_lo = vget_low_s16(q4sb_scales[0]); const int16x4_t sc_4567_lo = vget_high_s16(q4sb_scales[0]); const int16x4_t sc_0123_hi = vget_low_s16(q4sb_scales[1]); const int16x4_t sc_4567_hi = vget_high_s16(q4sb_scales[1]); for (int row = 0; row < q8_k_blocklen; row++) { // Bias correction // row c0123 blk0 and blk1 const float32x4_t sumf_0123 = vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_0123_lo), acc_lo[row]), vmulq_s32(vmovl_s16(sc_0123_hi), acc_hi[row]))); acc_f32[2 * row] = vfmaq_f32(acc_f32[2 * row], sbd_scale_0123[row], sumf_0123); // row c4567 blk0 and blk1 const float32x4_t sumf_4567 = vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_4567_lo), acc_lo[row + 4]), 
vmulq_s32(vmovl_s16(sc_4567_hi), acc_hi[row + 4]))); acc_f32[2 * row + 1] = vfmaq_f32(acc_f32[2 * row + 1], sbd_scale_4567[row], sumf_4567); // Bias const int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[sb][row * 2]); const int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[sb][row * 2 + 1]); // row c0123 blk0 and blk1 bias_acc[2 * row] = vmlal_s16(bias_acc[2 * row], bsums_vec_lo, vget_low_s16(q4sb_mins[0])); bias_acc[2 * row] = vmlal_s16(bias_acc[2 * row], bsums_vec_hi, vget_low_s16(q4sb_mins[1])); // row c4567 blk0 and blk1 bias_acc[2 * row + 1] = vmlal_s16(bias_acc[2 * row + 1], bsums_vec_lo, vget_high_s16(q4sb_mins[0])); bias_acc[2 * row + 1] = vmlal_s16(bias_acc[2 * row + 1], bsums_vec_hi, vget_high_s16(q4sb_mins[1])); } } // for sb for (int row = 0; row < q8_k_blocklen; row++) { acc_f32[2 * row] = vmlsq_f32(acc_f32[2 * row], vcvtq_f32_s32(bias_acc[2 * row]), sbd_min_0123[row]); acc_f32[2 * row + 1] = vmlsq_f32(acc_f32[2 * row + 1], vcvtq_f32_s32(bias_acc[2 * row + 1]), sbd_min_4567[row]); } } // for b for (int i = 0; i < q8_k_blocklen; i++) { int row = y * q8_k_blocklen + i; for (int j = 0; j < 2; j++) { int col = x * ncols_interleaved + j * 4; int offset = row * bs + col; vst1q_f32(s + offset, acc_f32[2 * i + j]); } } } // for x } // for y return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemm_q4_K_8x4_q8_K_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { constexpr int qk = QK_K; const int nb = n / qk; constexpr int ncols_interleaved = 8; constexpr int blocklen = 8; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) constexpr int q8_k_blocklen = 4; const uint8x16_t m4b = vdupq_n_u8(0x0f); // 8 accumulators: 2 row pairs × 4 col pairs float32x4_t acc_f32[blocklen]; for (int y = 0; y < nr / q8_k_blocklen; y++) { const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int i = 0; i < blocklen; i++) { acc_f32[i] = vdupq_n_f32(0); } for (int b = 0; b < nb; b++) { // bsums pairs belongs to the same q8_k subblock const int16x8_t bsums[4]{ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 0), vld1q_s16(q8_ptr[b].bsums + 16 * 0 + 8)), vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 1), vld1q_s16(q8_ptr[b].bsums + 16 * 1 + 8)), vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 2), vld1q_s16(q8_ptr[b].bsums + 16 * 2 + 8)), vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 3), vld1q_s16(q8_ptr[b].bsums + 16 * 3 + 8)), }; int16_t bsums_arr[4][8]; for (int q8_row = 0; q8_row < 4; q8_row++) { vst1q_s16(bsums_arr[q8_row], bsums[q8_row]); } int32x4_t sb_acc[4]; // Aux accumulators to store subblock (partial) results int32x4_t acc[8]; // rows 01 stored in [0][1][2][3] rows 23 stored in [4][5][6][7] int32x4_t bias_acc[8]; // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567, [2]->r1 0123 ... 
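// In this i8mm variant each vmmlaq_s32 call multiplies a 2x8 int8 tile (one pair of Q8
// rows) with an 8x2 tile (one pair of interleaved Q4 columns), yielding a 2x2 block of
// int32 sums.  acc[cp] collects rows 0-1 and acc[cp + 4] rows 2-3 for column pair cp, as
// noted above, and the vzip/vcombine "Reorder of i8mm output" step after the subblock
// loop rearranges these 2x2 tiles into plain row-major vectors before scaling.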
for (int i = 0; i < 8; i++) { acc[i] = vdupq_n_s32(0); bias_acc[i] = vdupq_n_s32(0); } for (int sb = 0; sb < QK_K / 64; sb++) { // Need scales for the low and high nibbles // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total int8_t q4sb_scales[2][8]; int16x8_t q4sb_mins[2]; // int16 as its needed for bias_acc later for (int i = 0; i < 2; i++) { const int offset = sb * 24 + i * 12; decode_q4_Kx8_scales_mins(&q4_ptr[b].scales[offset], &q4sb_mins[i], q4sb_scales[i]); } // q8_ptr[b].qs has interleaved Q8 rows (01, 23) const int8_t * q8_base = q8_ptr[b].qs + sb * 256; int8x16_t q8_qs_01[8]; int8x16_t q8_qs_23[8]; // Load 32-byte per row pair, 1 subblock each time for (int i = 0; i < 8; i++) { const int offset = i * 32; // 16 for row 01, 16 for row 23 q8_qs_01[i] = vld1q_s8(q8_base + offset); q8_qs_23[i] = vld1q_s8(q8_base + offset + 16); } const int8x16_t q8s[2][8] = { { q8_qs_01[0], q8_qs_01[1], q8_qs_01[2], q8_qs_01[3], q8_qs_01[4], q8_qs_01[5], q8_qs_01[6], q8_qs_01[7] }, { q8_qs_23[0], q8_qs_23[1], q8_qs_23[2], q8_qs_23[3], q8_qs_23[4], q8_qs_23[5], q8_qs_23[6], q8_qs_23[7] }, }; // Q4s columns iterated in pairs (01, 23, 45, 67) for (int cp = 0; cp < ncols_interleaved / 2; cp++) { for (int i = 0; i < 4; i++) { sb_acc[i] = vdupq_n_s32(0); } uint8x16_t q4_qs_cp_0 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 0); // 0 .. 7 & 32..39 uint8x16_t q4_qs_cp_1 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 64); // 8 ..15 & 40..47 uint8x16_t q4_qs_cp_2 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 128); // 16..23 & 48..55 uint8x16_t q4_qs_cp_3 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 192); // 24..31 & 56..63 const int8x16_t q4_nibbles[2][4] = { { vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_0, m4b)), vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_1, m4b)), vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_2, m4b)), vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_3, m4b)), }, { vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_0, 4)), vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_1, 4)), vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_2, 4)), vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_3, 4)), } }; // Calculates the Qs muladd of every row pair (rp) rows 01 and 23 of q8 // for each of the internal 32 qs subblock (blk) for (int rp = 0; rp < 2; rp++) { for (int blk = 0; blk < 2; blk++) { const int8x16_t * q8 = &q8s[rp][4 * blk]; const int8x16_t * q4 = q4_nibbles[blk]; int32x4_t acc = sb_acc[2 * rp + blk]; // mul add for each qs in the same subblock for (int qs_offset = 0; qs_offset < 4; qs_offset++) { acc = vmmlaq_s32(acc, q4[qs_offset], q8[qs_offset]); } sb_acc[2 * rp + blk] = acc; } } // Scales[i] corresponds to column i const int scale_offset = cp * 2; for (int blk = 0; blk < 2; blk++) { const int32x4_t block_scale = { (int32_t) q4sb_scales[blk][scale_offset], (int32_t) q4sb_scales[blk][scale_offset], (int32_t) q4sb_scales[blk][scale_offset + 1], (int32_t) q4sb_scales[blk][scale_offset + 1], }; acc[cp] = vmlaq_s32(acc[cp], sb_acc[blk], block_scale); acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc[blk + 2], block_scale); } } // Multiply Acc bsum + mins for (int q8_row = 0; q8_row < 4; q8_row++) { // Each pair of subblocks share the same bsums // Load scalar bsum → broadcast to a vector (vdupq_n_s16(s)). 
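// Each bsums entry holds the sum of a group of Q8 quants; the vpaddq at the top of the
// block folds adjacent entries so that each 16-bit lane effectively covers one 32-quant
// subblock per row.  Broadcasting that scalar and multiplying by the decoded per-column
// mins accumulates the minimum-offset correction, which is later scaled by dmin4 * d8
// and subtracted from the float accumulators.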
int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[sb][q8_row * 2]); int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[sb][q8_row * 2 + 1]); bias_acc[2 * q8_row] = vmlal_s16(bias_acc[2 * q8_row], bsums_vec_lo, vget_low_s16(q4sb_mins[0])); bias_acc[2 * q8_row] = vmlal_s16(bias_acc[2 * q8_row], bsums_vec_hi, vget_low_s16(q4sb_mins[1])); bias_acc[2 * q8_row + 1] = vmlal_s16(bias_acc[2 * q8_row + 1], bsums_vec_lo, vget_high_s16(q4sb_mins[0])); bias_acc[2 * q8_row + 1] = vmlal_s16(bias_acc[2 * q8_row + 1], bsums_vec_hi, vget_high_s16(q4sb_mins[1])); } } // for sb // Reorder of i8mm output with bias and output layout for (int i = 0; i < 8; i++) { int32x2x2_t aux = vzip_s32(vget_low_s32(acc[i]), vget_high_s32(acc[i])); acc[i] = vcombine_s32(aux.val[0], aux.val[1]); } int32x4_t reorder_acc[8] = { vcombine_s32(vget_low_s32(acc[0]), vget_low_s32(acc[1])), vcombine_s32(vget_low_s32(acc[2]), vget_low_s32(acc[3])), vcombine_s32(vget_high_s32(acc[0]), vget_high_s32(acc[1])), vcombine_s32(vget_high_s32(acc[2]), vget_high_s32(acc[3])), vcombine_s32(vget_low_s32(acc[4]), vget_low_s32(acc[5])), vcombine_s32(vget_low_s32(acc[6]), vget_low_s32(acc[7])), vcombine_s32(vget_high_s32(acc[4]), vget_high_s32(acc[5])), vcombine_s32(vget_high_s32(acc[6]), vget_high_s32(acc[7])), }; for (int i = 0; i < q8_k_blocklen; i++) { for (int j = 0; j < 2; j++) { float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d[i]); float32x4_t q4_dmin = vcvt_f32_f16(vld1_f16((const __fp16 *) (q4_ptr[b].dmin + j * 4))); const float32x4_t dmins = vmulq_f32(q4_dmin, q8_d); float32x4_t q4_d = vcvt_f32_f16(vld1_f16((const __fp16 *) (q4_ptr[b].d + j * 4))); const float32x4_t scale = vmulq_f32(q4_d, q8_d); acc_f32[2 * i + j] = vmlsq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(bias_acc[2 * i + j]), dmins); acc_f32[2 * i + j] = vmlaq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(reorder_acc[2 * i + j]), scale); } } } // for b // With the previous reorder, the tile is already in the correct memory layout. 
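// The 4x8 output tile is written out as two float32x4 vectors per row: acc_f32[2*i]
// holds columns 0-3 and acc_f32[2*i + 1] columns 4-7 of row i, stored at
// s[row * bs + col], with bs used here as the row stride of the result in floats.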
for (int i = 0; i < q8_k_blocklen; i++) { int row = y * q8_k_blocklen + i; for (int j = 0; j < 2; j++) { int col = x * ncols_interleaved + j * 4; int offset = row * bs + col; vst1q_f32(s + offset, acc_f32[2 * i + j]); } } } // for x } // for y return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) ggml_gemm_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q8_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx + (x * nb); float32x4_t sumf[4]; for (int m = 0; m < 4; m++) { sumf[m] = vdupq_n_f32(0); } for (int l = 0; l < nb; l++) { float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *) a_ptr[l].d)); float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *) b_ptr[l].d)); int32x4_t sumi_0 = vdupq_n_s32(0); int32x4_t sumi_1 = vdupq_n_s32(0); int32x4_t sumi_2 = vdupq_n_s32(0); int32x4_t sumi_3 = vdupq_n_s32(0); for (int k_group = 0; k_group < 8; k_group += 4) { int8x16x4_t a = vld1q_s8_x4(a_ptr[l].qs + 16 * k_group); int8x16x4_t b = vld1q_s8_x4(b_ptr[l].qs + 16 * k_group); for (int k = 0; k < 4; k++) { sumi_0 = vdotq_laneq_s32(sumi_0, b.val[k], a.val[k], 0); sumi_1 = vdotq_laneq_s32(sumi_1, b.val[k], a.val[k], 1); sumi_2 = vdotq_laneq_s32(sumi_2, b.val[k], a.val[k], 2); sumi_3 = vdotq_laneq_s32(sumi_3, b.val[k], a.val[k], 3); } } sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); } for (int m = 0; m < 4; m++) { vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); } } } return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) ggml_gemm_q8_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q8_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) const block_q8_0x4 * b_ptr_base = (const block_q8_0x4 *) vx; for (int y = 0; y < nr; y += 4) { const block_q8_0x4 * a_ptr_base = (const block_q8_0x4 *) vy + (y / 4) * nb; for (int x = 0; x < nc; x += ncols_interleaved) { const block_q8_0x4 * b_ptr = b_ptr_base + (x / 4) * nb; const block_q8_0x4 * a_ptr = a_ptr_base; float32x4_t acc_f32[4]; for (int i = 0; i < 4; i++) { acc_f32[i] = vdupq_n_f32(0); } for (int b = 0; b < nb; b++) { int32x4_t acc[4]; for (int i = 0; i < 4; i++) { acc[i] = vdupq_n_s32(0); } // Process 4 chunks of 8 positions each for (int chunk = 0; chunk < 4; 
chunk++) { int8x16_t a01 = vld1q_s8(a_ptr->qs + chunk * 32); int8x16_t a23 = vld1q_s8(a_ptr->qs + chunk * 32 + 16); int8x16_t b01 = vld1q_s8(b_ptr->qs + chunk * 32); int8x16_t b23 = vld1q_s8(b_ptr->qs + chunk * 32 + 16); acc[0] = vmmlaq_s32(acc[0], a01, b01); acc[1] = vmmlaq_s32(acc[1], a01, b23); acc[2] = vmmlaq_s32(acc[2], a23, b01); acc[3] = vmmlaq_s32(acc[3], a23, b23); } // Reorder outputs from 2×2 tiles to row-major // acc[0] = [r0c0, r0c1, r1c0, r1c1] // acc[1] = [r0c2, r0c3, r1c2, r1c3] // acc[2] = [r2c0, r2c1, r3c0, r3c1] // acc[3] = [r2c2, r2c3, r3c2, r3c3] int32x4_t row0 = vcombine_s32(vget_low_s32(acc[0]), vget_low_s32(acc[1])); int32x4_t row1 = vcombine_s32(vget_high_s32(acc[0]), vget_high_s32(acc[1])); int32x4_t row2 = vcombine_s32(vget_low_s32(acc[2]), vget_low_s32(acc[3])); int32x4_t row3 = vcombine_s32(vget_high_s32(acc[2]), vget_high_s32(acc[3])); // Scales float32x4_t a_d = vcvt_f32_f16(vld1_f16((const __fp16 *) a_ptr->d)); float32x4_t b_d = vcvt_f32_f16(vld1_f16((const __fp16 *) b_ptr->d)); acc_f32[0] = vfmaq_f32(acc_f32[0], vcvtq_f32_s32(row0), vmulq_laneq_f32(b_d, a_d, 0)); acc_f32[1] = vfmaq_f32(acc_f32[1], vcvtq_f32_s32(row1), vmulq_laneq_f32(b_d, a_d, 1)); acc_f32[2] = vfmaq_f32(acc_f32[2], vcvtq_f32_s32(row2), vmulq_laneq_f32(b_d, a_d, 2)); acc_f32[3] = vfmaq_f32(acc_f32[3], vcvtq_f32_s32(row3), vmulq_laneq_f32(b_d, a_d, 3)); a_ptr++; b_ptr++; } for (int row = 0; row < 4; row++) { vst1q_f32(s + (y + row) * bs + x, acc_f32[row]); } } } return; #endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) ggml_gemm_q8_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } ggml-org-ggml-3678254/src/ggml-cpu/arch/loongarch/000077500000000000000000000000001512524704700215575ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/loongarch/quants.c000066400000000000000000002522001512524704700232370ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" #include #include #include #include #include // for qsort #include // for GGML_ASSERT #define GROUP_MAX_EPS 1e-15f #define GROUP_MAX_EPS_IQ3_XXS 1e-8f #define GROUP_MAX_EPS_IQ2_S 1e-8f #define GROUP_MAX_EPS_IQ1_M 1e-7f #define GROUP_MAX_EPS_IQ1_S 1e-12f #define UNUSED GGML_UNUSED #if defined(__loongarch_sx) static __m128i lsx_packs_w(__m128i a, __m128i b) { __m128i tmp, tmp1; tmp = __lsx_vsat_w(a, 15); tmp1 = __lsx_vsat_w(b, 15); return __lsx_vpickev_h(tmp1, tmp); } static __m128i lsx_packs_h(__m128i a, __m128i b) { __m128i tmp, tmp1; tmp = __lsx_vsat_h(a, 7); tmp1 = __lsx_vsat_h(b, 7); return __lsx_vpickev_b(tmp1, tmp); } static __m128i lsx_packus_h(__m128i a, __m128i b) { __m128i tmp, tmp1; tmp = __lsx_vsat_hu(a, 7); tmp1 = __lsx_vsat_hu(b, 7); return __lsx_vpickev_b(tmp1, tmp); } static __m128i lsx_maddubs_h(__m128i a, __m128i b) { __m128i tmp1, tmp2; tmp1 = __lsx_vmulwev_h_b(a, b); tmp2 = __lsx_vmulwod_h_b(a, b); return __lsx_vsadd_h(tmp1, tmp2); } static __m128i lsx_madd_h(__m128i a, __m128i b) { __m128i tmp1, tmp2; tmp1 = __lsx_vmulwev_w_h(a, b); tmp2 = __lsx_vmulwod_w_h(a, b); return __lsx_vadd_w(tmp1, tmp2); } static __m128i lsx_set_w(int32_t a, int32_t b, int32_t c, int32_t d) { v4i32 __ret = {d, c, b, a}; return (__m128i)__ret; } static __m128i lsx_shuffle_b(__m128i a, __m128i b) { __m128i mask_f, zero, tmp0, tmp2, mask; int f = 0x8f; mask_f = __lsx_vreplgr2vr_b(f); zero = __lsx_vldi(0); tmp0 = 
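// lsx_shuffle_b appears to emulate x86 pshufb-style byte shuffling on LSX: indices with
// the sign bit set must select zero, so the code below keeps the low 4 index bits,
// builds a mask from the sign bit, and routes the masked lanes through vshuf_b against a
// zero vector.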
__lsx_vand_v(b, mask_f); // get mask with low 4 bit and sign bits tmp0 = __lsx_vori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive mask = __lsx_vsle_b(zero, tmp0); // if mask >= 0, set mask tmp2 = __lsx_vand_v(tmp0, mask); // maskout the in2 < ones return __lsx_vshuf_b(a, zero, tmp2); } static __m128i lsx_hadd_h(__m128i a, __m128i b) { __m128i tmp1 = __lsx_vpickev_h(b, a); __m128i tmp2 = __lsx_vpickod_h(b, a); return __lsx_vadd_h(tmp1, tmp2); } static __m128i lsx_hadd_w(__m128i a, __m128i b) { __m128i tmp1 = __lsx_vpickev_w(b, a); __m128i tmp2 = __lsx_vpickod_w(b, a); return __lsx_vadd_w(tmp1, tmp2); } static __m128 lsx_hadd_s(__m128 a, __m128 b) { __m128 tmp1 = (__m128)__lsx_vpickev_w((__m128i)b, (__m128i)a); __m128 tmp2 = (__m128)__lsx_vpickod_w((__m128i)b, (__m128i)a); return __lsx_vfadd_s(tmp1, tmp2); } static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { __m128 res_0 =lsx_hadd_s(a, b); __m128 res_1 =lsx_hadd_s(c, d); __m128 res =lsx_hadd_s(res_0, res_1); res =lsx_hadd_s(res, res); res =lsx_hadd_s(res, res); return ((v4f32)res)[0]; } // multiply int8_t, add results pairwise twice static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { // Get absolute values of x vectors const __m128i ax = __lsx_vsigncov_b(x, x); // Sign the values of the y vectors const __m128i sy = __lsx_vsigncov_b(x, y); // Perform multiplication and create 16-bit values const __m128i dot = lsx_maddubs_h(ax, sy); const __m128i ones = __lsx_vreplgr2vr_h(1); return lsx_madd_h(ones, dot); } #endif #if defined(__loongarch_asx) #ifdef __clang__ #define VREGS_PREFIX "$vr" #define XREGS_PREFIX "$xr" #else // GCC #define VREGS_PREFIX "$f" #define XREGS_PREFIX "$f" #endif #define __ALL_REGS "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31" // Convert __m128i to __m256i static inline __m256i ____m256i(__m128i in) { __m256i out = __lasx_xvldi(0); __asm__ volatile ( ".irp i," __ALL_REGS "\n\t" " .ifc %[out], " XREGS_PREFIX"\\i \n\t" " .irp j," __ALL_REGS "\n\t" " .ifc %[in], " VREGS_PREFIX "\\j \n\t" " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" " .endif \n\t" " .endr \n\t" " .endif \n\t" ".endr \n\t" : [out] "+f" (out) : [in] "f" (in) ); return out; } // Convert two __m128i to __m256i static inline __m256i lasx_set_q(__m128i inhi, __m128i inlo) { __m256i out; __asm__ volatile ( ".irp i," __ALL_REGS "\n\t" " .ifc %[hi], " VREGS_PREFIX "\\i \n\t" " .irp j," __ALL_REGS "\n\t" " .ifc %[lo], " VREGS_PREFIX "\\j \n\t" " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" " .endif \n\t" " .endr \n\t" " .endif \n\t" ".endr \n\t" ".ifnc %[out], %[hi] \n\t" ".irp i," __ALL_REGS "\n\t" " .ifc %[out], " XREGS_PREFIX "\\i \n\t" " .irp j," __ALL_REGS "\n\t" " .ifc %[hi], " VREGS_PREFIX "\\j \n\t" " xvori.b $xr\\i, $xr\\j, 0 \n\t" " .endif \n\t" " .endr \n\t" " .endif \n\t" ".endr \n\t" ".endif \n\t" : [out] "=f" (out), [hi] "+f" (inhi) : [lo] "f" (inlo) ); return out; } // Convert __m256i low part to __m128i static inline __m128i lasx_extracti128_lo(__m256i in) { __m128i out; __asm__ volatile ( ".ifnc %[out], %[in] \n\t" ".irp i," __ALL_REGS "\n\t" " .ifc %[out], " VREGS_PREFIX "\\i \n\t" " .irp j," __ALL_REGS "\n\t" " .ifc %[in], " XREGS_PREFIX "\\j \n\t" " vori.b $vr\\i, $vr\\j, 0 \n\t" " .endif \n\t" " .endr \n\t" " .endif \n\t" ".endr \n\t" ".endif \n\t" : [out] "=f" (out) : [in] "f" (in) ); return out; } // Convert __m256i high part to __m128i static inline __m128i lasx_extracti128_hi(__m256i in) { __m128i out; __asm__ volatile ( ".irp 
i," __ALL_REGS "\n\t" " .ifc %[out], " VREGS_PREFIX "\\i \n\t" " .irp j," __ALL_REGS "\n\t" " .ifc %[in], " XREGS_PREFIX "\\j \n\t" " xvpermi.q $xr\\i, $xr\\j, 0x11 \n\t" " .endif \n\t" " .endr \n\t" " .endif \n\t" ".endr \n\t" : [out] "=f" (out) : [in] "f" (in) ); return out; } static __m256i lasx_set_w(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) { v8i32 __ret = {e0, e1, e2, e3, e4, e5, e6, e7}; return (__m256i)__ret; } static __m256i lasx_set_d(int64_t a, int64_t b, int64_t c, int64_t d) { v4i64 __ret = {d, c, b, a}; return (__m256i)__ret; } static __m256i lasx_insertf128( __m128i x, __m128i y) { return lasx_set_q(x, y); } static __m256i lasx_shuffle_b(__m256i a, __m256i b) { __m256i mask_f, zero, tmp0, tmp2, mask; int f = 0x8f; mask_f = __lasx_xvreplgr2vr_b(f); zero = __lasx_xvldi(0); tmp0 = __lasx_xvand_v(b, mask_f); // get mask with low 4 bit and sign bits tmp0 = __lasx_xvori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive mask = __lasx_xvsle_b(zero, tmp0); // if mask >= 0, set mask tmp2 = __lasx_xvand_v(tmp0, mask); // maskout the in2 < ones return __lasx_xvshuf_b(a, zero, tmp2); } static __m256i lasx_extu8_16(__m128i a) { return __lasx_vext2xv_hu_bu(____m256i(a)); } static __m256i lasx_ext8_16(__m128i a) { return __lasx_vext2xv_h_b(____m256i(a)); } static __m256i lasx_ext16_32(__m128i a) { return __lasx_vext2xv_w_h(____m256i(a)); } static __m128i lasx_extracti128( __m256i a, int pos) { __m128i ret; if( pos == 0) { ret = lasx_extracti128_lo(a); } else { ret = lasx_extracti128_hi(a); } return ret; } static __m128 lasx_extractf128( __m256 a, int pos) { __m128 ret; if( pos == 0) { ret = (__m128)lasx_extracti128_lo((__m256i)a); } else { ret = (__m128)lasx_extracti128_hi((__m256i)a); } return ret; } static __m256i lasx_maddubs_h(__m256i a, __m256i b) { __m256i tmp1, tmp2; tmp1 = __lasx_xvmulwev_h_b(a, b); tmp2 = __lasx_xvmulwod_h_b(a, b); return __lasx_xvsadd_h(tmp1, tmp2); } static __m256i lasx_madd_h(__m256i a, __m256i b) { __m256i tmp1, tmp2; tmp1 = __lasx_xvmulwev_w_h(a, b); tmp2 = __lasx_xvmulwod_w_h(a, b); return __lasx_xvadd_w(tmp1, tmp2); } static __m256i lasx_packs_w(__m256i a, __m256i b) { __m256i tmp, tmp1; tmp = __lasx_xvsat_w(a, 15); tmp1 = __lasx_xvsat_w(b, 15); return __lasx_xvpickev_h(tmp1, tmp); } static __m256i lasx_packs_h(__m256i a, __m256i b) { __m256i tmp, tmp1; tmp = __lasx_xvsat_h(a, 7); tmp1 = __lasx_xvsat_h(b, 7); return __lasx_xvpickev_b(tmp1, tmp); } static inline __m256i lasx_madd_h_b(__m256i a, __m256i b) { __m256i tmp1, tmp2; tmp1 = __lasx_xvmulwev_h_b(a, b); tmp2 = __lasx_xvmulwod_h_b(a, b); return __lasx_xvadd_h(tmp1, tmp2); } static inline __m256i lasx_xvrepl128vei_h(__m256i a, const unsigned int b) { switch (b) { case 0: return __lasx_xvrepl128vei_h(a, 0); case 1: return __lasx_xvrepl128vei_h(a, 1); case 2: return __lasx_xvrepl128vei_h(a, 2); case 3: return __lasx_xvrepl128vei_h(a, 3); case 4: return __lasx_xvrepl128vei_h(a, 4); case 5: return __lasx_xvrepl128vei_h(a, 5); case 6: return __lasx_xvrepl128vei_h(a, 6); case 7: return __lasx_xvrepl128vei_h(a, 7); default: __builtin_unreachable(); } } static inline __m256i lasx_xvandi_b_bit(__m256i a, const unsigned int b) { switch (b) { case 0: return __lasx_xvandi_b(a, 1 << 0); case 1: return __lasx_xvandi_b(a, 1 << 1); case 2: return __lasx_xvandi_b(a, 1 << 2); case 3: return __lasx_xvandi_b(a, 1 << 3); case 4: return __lasx_xvandi_b(a, 1 << 4); case 5: return __lasx_xvandi_b(a, 1 << 5); case 6: return __lasx_xvandi_b(a, 1 << 6); case 7: return __lasx_xvandi_b(a, 1 << 
7); default: __builtin_unreachable(); } } // horizontally add 8 floats static inline float hsum_float_8(const __m256 x) { __m128 res = lasx_extractf128(x, 1); res = __lsx_vfadd_s(res, lasx_extractf128(x, 0)); res = __lsx_vfadd_s(res, (__m128)__lsx_vpickod_d((__m128i)res, (__m128i)res)); res = __lsx_vfadd_s(res, (__m128)__lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w(res, 1), 0)); return ((v4f32)res)[0]; } // horizontally add 8 int32_t static inline int hsum_i32_8(const __m256i a) { __m256i tmp1 = __lasx_xvpermi_q(a, a, 0x11); __m256i tmp2 = __lasx_xvpermi_q(a, a, 0x00); __m128i tmp1_128 = lasx_extracti128_lo(tmp1); __m128i tmp2_128 = lasx_extracti128_lo(tmp2); __m128i sum128 = __lsx_vadd_w(tmp1_128, tmp2_128); __m128i ev = __lsx_vpickev_w(sum128, sum128); __m128i od = __lsx_vpickod_w(sum128, sum128); __m128i sum64 = __lsx_vadd_w(ev, od); int sum64_1, sum64_2; sum64_1 = __lsx_vpickve2gr_w(sum64, 0); sum64_2 = __lsx_vpickve2gr_w(sum64, 1); return sum64_1 + sum64_2; } // horizontally add 4 int32_t static inline int hsum_i32_4(const __m128i a) { __m128i ev = __lsx_vpickev_w(a, a); __m128i od = __lsx_vpickod_w(a, a); __m128i sum64 = __lsx_vadd_w(ev, od); int sum64_1, sum64_2; sum64_1 = __lsx_vpickve2gr_w(sum64, 0); sum64_2 = __lsx_vpickve2gr_w(sum64, 1); return sum64_1 + sum64_2; } // spread 32 bits to 32 bytes { 0x00, 0xFF } static inline __m256i bytes_from_bits_32(const uint8_t * x) { uint32_t x32; memcpy(&x32, x, sizeof(uint32_t)); const __m256i shuf_mask = lasx_set_d( 0x0303030303030303, 0x0202020202020202, 0x0101010101010101, 0x0000000000000000); __m256i bytes = lasx_shuffle_b(__lasx_xvreplgr2vr_w(x32), shuf_mask); const __m256i bit_mask = __lasx_xvreplgr2vr_d(0x7fbfdfeff7fbfdfe); bytes = __lasx_xvor_v(bytes, bit_mask); return __lasx_xvseq_b(bytes, __lasx_xvreplgr2vr_d(-1)); } // Unpack 32 4-bit fields into 32 bytes // The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { const __m128i lo = __lsx_vld((const __m128i *)rsi, 0); __m128i hi = __lsx_vsrli_h(lo, 4); return __lasx_xvandi_b(lasx_insertf128(hi, lo), 0xf); } // add int16_t pairwise and return as float vector static inline __m256 sum_i16_pairs_float(const __m256i x) { __m256i v = __lasx_xvpackod_h(x, x); __m256i summed_pairs = __lasx_xvaddwev_w_h(x, v); return __lasx_xvffint_s_w(summed_pairs); } static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { // Perform multiplication and create 16-bit values const __m256i dot = lasx_maddubs_h(ax, sy); return sum_i16_pairs_float(dot); } // multiply int8_t, add results pairwise twice and return as float vector static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { const __m256i dot = lasx_madd_h_b(x, y); return sum_i16_pairs_float(dot); } static inline __m128i packNibbles( __m256i bytes ) { // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh const __m256i lowByte = __lasx_xvreplgr2vr_h(0xFF); __m256i high = __lasx_xvandn_v(lowByte, bytes); __m256i low = __lasx_xvand_v(lowByte, bytes); high = __lasx_xvsrli_h(high, 4); bytes = __lasx_xvor_v(low, high); // Compress uint16_t lanes into bytes __m128i *r0 = (__m128i *)&bytes; __m256i tmp_h128 = __lasx_xvpermi_q(bytes, bytes, 0x11); __m128i *r1 = (__m128i *)&tmp_h128; __m128i zero = __lsx_vldi(0); __m128i tmp, tmp2, tmp3; tmp = __lsx_vmax_h(zero, *r0); tmp2 = __lsx_vsat_hu(tmp, 7); tmp = __lsx_vmax_h(zero, *r1); tmp3 = __lsx_vsat_hu(tmp, 7); return __lsx_vpickev_b(tmp3, tmp2); } #endif //__loongarch_asx void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0 * GGML_RESTRICT y = vy; #if defined(__loongarch_asx) for (int i = 0; i < nb; i++) { __m256 v0 = (__m256)__lasx_xvld( x , 0); __m256 v1 = (__m256)__lasx_xvld( x , 32); __m256 v2 = (__m256)__lasx_xvld( x , 64); __m256 v3 = (__m256)__lasx_xvld( x , 96); x += 32; // Compute max(abs(e)) for the block const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs , 0) ); max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); __m128 tmp = max4; max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vinsgr2vr_w(tmp, __lsx_vpickve2gr_w( max4, 1 ), 0 )); const float max_scalar = ((v4f32)max4)[0]; // Quantize these floats const float d = max_scalar / 127.f; y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( max_scalar != 0.0f ) ? 
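// Q8_0 quantization uses a symmetric scale per 32-value block: d = max|x| / 127 and
// id = 1/d (or 0 when the block is all zeros), so each float maps to round(x * id) in
// [-127, 127].  For illustration, a block whose largest magnitude is 6.35 gets d = 0.05,
// id = 20, and the value 1.0 quantizes to 20.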
127.f / max_scalar : 0.0f; const __m256 mul = (__m256)__lasx_xvreplfr2vr_s( id ); // Apply the multiplier v0 = __lasx_xvfmul_s( v0, mul ); v1 = __lasx_xvfmul_s( v1, mul ); v2 = __lasx_xvfmul_s( v2, mul ); v3 = __lasx_xvfmul_s( v3, mul ); // Round to nearest integer __m256i i0 = __lasx_xvftintrne_w_s( v0 ); __m256i i1 = __lasx_xvftintrne_w_s( v1 ); __m256i i2 = __lasx_xvftintrne_w_s( v2 ); __m256i i3 = __lasx_xvftintrne_w_s( v3 ); __m128i ni0 = lasx_extracti128( i0, 0 ); __m128i ni1 = lasx_extracti128( i0, 1); __m128i ni2 = lasx_extracti128( i1, 0); __m128i ni3 = lasx_extracti128( i1, 1); __m128i ni4 = lasx_extracti128( i2, 0); __m128i ni5 = lasx_extracti128( i2, 1); __m128i ni6 = lasx_extracti128( i3, 0); __m128i ni7 = lasx_extracti128( i3, 1); // Convert int32 to int16 ni0 = lsx_packs_w( ni0, ni1 ); ni2 = lsx_packs_w( ni2, ni3 ); ni4 = lsx_packs_w( ni4, ni5 ); ni6 = lsx_packs_w( ni6, ni7 ); // Convert int16 to int8 ni0 = lsx_packs_h( ni0, ni2 ); ni4 = lsx_packs_h( ni4, ni6 ); __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined(__loongarch_asx) for (int i = 0; i < nb; i++) { __m256 v0 = (__m256)__lasx_xvld( x , 0 ); __m256 v1 = (__m256)__lasx_xvld( x , 32 ); __m256 v2 = (__m256)__lasx_xvld( x , 64 ); __m256 v3 = (__m256)__lasx_xvld( x , 96 ); x += 32; // Compute max(abs(e)) for the block const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs, 0) ); max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); __m128 tmp = max4; max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vextrins_w((__m128i)tmp, (__m128i)max4, 0x1 )); const float max_scalar = ((v4f32)max4)[0]; // Quantize these floats const float d = max_scalar / 127.f; y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; const __m256 mul = __lasx_xvreplfr2vr_s( id ); // Apply the multiplier v0 = __lasx_xvfmul_s( v0, mul ); v1 = __lasx_xvfmul_s( v1, mul ); v2 = __lasx_xvfmul_s( v2, mul ); v3 = __lasx_xvfmul_s( v3, mul ); // Round to nearest integer __m256i i0 = __lasx_xvftintrne_w_s( v0 ); __m256i i1 = __lasx_xvftintrne_w_s( v1 ); __m256i i2 = __lasx_xvftintrne_w_s( v2 ); __m256i i3 = __lasx_xvftintrne_w_s( v3 ); __m128i ni0 = lasx_extracti128(i0, 0); __m128i ni1 = lasx_extracti128( i0, 1); __m128i ni2 = lasx_extracti128( i1, 0); __m128i ni3 = lasx_extracti128( i1, 1); __m128i ni4 = lasx_extracti128( i2, 0 ); __m128i ni5 = lasx_extracti128( i2, 1); __m128i ni6 = lasx_extracti128( i3, 0); __m128i ni7 = lasx_extracti128( i3, 1); // Compute the sum of the quants and set y[i].s const __m128i s0 = __lsx_vadd_w(__lsx_vadd_w(ni0, ni1), __lsx_vadd_w(ni2, ni3)); const __m128i s1 = __lsx_vadd_w(__lsx_vadd_w(ni4, ni5), __lsx_vadd_w(ni6, ni7)); y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_4(__lsx_vadd_w(s0, s1))); // Convert int32 to int16 ni0 = lsx_packs_w( ni0, ni1 ); ni2 = lsx_packs_w( ni2, ni3 ); ni4 = lsx_packs_w( ni4, ni5 ); ni6 = lsx_packs_w( ni6, ni7 ); // Convert int16 to int8 ni0 = lsx_packs_h( ni0, ni2 ); ni4 = lsx_packs_h( ni4, ni6 ); __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } //===================================== Dot products ================================= // // Helper functions // #if defined(__loongarch_asx) // shuffles to pick the required scales in dot products static inline __m256i get_scale_shuffle_q3k(int i) { static const uint8_t k_shuffle[128] = { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, }; return __lasx_xvld((const __m256i*)k_shuffle + i, 0); } static inline __m256i get_scale_shuffle_k4(int i) { static const uint8_t k_shuffle[256] = { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 }; return __lasx_xvld((const __m256i*)k_shuffle + i, 0); } static inline __m128i get_scale_shuffle(int i) { static const uint8_t k_shuffle[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, 14,14,14,14,14,14,14,14, 
15,15,15,15,15,15,15,15 }; return __lsx_vld((const __m128i*)k_shuffle + i, 0); } #endif void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__loongarch_asx) // Initialize accumulator with zeros __m256 acc = (__m256)__lasx_xvldi(0); // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ const __m256 d = __lasx_xvreplfr2vr_s( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); __m256i qx = bytes_from_nibbles_32(x[ib].qs); // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. const __m256i off = __lasx_xvreplgr2vr_b( 8 ); qx = __lasx_xvsub_b( qx, off ); __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); const __m256 q = mul_sum_i8_pairs_float(qx, qy); /* Multiply q with scale and accumulate */ acc = __lasx_xvfmadd_s( d, q, acc ); } sumf = hsum_float_8(acc); #elif defined(__loongarch_sx) // set constants const __m128i low_mask = __lsx_vreplgr2vr_b(0xF); const __m128i off = __lsx_vreplgr2vr_b(8); // Initialize accumulator with zeros __m128 acc_0 = (__m128)__lsx_vldi(0); __m128 acc_1 = (__m128)__lsx_vldi(0); __m128 acc_2 = (__m128)__lsx_vldi(0); __m128 acc_3 = (__m128)__lsx_vldi(0); for (; ib + 1 < nb; ib += 2) { // Compute combined scale for the block 0 and 1 const float ft0 = GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d); const __m128 d_0_1 = (__m128)(v4f32){ft0, ft0, ft0, ft0}; const __m128i tmp_0_1 = __lsx_vld((const __m128i *)x[ib].qs, 0); __m128i bx_0 = __lsx_vand_v(low_mask, tmp_0_1); __m128i by_0 = __lsx_vld((const __m128i *)y[ib].qs, 0); bx_0 = __lsx_vsub_b(bx_0, off); const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); __m128i bx_1 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_0_1, 4)); __m128i by_1 = __lsx_vld((const __m128i *)(y[ib].qs + 16), 0); bx_1 = __lsx_vsub_b(bx_1, off); const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); // Compute combined scale for the block 2 and 3 const float ft1 = GGML_CPU_FP16_TO_FP32(x[ib + 1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d); const __m128 d_2_3 = (__m128)(v4f32){ft1, ft1, ft1, ft1}; const __m128i tmp_2_3 = __lsx_vld((const __m128i *)x[ib + 1].qs, 0); __m128i bx_2 = __lsx_vand_v(low_mask, tmp_2_3); __m128i by_2 = __lsx_vld((const __m128i *)y[ib + 1].qs, 0); bx_2 = __lsx_vsub_b(bx_2, off); const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); __m128i bx_3 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_2_3, 4)); __m128i by_3 = __lsx_vld((const __m128i *)(y[ib + 1].qs + 16), 0); bx_3 = __lsx_vsub_b(bx_3, off); const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); // Convert int32_t to float __m128 p0 = __lsx_vffint_s_w(i32_0); __m128 p1 = __lsx_vffint_s_w(i32_1); __m128 p2 = __lsx_vffint_s_w(i32_2); __m128 p3 = __lsx_vffint_s_w(i32_3); // Apply the scale __m128 p0_d = __lsx_vfmul_s( d_0_1, p0 ); __m128 p1_d = __lsx_vfmul_s( d_0_1, p1 ); __m128 p2_d = __lsx_vfmul_s( d_2_3, p2 ); __m128 p3_d = __lsx_vfmul_s( d_2_3, p3 ); // Acummulate acc_0 = __lsx_vfadd_s(p0_d, acc_0); acc_1 = __lsx_vfadd_s(p1_d, acc_1); acc_2 = __lsx_vfadd_s(p2_d, acc_2); acc_3 = __lsx_vfadd_s(p3_d, acc_3); } sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for 
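// This scalar tail covers any blocks the SIMD paths above did not consume (and all of
// them when neither LASX nor LSX is available): each 4-bit pair is decoded as
// (q & 0x0F) - 8 and (q >> 4) - 8, dotted with the corresponding Q8 values, and scaled
// by the product of the two fp16 block scales.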
(int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F) - 8; const int v1 = (x[ib].qs[j] >> 4) - 8; sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__loongarch_asx) // Initialize accumulator with zeros __m256 acc = (__m256)__lasx_xvldi(0); float summs = 0; // Main loop for (; ib < nb; ++ib) { const float d0 = GGML_CPU_FP16_TO_FP32(x[ib].d); const float d1 = GGML_CPU_FP16_TO_FP32(y[ib].d); summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); const __m256 d0v = __lasx_xvreplfr2vr_s( d0 ); const __m256 d1v = __lasx_xvreplfr2vr_s( d1 ); // Compute combined scales const __m256 d0d1 = __lasx_xvfmul_s( d0v, d1v ); // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes const __m256i qx = bytes_from_nibbles_32(x[ib].qs); const __m256i qy = __lasx_xvld( (const __m256i *)y[ib].qs, 0); const __m256 xy = mul_sum_us8_pairs_float(qx, qy); // Accumulate d0*d1*x*y acc = __lasx_xvfmadd_s( d0d1, xy, acc ); } sumf = hsum_float_8(acc) + summs; *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__loongarch_asx) // Initialize accumulator with zeros __m256 acc = (__m256)__lasx_xvldi(0); // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ const __m256 d = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); //FIXME __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); bxhi = __lasx_xvandn_v(bxhi, __lasx_xvreplgr2vr_b((char)0xF0)); qx = __lasx_xvor_v(qx, bxhi); __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); const __m256 q = mul_sum_i8_pairs_float(qx, qy); /* Multiply q with scale and accumulate */ acc = __lasx_xvfmadd_s(d, q, acc); } sumf = hsum_float_8(acc); *s = sumf; #else UNUSED(nb); UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined(__loongarch_asx) // Initialize accumulator with zeros __m256 acc = (__m256)__lasx_xvldi(0); float summs = 0.0f; // Main loop for (; ib < nb; ++ib) { 
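// Q5_1 reconstructs 5-bit quants by OR-ing the 4-bit base from qs with the fifth bit
// taken from the 32-bit qh field: bytes_from_bits_32 spreads qh into per-byte masks and
// the AND with 0x10 places that bit at position 4, giving values in [0, 31].  The block
// minimum is handled separately through summs += m * s, added after the main loop.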
const __m256 dx = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ib].d)); summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); bxhi = __lasx_xvand_v(bxhi, __lasx_xvreplgr2vr_b(0x10)); qx = __lasx_xvor_v(qx, bxhi); const __m256 dy = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(y[ib].d)); const __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); const __m256 q = mul_sum_us8_pairs_float(qx, qy); acc = __lasx_xvfmadd_s(q, __lasx_xvfmul_s(dx, dy), acc); } sumf = hsum_float_8(acc) + summs; *s = sumf; #else UNUSED(nb); UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__loongarch_asx) // Initialize accumulator with zeros __m256 acc = (__m256)__lasx_xvldi(0); // Main loop for (; ib < nb; ++ib) { // Compute combined scale for the block const __m256 d = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i qx = __lasx_xvld((const __m256i *)x[ib].qs, 0); __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); const __m256 q = mul_sum_i8_pairs_float(qx, qy); // Multiply q with scale and accumulate acc = __lasx_xvfmadd_s( d, q, acc ); } sumf = hsum_float_8(acc); *s = sumf; #else UNUSED(nb); UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __loongarch_asx __m256 acc = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const __m128i mins_and_scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); const __m128i scales128 = __lsx_vandi_b(mins_and_scales128, 0xf); const __m256i mins = lasx_ext8_16(__lsx_vsrli_b(mins_and_scales128, 4)); const __m256i prod = lasx_madd_h(mins, __lasx_xvld((const __m256i*)y[i].bsums, 0)); acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(dmin), __lasx_xvffint_s_w(prod), acc); const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); __m256i sumi = __lasx_xvldi(0); for (int j = 0; j < QK_K/128; ++j) { const __m256i q2bits = __lasx_xvld((const __m256i*)q2, 0); q2 += 32; const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q2_0 = __lasx_xvandi_b(q2bits, 3); const __m256i q2_1 = 
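// Q2_K stores four 2-bit quants per byte; they are unpacked by shifting q2bits right by
// 0/2/4/6 and masking with 3, as in the surrounding statements.  The 4-bit scales and
// mins packed in x[i].scales were split earlier with vandi/vsrli, and the mins
// contribution is accumulated through the bsums product multiplied by the negated dmin
// scale.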
__lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 2), 3); const __m256i q2_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 4), 3); const __m256i q2_3 = __lasx_xvsrli_b(q2bits, 6); __m256i p0 = lasx_madd_h_b(q2_0, q8_0); __m256i p1 = lasx_madd_h_b(q2_1, q8_1); __m256i p2 = lasx_madd_h_b(q2_2, q8_2); __m256i p3 = lasx_madd_h_b(q2_3, q8_3); p0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p0); p1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p1); p2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p2); p3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p3); p0 = __lasx_xvadd_w(p0, p1); p2 = __lasx_xvadd_w(p2, p3); sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p0, p2)); } acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); } *s = hsum_float_8(acc); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __loongarch_asx const __m128i m32 = __lsx_vreplgr2vr_b(32); __m256 acc = (__m256)__lasx_xvldi(0); uint32_t aux[3]; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Set up scales memcpy(aux, x[i].scales, 12); __m128i scales128 = lsx_set_w( ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); scales128 = __lsx_vsub_b(scales128, m32); const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); // high bit const __m256i hbits = __lasx_xvld((const __m256i*)x[i].hmask, 0); // integer accumulator __m256i sumi = __lasx_xvldi(0); for (int j = 0; j < QK_K/128; ++j) { // load low 2 bits const __m256i q3bits = __lasx_xvld((const __m256i*)q3, 0); q3 += 32; // prepare low and high bits const __m256i q3l_0 = __lasx_xvandi_b(q3bits, 3); const __m256i q3l_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 2), 3); const __m256i q3l_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 4), 3); const __m256i q3l_3 = __lasx_xvsrli_b(q3bits, 6); const __m256i q3h_0 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 0), 0), 2); const __m256i q3h_1 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 1), 0), 2); const __m256i q3h_2 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 2), 0), 2); const __m256i q3h_3 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 3), 0), 2); const __m256i q3_0 = __lasx_xvor_v(q3h_0, q3l_0); const __m256i q3_1 = __lasx_xvor_v(q3h_1, q3l_1); const __m256i q3_2 = __lasx_xvor_v(q3h_2, q3l_2); const __m256i q3_3 = __lasx_xvor_v(q3h_3, q3l_3); // load Q8 quants const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; 
const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; __m256i p16_0 = lasx_madd_h_b(q8_0, q3_0); __m256i p16_1 = lasx_madd_h_b(q8_1, q3_1); __m256i p16_2 = lasx_madd_h_b(q8_2, q3_2); __m256i p16_3 = lasx_madd_h_b(q8_3, q3_3); // multiply with scales p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); // accumulate p16_0 = __lasx_xvadd_w(p16_0, p16_1); p16_2 = __lasx_xvadd_w(p16_2, p16_3); sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_2)); } // multiply with block scale and accumulate acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); } *s = hsum_float_8(acc); #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __loongarch_asx __m256 acc = (__m256)__lasx_xvldi(0); __m128 acc_m = (__m128)__lsx_vldi(0); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]); const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128); const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0); const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0); const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1)); const __m128i prod = lsx_madd_h(mins128, q8s); acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m); const __m256i scales = lasx_insertf128(scales128, scales128); __m256i sumi = __lasx_xvldi(0); for (int j = 0; j < QK_K/64; ++j) { const __m256i scale_l = lasx_xvrepl128vei_h(scales, 2 * j + 0); const __m256i scale_h = lasx_xvrepl128vei_h(scales, 2 * j + 1); const __m256i q4bits = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; const __m256i q4l = __lasx_xvandi_b(q4bits, 0xf); const __m256i q4h = __lasx_xvsrli_b(q4bits, 4); const __m256i q8l = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; __m256i p16l = lasx_madd_h_b(q4l, q8l); p16l = lasx_madd_h(scale_l, p16l); const __m256i q8h = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; __m256i p16h = lasx_madd_h_b(q4h, q8h); p16h = lasx_madd_h(scale_h, p16h); const __m256i sumj = __lasx_xvadd_w(p16l, p16h); sumi = __lasx_xvadd_w(sumi, sumj); } __m256 vd = __lasx_xvreplfr2vr_s(d); acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc); } acc_m = __lsx_vfadd_s(acc_m, 
(__m128)__lsx_vpermi_w((__m128i)acc_m, (__m128i)acc_m, 0xee)); __m128i tmp1 = __lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w((__m128i)acc_m, 1), 0); acc_m = __lsx_vfadd_s(acc_m, (__m128)tmp1); *s = hsum_float_8(acc) + ((v4f32)acc_m)[0]; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __loongarch_asx __m256 acc = (__m256)__lasx_xvldi(0); __m128 acc_m = (__m128)__lsx_vldi(0); for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]); const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128); const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0); const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0); const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1)); const __m128i prod = lsx_madd_h(mins128, q8s); acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m); const __m256i scales = lasx_insertf128(scales128, scales128); const __m256i hbits = __lasx_xvld((const __m256i*)x[i].qh, 0); __m256i sumi = __lasx_xvldi(0); for (int j = 0; j < QK_K/64; ++j) { const __m256i scale_0 = lasx_xvrepl128vei_h(scales, 2 * j + 0); const __m256i scale_1 = lasx_xvrepl128vei_h(scales, 2 * j + 1); const __m256i q5bits = __lasx_xvld((const __m256i*)q5, 0); q5 += 32; const __m256i q5l_0 = __lasx_xvandi_b(q5bits, 0xf); const __m256i q5l_1 = __lasx_xvsrli_b(q5bits, 4); const __m256i q5h_0 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 0), 0), 0xef); const __m256i q5h_1 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 1), 0), 0xef); const __m256i q5_0 = __lasx_xvor_v(q5l_0, q5h_0); const __m256i q5_1 = __lasx_xvor_v(q5l_1, q5h_1); const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; __m256i p16_0 = lasx_madd_h_b(q5_0, q8_0); __m256i p16_1 = lasx_madd_h_b(q5_1, q8_1); p16_0 = lasx_madd_h(scale_0, p16_0); p16_1 = lasx_madd_h(scale_1, p16_1); sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1)); } __m256 vd = __lasx_xvreplfr2vr_s(d); acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc); } acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 8)); acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 4)); *s = hsum_float_8(acc) + ((v4f32)acc_m)[0]; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); 
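// no LASX available: fall back to the generic C implementation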
ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __loongarch_asx const __m256i m32s = __lasx_xvreplgr2vr_b(32); __m256 acc = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const __m128i scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); __m256i sumi = __lasx_xvldi(0); for (int j = 0; j < QK_K/128; ++j) { const __m256i q4bits1 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; const __m256i q4bits2 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; const __m256i q4bitsH = __lasx_xvld((const __m256i*)qh, 0); qh += 32; const __m256i q4h_0 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3), 4); const __m256i q4h_1 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3 << 2), 2); const __m256i q4h_2 = __lasx_xvandi_b(q4bitsH, 3 << 4); const __m256i q4h_3 = __lasx_xvsrli_b(__lasx_xvandi_b(q4bitsH, 3 << 6), 2); const __m256i q4_0 = __lasx_xvor_v(__lasx_xvandi_b(q4bits1, 0xf), q4h_0); const __m256i q4_1 = __lasx_xvor_v(__lasx_xvandi_b(q4bits2, 0xf), q4h_1); const __m256i q4_2 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits1, 4), q4h_2); const __m256i q4_3 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits2, 4), q4h_3); const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; __m256i p16_0 = lasx_madd_h_b(__lasx_xvsub_b(q4_0, m32s), q8_0); __m256i p16_1 = lasx_madd_h_b(__lasx_xvsub_b(q4_1, m32s), q8_1); __m256i p16_2 = lasx_madd_h_b(__lasx_xvsub_b(q4_2, m32s), q8_2); __m256i p16_3 = lasx_madd_h_b(__lasx_xvsub_b(q4_3, m32s), q8_3); p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1)); sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_2, p16_3)); } acc = __lasx_xvfmadd_s((__m256)__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); } *s = hsum_float_8(acc); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } #if defined(__loongarch_asx) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 
1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, }; #endif void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if 
defined(__loongarch_asx) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[4]; const uint8_t * aux8 = (const uint8_t *)aux32; __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = __lasx_xvldi(0); __m256i sumi2 = __lasx_xvldi(0); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; const __m256i q2_1 = lasx_set_d(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); const __m256i q2_2 = lasx_set_d(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); const __m256i s2_1 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); const __m256i s2_2 = lasx_set_d(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); const uint16_t ls1 = aux32[1] >> 28; const uint16_t ls2 = aux32[3] >> 28; const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); sumi1 = __lasx_xvadd_w(sumi1, p1); sumi2 = __lasx_xvadd_w(sumi2, p2); } accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); } *s = 0.125f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__loongarch_asx) const __m256i mone = __lasx_xvreplgr2vr_b(1); static const char block_sign_shuffle_mask_1[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, }; static const char block_sign_shuffle_mask_2[32] = { 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, }; static const uint8_t bit_selector_mask_bytes[32] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m256i bit_selector_mask = __lasx_xvld((const __m256i*)bit_selector_mask_bytes, 0); const __m256i block_sign_shuffle_1 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_1, 0); const __m256i block_sign_shuffle_2 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_2, 0); static const uint8_t k_bit_helper[32] = { 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 
0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, }; const __m256i bit_helper = __lasx_xvld((const __m256i*)k_bit_helper, 0); const __m256i m511 = __lasx_xvreplgr2vr_h(511); const __m128i m4 = __lsx_vreplgr2vr_b(0xf); const __m128i m1 = __lsx_vreplgr2vr_b(1); uint64_t aux64; // somewhat hacky, but gives a significant boost in performance __m256i aux_gindex; const uint16_t * gindex = (const uint16_t *)&aux_gindex; __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(&aux64, x[i].scales, 8); __m128i stmp = __lsx_vreplgr2vr_d(aux64); stmp = __lsx_vilvl_b( __lsx_vand_v(__lsx_vsrli_h(stmp, 4), m4), __lsx_vand_v(stmp, m4)); const __m128i scales = __lsx_vadd_b(__lsx_vslli_h(stmp, 1), m1); __m256i sumi1 = __lasx_xvldi(0); __m256i sumi2 = __lasx_xvldi(0); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { const __m256i q2_data = __lasx_xvld((const __m256i*)q2, 0); q2 += 16; aux_gindex = __lasx_xvand_v(q2_data, m511); const __m256i partial_sign_bits = __lasx_xvsrli_h(q2_data, 9); const __m256i partial_sign_bits_upper = __lasx_xvsrli_h(q2_data, 13); const __m256i partial_sign_bits_for_counting = __lasx_xvxor_v(partial_sign_bits, partial_sign_bits_upper); const __m256i odd_bits = lasx_shuffle_b(bit_helper, partial_sign_bits_for_counting); const __m256i full_sign_bits = __lasx_xvor_v(partial_sign_bits, odd_bits); const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_4 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q2_1 = lasx_set_d(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); const __m256i q2_2 = lasx_set_d(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); const __m256i q2_3 = lasx_set_d(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); const __m256i q2_4 = lasx_set_d(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); const __m128i full_signs_l = lasx_extracti128(full_sign_bits, 0); const __m128i full_signs_h = lasx_extracti128(full_sign_bits, 1); const __m256i full_signs_1 = lasx_insertf128(full_signs_l, full_signs_l); const __m256i full_signs_2 = lasx_insertf128(full_signs_h, full_signs_h); __m256i signs; signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_1); signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_1 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_1); signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_2); signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_2 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_2); signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_1); signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_3 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_3); signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_2); signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_4 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_4); 
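// the q8 values now carry the codeword signs; multiply them against the iq2xs_grid bytes and weight each group by its (2*scale + 1) factor before accumulating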
const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); const __m256i dot3 = lasx_maddubs_h(q2_3, q8s_3); const __m256i dot4 = lasx_maddubs_h(q2_4, q8s_4); const __m256i sc1 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+0))); const __m256i sc2 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+1))); const __m256i sc3 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+2))); const __m256i sc4 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+3))); sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot1, sc1)); sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot2, sc2)); sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot3, sc3)); sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot4, sc4)); } accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); } *s = 0.125f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__loongarch_asx) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m128i m4 = __lsx_vreplgr2vr_b(0xf); const __m128i m1 = __lsx_vreplgr2vr_b(1); const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); uint64_t aux64; __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); const int8_t * GGML_RESTRICT q8 = y[i].qs; __m128i tmp1; memcpy(&aux64, x[i].scales, 8); tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64, 0); tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64 >> 4, 1); const __m128i scales8 = __lsx_vadd_b(__lsx_vslli_h(__lsx_vand_v(tmp1, m4), 1), m1); const __m256i scales16 = lasx_ext8_16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 __m256i sumi1 = __lasx_xvldi(0); __m256i sumi2 = __lasx_xvldi(0); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q2_1 = lasx_set_d(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); const __m256i q2_2 = lasx_set_d(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); qs += 8; __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | ((uint32_t) signs[1] << 16)); aux256 = 
__lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); aux256 = __lasx_xvreplgr2vr_w(signs[2] | ((uint32_t) signs[3] << 16)); aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); const __m256i q8s_2 = __lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); signs += 4; const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 const __m256i p1 = lasx_madd_h(dot1, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+0))); const __m256i p2 = lasx_madd_h(dot2, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+1))); sumi1 = __lasx_xvadd_w(sumi1, p1); sumi2 = __lasx_xvadd_w(sumi2, p2); } accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); } *s = 0.125f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__loongarch_asx) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[2]; __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = __lasx_xvldi(0); __m256i sumi2 = __lasx_xvldi(0); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q2_1 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); q3 += 8; const __m256i q2_2 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); q3 += 8; memcpy(aux32, gas, 8); gas += 8; const __m256i s2_1 = lasx_set_d(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); const __m256i s2_2 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); const uint16_t ls1 = aux32[0] >> 28; const uint16_t ls2 = aux32[1] >> 28; const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); sumi1 = __lasx_xvadd_w(sumi1, p1); sumi2 = __lasx_xvadd_w(sumi2, p2); } accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); } *s = 0.25f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); 
UNUSED(nb); ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__loongarch_asx) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); __m256i idx_shift = lasx_set_w(1, 2, 3, 4, 5, 6, 7, 8); const __m256i idx_mask = __lasx_xvreplgr2vr_w(256); typedef union { __m256i vec[2]; uint32_t index[16]; } index_t; index_t idx; __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = __lasx_xvldi(0); __m256i sumi2 = __lasx_xvldi(0); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i idx_l = lasx_extu8_16(__lsx_vld(qs, 0)); qs += 16; idx.vec[0] = __lasx_xvreplgr2vr_w(qh[ib32+0]); idx.vec[1] = __lasx_xvreplgr2vr_w(qh[ib32+1]); idx.vec[0] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[0], idx_shift), idx_mask); idx.vec[1] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[1], idx_shift), idx_mask); idx.vec[0] = __lasx_xvor_v(idx.vec[0], lasx_ext16_32(lasx_extracti128(idx_l, 0))); idx.vec[1] = __lasx_xvor_v(idx.vec[1], lasx_ext16_32(lasx_extracti128(idx_l, 1))); // At leat on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. 
//const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); const __m256i q2_1 = lasx_set_w( iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] ); const __m256i q2_2 = lasx_set_w( iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] ); __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | (signs[1] << 16)); aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); aux256 = __lasx_xvreplgr2vr_w(signs[2] | (signs[3] << 16)); aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); const __m256i q8s_2 = __lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); signs += 4; const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; const uint16_t ls2 = x[i].scales[ib32/2] >> 4; const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); sumi1 = __lasx_xvadd_w(sumi1, p1); sumi2 = __lasx_xvadd_w(sumi2, p2); } accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); } *s = hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } #if defined(__loongarch_asx) static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { const __m256i a = __lasx_xvmulwev_h_b(x, y); const __m256i b = __lasx_xvmulwod_h_b(x, y); return __lasx_xvadd_h(a, b); } #endif void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__loongarch_asx) __m256 accum = (__m256)__lasx_xvldi(0); float accum1 = 0; for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint16_t * qh = x[i].qh; __m256i sumi = __lasx_xvldi(0); int sumi1 = 0; for (int ib = 0; ib < QK_K/32; ib += 2) { __m256i q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)], 0); q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], 1); q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], 2); q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], 3); __m256i q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)], 0); q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], 1); q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], 2); q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], 3); qs += 8; const __m256i q8b_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; const __m256i q8b_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; 
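// dot the reconstructed grid values against q8, scale by the 3-bit block scale encoded as 2*s+1 in the upper bits of qh, and track the bsums term (with the block sign from bit 15) that later applies the IQ1S_DELTA offset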
const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; __m256i tmp1, tmp5, tmp6; tmp1 = __lasx_xvreplgr2vr_h(ls1); tmp5 = __lasx_xvmulwev_w_h(dot1, tmp1); tmp6 = __lasx_xvmulwod_w_h(dot1, tmp1); const __m256i p1 = __lasx_xvadd_w(tmp5, tmp6); tmp1 = __lasx_xvreplgr2vr_h(ls2); tmp5 = __lasx_xvmulwev_w_h(dot2, tmp1); tmp6 = __lasx_xvmulwod_w_h(dot2, tmp1); const __m256i p2 = __lasx_xvadd_w(tmp5, tmp6); sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p1, p2)); sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; } const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), accum); accum1 += d * sumi1; } *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK4_NL == 0); static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); const block_iq4_nl * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK4_NL; int ib = 0; float sumf = 0; #if defined (__loongarch_asx) const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); const __m128i m4b = __lsx_vreplgr2vr_b(0x0f); const __m256i mone = __lasx_xvreplgr2vr_h(1); __m256 accum1 = (__m256)__lasx_xvldi(0); __m256 accum2 = (__m256)__lasx_xvldi(0); for (; ib + 1 < nb; ib += 2) { const __m128i q4bits_1 = __lsx_vld((const __m128i*)x[ib + 0].qs, 0); const __m128i q4bits_2 = __lsx_vld((const __m128i*)x[ib + 1].qs, 0); const __m256i q8b_1 = __lasx_xvld((const __m256i *)y[ib + 0].qs, 0); const __m256i q8b_2 = __lasx_xvld((const __m256i *)y[ib + 1].qs, 0); const __m256i q4b_1 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_1, 4), m4b)), lsx_shuffle_b(values128, __lsx_vand_v(q4bits_1, m4b))); const __m256i q4b_2 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_2, 4), m4b)), lsx_shuffle_b(values128, __lsx_vand_v(q4bits_2, m4b))); const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const __m256i p_1 = lasx_madd_h(p16_1, mone); const __m256i p_2 = lasx_madd_h(p16_2, mone); accum1 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_CPU_FP16_TO_FP32(x[ib + 0].d)), __lasx_xvffint_s_w(p_1), accum1); accum2 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_CPU_FP16_TO_FP32(x[ib + 1].d)), __lasx_xvffint_s_w(p_2), accum2); } sumf = hsum_float_8(__lasx_xvfadd_s(accum1, accum2)); #endif for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { 
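// iq4_xs: 4-bit indices select entries from the kvalues_iq4nl table, with 6-bit sub-block scales split between scales_l and scales_h; the LASX path below does the lookup with byte shuffles and accumulates scale-weighted dot products against the q8_K quants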
assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_K == 0); const block_iq4_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__loongarch_asx) const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); __m256 accum = (__m256)__lasx_xvldi(0); for (int ibl = 0; ibl < nb; ++ibl) { const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; uint16_t sh = x[ibl].scales_h; __m256i sumi1 = __lasx_xvldi(0); __m256i sumi2 = __lasx_xvldi(0); for (int ib = 0; ib < QK_K/32; ib += 2) { const __m128i q4bits_1 = __lsx_vld((const __m128i*)qs, 0); qs += 16; const __m128i q4bits_2 = __lsx_vld((const __m128i*)qs, 0); qs += 16; const __m256i q8b_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q8b_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; const __m256i q4b_1 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_1, 4)), __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_1, 0xf))); const __m256i q4b_2 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_2, 4)), __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_2, 0xf))); const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; sh >>= 4; const __m256i p_1 = lasx_madd_h(p16_1, __lasx_xvreplgr2vr_h(ls1)); const __m256i p_2 = lasx_madd_h(p16_2, __lasx_xvreplgr2vr_h(ls2)); sumi1 = __lasx_xvadd_w(p_1, sumi1); sumi2 = __lasx_xvadd_w(p_2, sumi2); } accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accum); } *s = hsum_float_8(accum); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } ggml-org-ggml-3678254/src/ggml-cpu/arch/powerpc/000077500000000000000000000000001512524704700212625ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/powerpc/cpu-feats.cpp000066400000000000000000000040321512524704700236540ustar00rootroot00000000000000# include "ggml-backend-impl.h" #if defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) #if defined(__linux__) #include <sys/auxv.h> #endif #include <string> struct powerpc_features { std::string platform = ""; int power_version = -1; bool has_vsx = false; powerpc_features() { #if defined(__linux__) unsigned long auxval = getauxval(AT_PLATFORM); if (auxval) { platform = std::string(reinterpret_cast<const char *>(auxval)); // TBD: Do systems exist that return this in uppercase?
if (platform.substr(0, 5) == "power") { // Extract a numeric suffix, if one exists int vpos = -1; for (int i = platform.length() - 1; i >= 0; i--) { if (std::isdigit(platform[i])) { vpos = i; } else { break; } } if (vpos > -1) { power_version = std::stoi(platform.substr(vpos)); } } } #endif if (power_version >= 9) { has_vsx = true; } } }; static int ggml_backend_cpu_powerpc_score() { int score = 1; powerpc_features pf; // Platform scores #if defined(GGML_USE_POWER7) if (pf.power_version < 7) { return 0; } score += 1<<1; #endif #if defined(GGML_USE_POWER8) if (pf.power_version < 8) { return 0; } score += 1<<2; #endif #if defined(GGML_USE_POWER9) if (pf.power_version < 9) { return 0; } score += 1<<3; #endif #if defined(GGML_USE_POWER10) if (pf.power_version < 10) { return 0; } score += 1<<4; #endif #if defined(GGML_USE_POWER11) if (pf.power_version < 11) { return 0; } score += 1<<5; #endif // Feature scores #if defined(GGML_USE_VSX) if (!pf.has_vsx) { return 0; } score += 1<<6; #endif return score; } GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_powerpc_score) #endif // defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) ggml-org-ggml-3678254/src/ggml-cpu/arch/powerpc/quants.c000066400000000000000000002773021512524704700227520ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" #include <string.h> #include <assert.h> #include <float.h> #include <math.h> #include <stdlib.h> // for qsort #include <stdio.h> // for GGML_ASSERT #define GROUP_MAX_EPS 1e-15f #define GROUP_MAX_EPS_IQ3_XXS 1e-8f #define GROUP_MAX_EPS_IQ2_S 1e-8f #define GROUP_MAX_EPS_IQ1_M 1e-7f #define GROUP_MAX_EPS_IQ1_S 1e-12f #define UNUSED GGML_UNUSED #if defined(__POWER9_VECTOR__) #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) #define B8(c,s ) B7(c,s, c), B7(c,s, s) // precomputed tables for expanding 8bits to 8 bytes: static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 #endif void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0 * GGML_RESTRICT y = vy; #if defined(__POWER9_VECTOR__) for (int i = 0; i < nb; i++) { vector float srcv [8]; vector float asrcv[8]; vector float amaxv[8]; vector signed int vi[8]; for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); const float amax = MAX(MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)), MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3))); const float d = amax / ((1 << 7) - 1); const float id = d ?
1.0f/d : 0.0f; const vector float vid = vec_splats(id); y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const vector float v = vec_round(vec_mul(srcv[j], vid)); vi[j] = vec_cts(v, 0); } vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined(__POWER9_VECTOR__) for (int i = 0; i < nb; i++) { vector float srcv [8]; vector float asrcv[8]; vector float amaxv[8]; vector signed int vi[8]; for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); const float amax = MAX(MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)), MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3))); const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; const vector float vid = vec_splats(id); y[i].d = GGML_CPU_FP32_TO_FP16(d); vector int accv = vec_splats(0); for (int j = 0; j < 8; j++) { const vector float v = vec_round(vec_mul(srcv[j], vid)); vi[j] = vec_cts(v, 0); accv = vec_add(accv, vi[j]); } vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); accv = vec_add(accv, vec_sld(accv, accv, 4)); accv = vec_add(accv, vec_sld(accv, accv, 8)); y[i].s = GGML_CPU_FP32_TO_FP16(d * vec_extract(accv, 0)); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector signed char v8 = vec_splats((signed char)0x8); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 8 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector signed char q4x0 = vec_and(qxs, lowMask); vector signed char q4x1 = vec_sr(qxs, v4); q4x0 = vec_sub(q4x0, v8); q4x1 = vec_sub(q4x1, v8); vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); vector signed 
int vsumi0 = v0; vsumi0 = vec_sum4s(qv0, vsumi0); vsumi0 = vec_sum4s(qv1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q4_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 4 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m)); vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; vsumf0 = vec_madd(vxmin, vys, vsumf0); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask); vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4); vector signed int vsumi0 = v0; vsumi0 = vec_msum(q8y0, q4x0, vsumi0); vsumi0 = vec_msum(q8y1, q4x1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_MXFP4 == 0); static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); const block_mxfp4 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK_MXFP4; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector unsigned char vshift4 = vec_splats((unsigned char)4); vector float vsumf0 = vec_splats(0.0f); vector signed char kv = vec_xl(0, (const signed char *)kvalues_mxfp4); #pragma GCC unroll 8 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d) * GGML_E8M0_TO_FP32_HALF(x[ib].e)); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector signed char qxs = (vector signed char)vec_xl(0, x[ib].qs); vector unsigned char lo_nibbles = (vector unsigned char)vec_and(qxs, lowMask); vector unsigned char hi_nibbles = (vector unsigned char)vec_sr(qxs, vshift4); 
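// each nibble indexes kvalues_mxfp4 via vec_perm; the widening multiplies against the q8 quants are then summed and scaled by the shared E8M0 block exponent folded into vyd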
vector signed char q4x0 = vec_perm(kv, kv, lo_nibbles); vector signed char q4x1 = vec_perm(kv, kv, hi_nibbles); vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); vector signed int vsumi0 = vec_splats((int32_t)0); vsumi0 = vec_sum4s(qv0, vsumi0); vsumi0 = vec_sum4s(qv1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vyd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_mxfp4_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector unsigned char v4 = vec_splats((unsigned char)4); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 4 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])}; vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[ib].qh[2]]), (uint64_t)(table_b2b_1[x[ib].qh[3]])}; vector signed char qh0 = (vector signed char)aux64x2_0; vector signed char qh1 = (vector signed char)aux64x2_1; vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl( 16, y[ib].qs); vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); qv0 = vec_add(qv0, qv1); vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 4 for (; ib < nb; 
++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m)); vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; vsumf0 = vec_madd(vxmin, vys, vsumf0); vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])}; vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[ib].qh[2]]), (uint64_t)(table_b2b_0[x[ib].qh[3]])}; vector signed char qh0 = (vector signed char)aux64x2_0; vector signed char qh1 = (vector signed char)aux64x2_1; vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0); vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl( 16, y[ib].qs); vector signed int vsumi0 = v0; vsumi0 = vec_msum(q8y0, q5x0, vsumi0); vsumi0 = vec_msum(q8y1, q5x1, vsumi0); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(nb); UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); #pragma GCC unroll 8 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char q8x0 = vec_xl( 0, x[ib].qs); vector signed char q8x1 = vec_xl(16, x[ib].qs); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector signed short qv0 = vec_mule(q8x0, q8y0); vector signed short qv1 = vec_mulo(q8x0, q8y0); vector signed short qv2 = vec_mule(q8x1, q8y1); vector signed short qv3 = vec_mulo(q8x1, q8y1); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vsumi0 = vec_sum4s(qv0, vsumi0); vsumi1 = vec_sum4s(qv1, vsumi1); vsumi0 = vec_sum4s(qv2, vsumi0); vsumi1 = vec_sum4s(qv3, vsumi1); vsumi0 = vec_add(vsumi0, vsumi1); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); } vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K 
* GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0x3); const vector signed char lowScaleMask = vec_splats((signed char)0xF); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v6 = vec_splats((unsigned char)0x6); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); vector signed short q8ysums1 = vec_xl(16, y[i].bsums); vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales); vector signed char vscales = vec_and(q2xmins, lowScaleMask); q2xmins = vec_sr(q2xmins, v4); vector signed short q2xmins0 = vec_unpackh(q2xmins); vector signed short q2xmins1 = vec_unpackl(q2xmins); vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; vector signed int vsumi4 = v0; vector signed int vsumi5 = v0; vector signed int vsumi6 = v0; vector signed int vsumi7 = v0; const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/128; ++j) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q2); vector signed char qxs1 = (vector signed char)vec_xl(16, q2); q2 += 32; vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask); vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask); vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask); vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask); vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask); vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask); vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask); vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y01 = vec_xl( 32, q8); vector signed char q8y11 = vec_xl( 48, q8); vector signed char q8y02 = vec_xl( 64, q8); vector signed char q8y12 = vec_xl( 80, q8); vector signed char q8y03 = vec_xl( 96, q8); vector signed char q8y13 = vec_xl(112, q8); q8 += 128; vector signed int qv0 = vec_msum(q8y00, q2x00, v0); vector signed int qv1 = vec_msum(q8y01, q2x01, v0); vector signed int qv2 = vec_msum(q8y02, q2x02, v0); vector signed int qv3 = vec_msum(q8y03, 
q2x03, v0); vector signed int qv4 = vec_msum(q8y10, q2x10, v0); vector signed int qv5 = vec_msum(q8y11, q2x11, v0); vector signed int qv6 = vec_msum(q8y12, q2x12, v0); vector signed int qv7 = vec_msum(q8y13, q2x13, v0); vector signed short vscales_07 = vec_unpackh(vscales); vector signed int vscales_03 = vec_unpackh(vscales_07); vector signed int vscales_47 = vec_unpackl(vscales_07); vector signed int vs0 = vec_splat(vscales_03, 0); vector signed int vs1 = vec_splat(vscales_03, 1); vector signed int vs2 = vec_splat(vscales_03, 2); vector signed int vs3 = vec_splat(vscales_03, 3); vector signed int vs4 = vec_splat(vscales_47, 0); vector signed int vs5 = vec_splat(vscales_47, 1); vector signed int vs6 = vec_splat(vscales_47, 2); vector signed int vs7 = vec_splat(vscales_47, 3); vscales = vec_sld(vscales, vscales, 8); vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1); vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2); vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3); vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4); vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5); vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6); vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7); } vsumi0 = vec_add(vsumi0, vsumi4); vsumi1 = vec_add(vsumi1, vsumi5); vsumi2 = vec_add(vsumi2, vsumi6); vsumi3 = vec_add(vsumi3, vsumi7); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0x3); const vector signed char lowMask1 = vec_splats((int8_t)0xf); const vector signed char lowMask2 = vec_splats((int8_t)0x30); const vector int v0 = vec_splats((int32_t)0); const vector signed char v1 = vec_splats((signed char)0x1); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v3 = vec_splats((unsigned char)0x3); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector unsigned char v6 = vec_splats((unsigned char)0x6); const vector signed char off = vec_splats((signed char)0x20); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); UNUSED(kmask1); UNUSED(kmask2); vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); vector signed char u1 = vec_and(u0, lowMask1); vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); vector signed char u3 = (vector signed 
char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); vector signed char u31 = vec_and(u3, lowMask2); u1 = vec_or(u1, u30); u2 = vec_or(vec_sr(u0, v4), u31); vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); vscales = vec_sub(vscales, off); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; vector signed int vsumi4 = v0; vector signed int vsumi5 = v0; vector signed int vsumi6 = v0; vector signed int vsumi7 = v0; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/128; ++j) { __builtin_prefetch(q3, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); vector signed char qxs1 = (vector signed char)vec_xl(16, q3); q3 += 32; //the low 2 bits vector signed char qxs00 = vec_and(qxs0, lowMask); vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); vector signed char qxs10 = vec_and(qxs1, lowMask); vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); //the 3rd bit vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); qxhs0 = vec_sr(qxhs0, v4); qxhs1 = vec_sr(qxhs1, v4); vector signed char q3x00 = vec_sub(qxs00, qxh00); vector signed char q3x01 = vec_sub(qxs01, qxh01); vector signed char q3x02 = vec_sub(qxs02, qxh02); vector signed char q3x03 = vec_sub(qxs03, qxh03); vector signed char q3x10 = vec_sub(qxs10, qxh10); vector signed char q3x11 = vec_sub(qxs11, qxh11); vector signed char q3x12 = vec_sub(qxs12, qxh12); vector signed char q3x13 = vec_sub(qxs13, qxh13); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y01 = vec_xl( 32, q8); vector signed char q8y11 = vec_xl( 48, q8); vector signed char q8y02 = vec_xl( 64, q8); vector signed char q8y12 = vec_xl( 80, q8); vector signed char q8y03 = vec_xl( 96, q8); vector signed char q8y13 = vec_xl(112, q8); q8 += 128; vector signed short vscales_h = vec_unpackh(vscales); vector signed short vs0 = vec_splat(vscales_h, 0); vector signed short vs1 = vec_splat(vscales_h, 1); vector signed short vs2 = vec_splat(vscales_h, 2); vector signed short vs3 = vec_splat(vscales_h, 3); vector signed short vs4 = vec_splat(vscales_h, 4); vector signed short vs5 = vec_splat(vscales_h, 5); vector signed short vs6 = vec_splat(vscales_h, 6); vector signed short vs7 = vec_splat(vscales_h, 7); vscales = vec_sld(vscales, vscales, 8); vector signed short qv00 = 
vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); vsumi0 = vec_msum(qv00, vs0, vsumi0); vsumi1 = vec_msum(qv01, vs2, vsumi1); vsumi2 = vec_msum(qv02, vs4, vsumi2); vsumi3 = vec_msum(qv03, vs6, vsumi3); vsumi4 = vec_msum(qv10, vs1, vsumi4); vsumi5 = vec_msum(qv11, vs3, vsumi5); vsumi6 = vec_msum(qv12, vs5, vsumi6); vsumi7 = vec_msum(qv13, vs7, vsumi7); } vsumi0 = vec_add(vsumi0, vsumi4); vsumi1 = vec_add(vsumi1, vsumi5); vsumi2 = vec_add(vsumi2, vsumi6); vsumi3 = vec_add(vsumi3, vsumi7); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed char lowMask1 = vec_splats((int8_t)0x3f); const vector signed char lowMask2 = vec_splats((int8_t)0x30); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v2 = vec_splats((uint8_t)2); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); vector signed short q8ysums1 = vec_xl(16, y[i].bsums); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); vector signed char u3 = vec_sr(u2, v4); vector signed char u30 = u1; vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), 
(vector signed int)u3); u1 = vec_and(u0, lowMask1); u2 = vec_or(u30, u31); vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); vector signed short vscales = vec_unpackh(utmps); vector signed short q4xmins = vec_unpackl(utmps); vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins); vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins); vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); vector signed int prod1 = vec_mule(q4xmins1, q8ysums1); vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0); vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1); vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/64; j+=2) { __builtin_prefetch(q4, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); vector signed char qxs1 = (vector signed char)vec_xl(16, q4); vector signed char qxs2 = (vector signed char)vec_xl(32, q4); vector signed char qxs3 = (vector signed char)vec_xl(48, q4); q4 += 64; vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask); vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4); vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask); vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4); vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask); vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4); vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask); vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y01 = vec_xl( 32, q8); vector signed char q8y11 = vec_xl( 48, q8); vector signed char q8y20 = vec_xl( 64, q8); vector signed char q8y30 = vec_xl( 80, q8); vector signed char q8y21 = vec_xl( 96, q8); vector signed char q8y31 = vec_xl(112, q8); q8 += 128; vector signed int qv00 = vec_msum(q8y00, q4x00, v0); vector signed int qv01 = vec_msum(q8y01, q4x01, v0); vector signed int qv10 = vec_msum(q8y10, q4x10, v0); vector signed int qv11 = vec_msum(q8y11, q4x11, v0); vector signed int qv20 = vec_msum(q8y20, q4x20, v0); vector signed int qv21 = vec_msum(q8y21, q4x21, v0); vector signed int qv30 = vec_msum(q8y30, q4x30, v0); vector signed int qv31 = vec_msum(q8y31, q4x31, v0); vector signed int vscales_h = vec_unpackh(vscales); vector signed int vs0 = vec_splat(vscales_h, 0); vector signed int vs1 = vec_splat(vscales_h, 1); vector signed int vs2 = vec_splat(vscales_h, 2); vector signed int vs3 = vec_splat(vscales_h, 3); vscales = vec_sld(vscales, vscales, 8); vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1); vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2); vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3); vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1); vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2); vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = 
vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed char lowMask1 = vec_splats((int8_t)0x3f); const vector signed char lowMask2 = vec_splats((int8_t)0x30); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v1 = vec_splats((unsigned char)0x1); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v3 = vec_splats((unsigned char)0x3); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); vector signed char u3 = vec_sr(u2, v4); vector signed char u30 = u1; vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); u1 = vec_and(u0, lowMask1); u2 = vec_or(u30, u31); vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); vector signed short q8ysums1 = vec_xl(16, y[i].bsums); vector signed short vscales = vec_unpackh(utmps); vector signed short q5xmins = vec_unpackl(utmps); vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins); vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins); vector signed int prod0 = vec_mule(q5xmins0, q8ysums0); vector signed int prod1 = vec_mule(q5xmins1, q8ysums1); vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0); vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1); vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh); vector signed int vsumi0 = v0; vector signed int 
vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/64; ++j) { __builtin_prefetch(q5, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q5); vector signed char qxs1 = (vector signed char)vec_xl(16, q5); q5 += 32; vector signed char qxs00 = vec_and(qxs0, lowMask); vector signed char qxs01 = vec_sr(qxs0, v4); vector signed char qxs10 = vec_and(qxs1, lowMask); vector signed char qxs11 = vec_sr(qxs1, v4); vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4); vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3); vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4); vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3); qxhs0 = vec_sr(qxhs0, v2); qxhs1 = vec_sr(qxhs1, v2); vector unsigned char q5x00 = (vector unsigned char)vec_or(q5h00, qxs00); vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01); vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10); vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl(16, q8); vector signed char q8y01 = vec_xl(32, q8); vector signed char q8y11 = vec_xl(48, q8); q8 += 64; vector signed int qv00 = vec_msum(q8y00, q5x00, v0); vector signed int qv01 = vec_msum(q8y01, q5x01, v0); vector signed int qv10 = vec_msum(q8y10, q5x10, v0); vector signed int qv11 = vec_msum(q8y11, q5x11, v0); vector signed int vscales_h = vec_unpackh(vscales); vector signed int vs0 = vec_splat(vscales_h, 0); vector signed int vs1 = vec_splat(vscales_h, 1); vscales = vec_sld(vscales, vscales, 12); vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1); vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2); vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v2 = vec_splats((unsigned char)0x2); const vector unsigned char v3 = vec_splats((unsigned char)0x3); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector unsigned char v6 = vec_splats((unsigned char)0x6); const vector signed char off = vec_splats((signed char)0x20); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float 
vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; vector signed int vsumi4 = v0; vector signed int vsumi5 = v0; vector signed int vsumi6 = v0; vector signed int vsumi7 = v0; const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT qs = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/128; ++j) { __builtin_prefetch(q6, 0, 0); __builtin_prefetch(qh, 0, 0); __builtin_prefetch(q8, 0, 0); vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); vector signed char qxs1 = (vector signed char)vec_xl(16, q6); vector signed char qxs2 = (vector signed char)vec_xl(32, q6); vector signed char qxs3 = (vector signed char)vec_xl(48, q6); q6 += 64; vector signed char qxs00 = vec_and(qxs0, lowMask); vector signed char qxs01 = vec_sr(qxs0, v4); vector signed char qxs10 = vec_and(qxs1, lowMask); vector signed char qxs11 = vec_sr(qxs1, v4); vector signed char qxs20 = vec_and(qxs2, lowMask); vector signed char qxs21 = vec_sr(qxs2, v4); vector signed char qxs30 = vec_and(qxs3, lowMask); vector signed char qxs31 = vec_sr(qxs3, v4); vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); qh += 32; vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); vector signed char q8y00 = vec_xl( 0, q8); vector signed char q8y10 = vec_xl( 16, q8); vector signed char q8y20 = vec_xl( 32, q8); vector signed char q8y30 = vec_xl( 48, q8); vector signed char q8y01 = vec_xl( 64, q8); vector signed char q8y11 = vec_xl( 80, q8); vector signed char q8y21 = vec_xl( 96, q8); vector signed char q8y31 = vec_xl(112, q8); q8 += 128; vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, 
q8y01)); vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); qs += 8; vector signed short vs0 = vec_splat(vscales, 0); vector signed short vs1 = vec_splat(vscales, 1); vector signed short vs2 = vec_splat(vscales, 2); vector signed short vs3 = vec_splat(vscales, 3); vector signed short vs4 = vec_splat(vscales, 4); vector signed short vs5 = vec_splat(vscales, 5); vector signed short vs6 = vec_splat(vscales, 6); vector signed short vs7 = vec_splat(vscales, 7); vsumi0 = vec_msum(qv00, vs0, vsumi0); vsumi1 = vec_msum(qv01, vs4, vsumi1); vsumi2 = vec_msum(qv10, vs1, vsumi2); vsumi3 = vec_msum(qv11, vs5, vsumi3); vsumi4 = vec_msum(qv20, vs2, vsumi4); vsumi5 = vec_msum(qv21, vs6, vsumi5); vsumi6 = vec_msum(qv30, vs3, vsumi6); vsumi7 = vec_msum(qv31, vs7, vsumi7); } vsumi0 = vec_add(vsumi0, vsumi4); vsumi1 = vec_add(vsumi1, vsumi5); vsumi2 = vec_add(vsumi2, vsumi6); vsumi3 = vec_add(vsumi3, vsumi7); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } #if defined (__POWER9_VECTOR__) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 
-1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, }; #endif void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); uint32_t aux32[4]; const uint8_t * aux8 = (const uint8_t *)aux32; memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + 
aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); const uint16_t ls0 = aux32[1] >> 28; const uint16_t ls1 = aux32[3] >> 28; vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1)); vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1)); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.125f * vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const 
uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/64; ++j) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; q2 += 8; vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); const uint16_t ls1 = (uint16_t)(sc[0] >> 4); const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); const uint16_t ls3 = (uint16_t)(sc[1] >> 4); sc += 2; vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); vsumi0 = vec_msum(qv0, vscales0, vsumi0); vsumi1 = vec_msum(qv1, vscales1, vsumi1); vsumi2 = vec_msum(qv2, vscales2, vsumi2); vsumi3 = vec_msum(qv3, vscales3, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.125f * vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % 
QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const vector unsigned char mask0 = vec_xl( 0, k_mask1); const vector unsigned char mask1 = vec_xl(16, k_mask1); const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q2, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; q2 += 8; qh += 2; vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); signs += 4; vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = 
vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); const uint16_t ls1 = (uint16_t)(sc[0] >> 4); const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); const uint16_t ls3 = (uint16_t)(sc[1] >> 4); sc += 2; vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); vsumi0 = vec_msum(qv0, vscales0, vsumi0); vsumi1 = vec_msum(qv1, vscales1, vsumi1); vsumi2 = vec_msum(qv2, vscales2, vsumi2); vsumi3 = vec_msum(qv3, vscales3, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.125f * vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint32_t * GGML_RESTRICT signs = (const uint32_t *)(x[i].qs + QK_K/4); const int8_t * GGML_RESTRICT q8 = y[i].qs; #pragma GCC unroll 1 for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q3, 0, 1); __builtin_prefetch(q8, 0, 1); vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; q3 += 16; vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; 
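/* Note on the sign expansion here (inferred from the keven_signs_q2xs table above,
 * which is also used by the iq2_xxs/iq2_xs kernels): each 7-bit field of the packed
 * 32-bit sign words indexes an 8-byte entry of +1/-1 values, and the eighth sign of
 * every group of eight quants is not stored -- it appears to be implied so that the
 * number of negative signs per group stays even (hence "keven"). vec_mul of these
 * +-1 bytes with the iq3xxs_grid bytes applies the signs before the int8 dot
 * product against q8. */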
vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1); vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); const uint16_t ls0 = (uint16_t)(signs[0] >> 28); const uint16_t ls1 = (uint16_t)(signs[1] >> 28); signs += 2; vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = 0.25f * vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; const vector int v0 = vec_splats((int32_t)0); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const vector unsigned char mask0 = vec_xl( 0, k_mask1); const vector unsigned char mask1 = vec_xl(16, k_mask1); const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * 
GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].signs); const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q3, 0, 1); __builtin_prefetch(q8, 0, 1); vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; q3 += 16; qh += 2; vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); signs += 4; vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0); vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); const uint16_t ls1 = (uint16_t)(sc[0] >> 4); sc ++; vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = 
vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector unsigned char v0 = vec_splats((unsigned char)0x0); const vector unsigned short vsign = vec_splats((unsigned short)0x8000); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = vec_splats((int32_t)0); vector signed int vsumi1 = vec_splats((int32_t)0); vector signed int vsumi2 = vec_splats((int32_t)0); vector signed int vsumi3 = vec_splats((int32_t)0); vector signed int vsumi8 = vec_splats((int32_t)0); const uint8_t * GGML_RESTRICT q1 = x[i].qs; const uint16_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const int16_t * GGML_RESTRICT qs = y[i].bsums; for (int j = 0; j < QK_K/32; j += 2) { __builtin_prefetch(q1, 0, 1); __builtin_prefetch(qh, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; q1 += 8; vector signed char q1x0 = (vector signed char)aux64x2_0; vector signed char q1x1 = (vector signed char)aux64x2_1; vector signed char q1x2 = (vector signed char)aux64x2_2; vector signed char q1x3 = (vector signed char)aux64x2_3; vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); vector signed short vscales = vec_sld(vscales23, vscales01, 8); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, 
vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); vector signed short q8ysums = vec_xl_len(qs, 8); qs += 4; q8ysums = vec_mergeh(q8ysums, (vector signed short)v0); vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); qh += 2; vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0); vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK4_NL == 0); static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); const block_iq4_nl * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK4_NL; int ib = 0; float sumf = 0; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector signed int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); const vector signed char values = vec_xl( 0, kvalues_iq4nl); #pragma GCC unroll 4 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); vector signed char q4x0 = vec_and(qxs, lowMask); vector signed char q4x1 = vec_sr(qxs, v4); q4x0 = vec_perm(values, values, (vector unsigned char)q4x0); q4x1 = vec_perm(values, values, (vector unsigned char)q4x1); vector signed char q8y0 = vec_xl( 0, y[ib].qs); vector signed char q8y1 = vec_xl(16, y[ib].qs); vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vsumi0 = vec_sum4s(qv0, vsumi0); vsumi1 = vec_sum4s(qv1, vsumi1); vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); } vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); sumf = vec_extract(vsumf0, 0); *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_iq4_nl_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, 
size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_K == 0); const block_iq4_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__POWER9_VECTOR__) const vector signed char lowMask = vec_splats((signed char)0xF); const vector int v0 = vec_splats((int32_t)0); const vector unsigned char v4 = vec_splats((unsigned char)0x4); vector float vsumf0 = vec_splats(0.0f); vector float vsumf1 = vec_splats(0.0f); vector float vsumf2 = vec_splats(0.0f); vector float vsumf3 = vec_splats(0.0f); const vector signed char values = vec_xl( 0, kvalues_iq4nl); for (int ibl = 0; ibl < nb; ++ibl) { vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ibl].d)); vector float vyd = vec_splats(y[ibl].d); vector float vd = vec_mul(vxd, vyd); vector signed int vsumi0 = v0; vector signed int vsumi1 = v0; vector signed int vsumi2 = v0; vector signed int vsumi3 = v0; uint16_t h = x[ibl].scales_h; const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; const uint8_t * GGML_RESTRICT sc = x[ibl].scales_l; const int8_t * GGML_RESTRICT q8 = y[ibl].qs; for (int ib = 0; ib < QK_K/64; ib ++ ) { __builtin_prefetch(q4, 0, 1); __builtin_prefetch(q8, 0, 1); vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); vector signed char qxs1 = (vector signed char)vec_xl(16, q4); q4 += 32; vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask); vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4); vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask); vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4); q4x00 = vec_perm(values, values, (vector unsigned char)q4x00); q4x01 = vec_perm(values, values, (vector unsigned char)q4x01); q4x10 = vec_perm(values, values, (vector unsigned char)q4x10); q4x11 = vec_perm(values, values, (vector unsigned char)q4x11); vector signed char q8y0 = vec_xl( 0, q8); vector signed char q8y1 = vec_xl(16, q8); vector signed char q8y2 = vec_xl(32, q8); vector signed char q8y3 = vec_xl(48, q8); q8 += 64; vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0)); vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1)); vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2)); vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3)); const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32); const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32); h >>= 4; sc ++; vector signed short vscales01 = vec_splats((int16_t)ls0); vector signed short vscales23 = vec_splats((int16_t)ls1); vsumi0 = vec_msum(qv0, vscales01, vsumi0); vsumi1 = vec_msum(qv1, vscales01, vsumi1); vsumi2 = vec_msum(qv2, vscales23, vsumi2); vsumi3 = vec_msum(qv3, vscales23, vsumi3); } vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); } vsumf0 = vec_add(vsumf0, vsumf2); vsumf1 = vec_add(vsumf1, vsumf3); vsumf0 = vec_add(vsumf0, vsumf1); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); *s = vec_extract(vsumf0, 0); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } 
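// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream kernels): the POWER9 dot
// products above all share one shape — expand each 32-wide sub-block of x to
// int8 (grid/nibble lookup plus sign handling), multiply with the q8 values
// (vec_mule/vec_mulo), fold the per-sub-block scale into the int32
// accumulators (vec_msum), then convert to float and fuse with d = x.d * y.d
// (vec_ctf + vec_madd), finishing with a horizontal add built from vec_sld.
// The scalar reference below shows that shape for a hypothetical layout of
// nsub 32-wide sub-blocks; the function name, its parameters and the 2*ls+1
// scale mapping (used by the iq3/iq1 kernels above; iq4_xs uses plain signed
// scales) are assumptions for illustration only, not upstream API.
static inline float vec_dot_subblock_scalar_sketch(float dx, float dy,
                                                   const int8_t  * qx,  // expanded x quants
                                                   const int8_t  * qy,  // q8 quants
                                                   const uint8_t * ls,  // per-sub-block scales
                                                   int nsub) {
    int32_t isum = 0;
    for (int sb = 0; sb < nsub; ++sb) {
        int32_t part = 0;                            // plays the role of qv0..qv3
        for (int k = 0; k < 32; ++k) {
            part += (int32_t) qx[32*sb + k] * (int32_t) qy[32*sb + k];
        }
        isum += part * (2*(int32_t) ls[sb] + 1);     // scale folded in, as vec_msum does
    }
    return dx * dy * (float) isum;                   // vec_ctf + vec_madd + final reduction
}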
ggml-org-ggml-3678254/src/ggml-cpu/arch/riscv/000077500000000000000000000000001512524704700207315ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/riscv/cpu-feats.cpp000066400000000000000000000014471512524704700233320ustar00rootroot00000000000000#include "ggml-backend-impl.h" #if defined(__riscv) && __riscv_xlen == 64 #include #include #include struct riscv64_features { bool has_rvv = false; riscv64_features() { struct riscv_hwprobe probe; probe.key = RISCV_HWPROBE_KEY_IMA_EXT_0; probe.value = 0; int ret = syscall(__NR_riscv_hwprobe, &probe, 1, 0, NULL, 0); if (0 == ret) { has_rvv = !!(probe.value & RISCV_HWPROBE_IMA_V); } } }; static int ggml_backend_cpu_riscv64_score() { int score = 1; riscv64_features rf; #ifdef GGML_USE_RVV if (!rf.has_rvv) { return 0; } score += 1 << 1; #endif return score; } GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_riscv64_score) #endif // __riscv && __riscv_xlen == 64 ggml-org-ggml-3678254/src/ggml-cpu/arch/riscv/quants.c000066400000000000000000002306501512524704700224160ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" #include #include #include #include #include // for qsort #include // for GGML_ASSERT #define GROUP_MAX_EPS 1e-15f #define GROUP_MAX_EPS_IQ3_XXS 1e-8f #define GROUP_MAX_EPS_IQ2_S 1e-8f #define GROUP_MAX_EPS_IQ1_M 1e-7f #define GROUP_MAX_EPS_IQ1_S 1e-12f #define UNUSED GGML_UNUSED void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0 * GGML_RESTRICT y = vy; #if defined(__riscv_v) size_t vl = QK8_0; for (int i = 0; i < nb; i++) { // load elements vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_0, vl); vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); // convert to integer vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); // store result __riscv_vse8_v_i8m2(y[i].qs , vs, vl); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined(__riscv_v) size_t vl = QK8_1; for (int i = 0; i < nb; i++) { // load elements vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_1, vl); vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); // convert to integer vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); // store result __riscv_vse8_v_i8m2(y[i].qs , vs, vl); // compute sum for y[i].s vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); vint16m1_t vwrs = __riscv_vwredsum_vs_i8m2_i16m1(vs, tmp2, vl); // set y[i].s int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); y[i].s = GGML_CPU_FP32_TO_FP16(sum*d); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { #if defined(__riscv_v) const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; size_t vl = qk / 2; for (; ib < nb; ++ib) { // load elements vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); // mask and store lower part of x, and then upper part vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a); vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l); // subtract offset vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl); vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl); vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; #else ggml_vec_dot_q4_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { #if defined(__riscv_v) const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; size_t vl = qk / 2; for (; ib < nb; ++ib) { // load elements vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); // mask and store lower part of x, and then upper part vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a); vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l); vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; #else 
ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { #if defined(__riscv_v) const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; size_t vl; size_t vlenb = __riscv_vlenb(); for (; ib < nb; ++ib) { vl = qk / 2; vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); vint8m2_t v0c; if (vlenb == 16) { v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); } else { v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); } vl = qk; vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); qh = __riscv_vmnand_mm_b4(qh, qh, vl); vint8m2_t v0f = __riscv_vsub_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; #else ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { #if defined(__riscv_v) const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; size_t vl; size_t vlenb = __riscv_vlenb(); for (; ib < nb; ++ib) { vl = qk / 2; vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); vint8m2_t v0c; if (vlenb == 16) { v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); } else { v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); } vl = qk; vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); vint8m2_t v0f = __riscv_vor_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; #else ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float 
sumf = 0; #if defined(__riscv_v) size_t vl = qk; for (; ib < nb; ++ib) { // load elements vint8m2_t bx_0 = __riscv_vle8_v_i8m2(x[ib].qs, vl); vint8m2_t by_0 = __riscv_vle8_v_i8m2(y[ib].qs, vl); vint16m4_t vw_mul = __riscv_vwmul_vv_i16m4(bx_0, by_0, vl); vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); vint32m1_t v_sum = __riscv_vwredsum_vs_i16m4_i32m1(vw_mul, v_zero, vl); int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __riscv_xtheadvector float sumf = 0; uint8_t atmp[16]; for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); uint8_t *patmp = atmp; int vsums; int tmp; __asm__ __volatile__( "th.vsetvli zero, %[vl16], e8, m1\n\t" "th.vmv.v.x v8, zero\n\t" "th.vlb.v v1, (%[sc])\n\t" "th.vand.vi v0, v1, 0xF\n\t" "th.vsrl.vi v1, v1, 4\n\t" "th.vsb.v v0, (%[scale])\n\t" "th.vwaddu.vx v16, v1, zero\n\t" "th.vsetvli zero, %[vl16], e16, m2\n\t" "th.vlh.v v2, (%[bsums])\n\t" "th.vwmul.vv v4, v16, v2\n\t" "th.vsetvli zero, %[vl16], e32, m4\n\t" "th.vredsum.vs v8, v4, v8\n\t" "th.vmv.x.s %[vsums], v8" : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums) : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums) , [vl16] "r" (16) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); sumf += dmin * vsums; int isum = 0; for (int j = 0; j < QK_K/128; ++j) { __asm__ __volatile__( "th.vsetvli zero, %[vl32], e8, m2\n\t" "th.vlb.v v0, (%[q2])\n\t" "th.vsrl.vi v2, v0, 2\n\t" "th.vsrl.vi v4, v0, 4\n\t" "th.vsrl.vi v6, v0, 6\n\t" "th.vand.vi v0, v0, 0x3\n\t" "th.vand.vi v2, v2, 0x3\n\t" "th.vand.vi v4, v4, 0x3\n\t" "th.vsetvli zero, %[vl128], e8, m8\n\t" "th.vlb.v v8, (%[q8])\n\t" "th.vsetvli zero, %[vl64], e8, m4\n\t" "th.vwmul.vv v16, v0, v8\n\t" "th.vwmul.vv v24, v4, v12\n\t" "th.vsetvli zero, %[vl16], e16, m2\n\t" "th.vmv.v.x v0, zero\n\t" "th.vwredsum.vs v10, v16, v0\n\t" "th.vwredsum.vs v9, v18, v0\n\t" "th.vwredsum.vs v8, v20, v0\n\t" "th.vwredsum.vs v7, v22, v0\n\t" "th.vwredsum.vs v11, v24, v0\n\t" "th.vwredsum.vs v12, v26, v0\n\t" "th.vwredsum.vs v13, v28, v0\n\t" "th.vwredsum.vs v14, v30, v0\n\t" "li %[tmp], 4\n\t" "th.vsetvli zero, %[tmp], e32, m1\n\t" "th.vslideup.vi v10, v9, 1\n\t" "th.vslideup.vi v8, v7, 1\n\t" "th.vslideup.vi v11, v12, 1\n\t" "th.vslideup.vi v13, v14, 1\n\t" "th.vslideup.vi v10, v8, 2\n\t" "th.vslideup.vi v11, v13, 2\n\t" "li %[tmp], 8\n\t" "th.vsetvli zero, %[tmp], e32, m2\n\t" "th.vlbu.v v12, (%[scale])\n\t" "th.vmul.vv v10, v10, v12\n\t" "th.vredsum.vs v0, v10, v0\n\t" "th.vmv.x.s %[tmp], v0\n\t" "add %[isum], %[isum], %[tmp]" : [tmp] "=&r" (tmp), [isum] "+&r" (isum) : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8) , [vl16] "r" (16), [vl32] "r" (32), 
[vl64] "r" (64), [vl128] "r" (128) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); q2 += 32; q8 += 128; patmp += 8; } sumf += dall * isum; } *s = sumf; #elif defined __riscv_v float sumf = 0; uint8_t atmp[16]; const int vector_length = __riscv_vlenb() * 8; uint8_t temp_01[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; switch (vector_length) { case 256: for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); size_t vl = 16; vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl); vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl); vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl); vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl); vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl); vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl); vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums); vl = 32; vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl); uint8_t is = 0; int isum = 0; for (int j = 0; j < QK_K / 128; ++j) { // load Q2 vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl); vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl); vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl); vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl); vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl); // duplicate scale elements for product vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0 + is, vl), vl); vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2 + is, vl), vl); vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4 + is, vl), vl); vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6 + is, vl), vl); vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl)); vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl)); vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl)); vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl)); // load Q8 vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8 + 32, vl); vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8 + 64, vl); vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8 + 96, vl); vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl); vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl); vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl); vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl); vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl); vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl); isum += __riscv_vmv_x_s_i32m1_i32(isum1); q2 += 32; q8 += 
128; is = 8; } sumf += dall * isum; } break; case 128: for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); uint8_t *patmp = atmp; int vsums; int tmp, t1, t2, t3, t4, t5, t6, t7; __asm__ __volatile__( "vsetivli zero, 16, e8, m1\n\t" "vmv.v.x v8, zero\n\t" "lb zero, 15(%[sc])\n\t" "vle8.v v1, (%[sc])\n\t" "vle8.v v2, (%[bsums])\n\t" "addi %[tmp], %[bsums], 16\n\t" "vand.vi v0, v1, 0xF\n\t" "vsrl.vi v1, v1, 4\n\t" "vle8.v v3, (%[tmp])\n\t" "vse8.v v0, (%[scale])\n\t" "vsetivli zero, 16, e16, m2\n\t" "vzext.vf2 v0, v1\n\t" "vwmul.vv v4, v0, v2\n\t" "vsetivli zero, 16, e32, m4\n\t" "vredsum.vs v8, v4, v8\n\t" "vmv.x.s %[vsums], v8" : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums) : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); sumf += dmin * vsums; int isum = 0; for (int j = 0; j < QK_K/128; ++j) { __asm__ __volatile__( "lb zero, 31(%[q2])\n\t" "addi %[tmp], %[q2], 16\n\t" "addi %[t1], %[q8], 16\n\t" "vsetivli zero, 16, e8, m1\n\t" "vle8.v v0, (%[q2])\n\t" "vle8.v v1, (%[tmp])\n\t" "vsrl.vi v2, v0, 2\n\t" "vsrl.vi v3, v1, 2\n\t" "vsrl.vi v4, v0, 4\n\t" "addi %[tmp], %[q8], 32\n\t" "vle8.v v8, (%[q8])\n\t" "vle8.v v9, (%[t1])\n\t" "addi %[t1], %[t1], 32\n\t" "vsrl.vi v5, v1, 4\n\t" "vsrl.vi v6, v0, 6\n\t" "vsrl.vi v7, v1, 6\n\t" "vle8.v v10, (%[tmp])\n\t" "vle8.v v11, (%[t1])\n\t" "addi %[tmp], %[tmp], 32\n\t" "addi %[t1], %[t1], 32\n\t" "vand.vi v0, v0, 0x3\n\t" "vand.vi v1, v1, 0x3\n\t" "vand.vi v2, v2, 0x3\n\t" "vle8.v v12, (%[tmp])\n\t" "vle8.v v13, (%[t1])\n\t" "addi %[tmp], %[tmp], 32\n\t" "addi %[t1], %[t1], 32\n\t" "vand.vi v3, v3, 0x3\n\t" "vand.vi v4, v4, 0x3\n\t" "vand.vi v5, v5, 0x3\n\t" "vle8.v v14, (%[tmp])\n\t" "vle8.v v15, (%[t1])\n\t" "vwmul.vv v16, v0, v8\n\t" "vwmul.vv v18, v1, v9\n\t" "vwmul.vv v20, v2, v10\n\t" "vwmul.vv v22, v3, v11\n\t" "vwmul.vv v24, v4, v12\n\t" "vwmul.vv v26, v5, v13\n\t" "vwmul.vv v28, v6, v14\n\t" "vwmul.vv v30, v7, v15\n\t" "vsetivli zero, 8, e16, m1\n\t" "vmv.v.x v0, zero\n\t" "lbu %[tmp], 0(%[scale])\n\t" "vwredsum.vs v8, v16, v0\n\t" "vwredsum.vs v9, v18, v0\n\t" "lbu %[t1], 1(%[scale])\n\t" "vwredsum.vs v10, v20, v0\n\t" "vwredsum.vs v11, v22, v0\n\t" "lbu %[t2], 2(%[scale])\n\t" "vwredsum.vs v12, v24, v0\n\t" "vwredsum.vs v13, v26, v0\n\t" "lbu %[t3], 3(%[scale])\n\t" "vwredsum.vs v14, v28, v0\n\t" "vwredsum.vs v15, v30, v0\n\t" "lbu %[t4], 4(%[scale])\n\t" "vwredsum.vs v8, v17, v8\n\t" "vwredsum.vs v9, v19, v9\n\t" "lbu %[t5], 5(%[scale])\n\t" "vwredsum.vs v10, v21, v10\n\t" "vwredsum.vs v11, v23, v11\n\t" "lbu %[t6], 6(%[scale])\n\t" "vwredsum.vs v12, v25, v12\n\t" "vwredsum.vs v13, v27, v13\n\t" "lbu %[t7], 7(%[scale])\n\t" "vwredsum.vs v14, v29, v14\n\t" "vwredsum.vs v15, v31, v15\n\t" "vsetivli zero, 4, e32, m1\n\t" "vmul.vx v0, v8, %[tmp]\n\t" "vmul.vx v1, v9, %[t1]\n\t" "vmacc.vx v0, %[t2], v10\n\t" "vmacc.vx v1, %[t3], v11\n\t" "vmacc.vx v0, %[t4], v12\n\t" "vmacc.vx v1, %[t5], v13\n\t" "vmacc.vx v0, %[t6], v14\n\t" "vmacc.vx v1, %[t7], v15\n\t" "vmv.x.s %[tmp], v0\n\t" "vmv.x.s %[t1], v1\n\t" "add %[isum], %[isum], %[tmp]\n\t" "add %[isum], %[isum], %[t1]" : [tmp] "=&r" (tmp), [t1] "=&r" (t1), [t2] "=&r" (t2), 
[t3] "=&r" (t3) , [t4] "=&r" (t4), [t5] "=&r" (t5), [t6] "=&r" (t6), [t7] "=&r" (t7) , [isum] "+&r" (isum) : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); q2 += 32; q8 += 128; patmp += 8; } sumf += dall * isum; } break; default: assert(false && "Unsupported vector length"); break; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __riscv_xtheadvector uint32_t utmp[4]; float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * restrict q3 = x[i].qs; const uint8_t * restrict qh = x[i].hmask; const int8_t * restrict q8 = y[i].qs; int8_t * scale = (int8_t *)utmp; int tmp; __asm__ __volatile__( "li %[tmp], 12\n\t" "th.vsetvli zero, %[tmp], e8, m1\n\t" "th.vlb.v v0, (%[s6b])\n\t" "th.vmv.v.v v2, v0\n\t" "li %[tmp], 2\n\t" "th.vsetvli zero, %[tmp], e64, m1\n\t" "th.vmv.v.x v9, %[sh]\n\t"\ "th.vslidedown.vi v1, v0, 1\n\t" "th.vslide1up.vx v8, v9, zero\n\t" // {0, 0, 4, 4} "th.vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} "li %[tmp], 4\n\t" "th.vsetvli zero, %[tmp], e32, m1\n\t" "th.vid.v v9\n\t" "th.vmv.x.s %[tmp], v1\n\t" "th.vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} "th.vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} "th.vsrl.vv v4, v1, v9\n\t" "th.vsrl.vv v2, v0, v8\n\t" "th.vand.vx v5, v4, %[kmask1]\n\t" "th.vand.vx v3, v2, %[kmask2]\n\t" "th.vsll.vi v6, v5, 4\n\t" "th.vor.vv v7, v6, v3\n\t" "li %[tmp], 16\n\t" "th.vsetvli zero, %[tmp], e8, m1\n\t" "th.vsub.vx v0, v7, %[c]\n\t" "th.vsb.v v0, (%[scale])" : [tmp] "=&r" (tmp) : [sh] "r" (0x0000000400000004), [s6b] "r" (x[i].scales), [c] "r" (32) , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); uint8_t m = 1; int isum = 0; for (int j = 0; j < QK_K; j += 128) { __asm__ __volatile__( // fixme: use v0p7 mask layout directly "th.vsetvli zero, %[vl32], e8, m2\n\t" "th.vlb.v v8, (%[q3])\n\t" "th.vsrl.vi v10, v8, 2\n\t" "th.vsrl.vi v12, v8, 4\n\t" "th.vsrl.vi v14, v8, 6\n\t" "th.vand.vi v8, v8, 3\n\t" "th.vand.vi v10, v10, 3\n\t" "th.vand.vi v12, v12, 3\n\t" "th.vlb.v v2, (%[qh])\n\t" "th.vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "th.vmseq.vx v0, v4, zero\n\t" "th.vadd.vi v8, v8, -4, v0.t\n\t" "th.vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "th.vmseq.vx v0, v4, zero\n\t" "th.vadd.vi v10, v10, -4, v0.t\n\t" "th.vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "th.vmseq.vx v0, v4, zero\n\t" "th.vadd.vi v12, v12, -4, v0.t\n\t" "th.vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "th.vmseq.vx v0, v4, zero\n\t" "th.vadd.vi v14, v14, -4, v0.t\n\t" "th.vsetvli zero, %[vl128], e8, m8\n\t" "th.vlb.v v0, (%[q8])\n\t" 
"th.vsetvli zero, %[vl64], e8, m4\n\t" "th.vwmul.vv v16, v0, v8\n\t" "th.vwmul.vv v24, v4, v12\n\t" "li %[tmp], 16\n\t" "th.vsetvli zero, %[tmp], e16, m2\n\t" "th.vmv.v.x v0, zero\n\t" "th.vwredsum.vs v10, v16, v0\n\t" "th.vwredsum.vs v9, v18, v0\n\t" "th.vwredsum.vs v8, v20, v0\n\t" "th.vwredsum.vs v7, v22, v0\n\t" "th.vwredsum.vs v11, v24, v0\n\t" "th.vwredsum.vs v12, v26, v0\n\t" "th.vwredsum.vs v13, v28, v0\n\t" "th.vwredsum.vs v14, v30, v0\n\t" "li %[tmp], 4\n\t" "th.vsetvli zero, %[tmp], e32, m1\n\t" "th.vslideup.vi v10, v9, 1\n\t" "th.vslideup.vi v8, v7, 1\n\t" "th.vslideup.vi v11, v12, 1\n\t" "th.vslideup.vi v13, v14, 1\n\t" "th.vslideup.vi v10, v8, 2\n\t" "th.vslideup.vi v11, v13, 2\n\t" "li %[tmp], 8\n\t" "th.vsetvli zero, %[tmp], e32, m2\n\t" "th.vlb.v v12, (%[scale])\n\t" "th.vmul.vv v10, v10, v12\n\t" "th.vredsum.vs v0, v10, v0\n\t" "th.vmv.x.s %[tmp], v0\n\t" "add %[isum], %[isum], %[tmp]" : [tmp] "=&r" (tmp), [m] "+&r" (m), [isum] "+&r" (isum) : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) , [q3] "r" (q3), [qh] "r" (qh), [scale] "r" (scale), [q8] "r" (q8) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); q3 += 32; q8 += 128; scale += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d * isum; } *s = sumf; #elif defined __riscv_v uint32_t utmp[4]; float sumf = 0; uint32_t aux[3]; const int vector_length = __riscv_vlenb() * 8; switch (vector_length) { case 256: for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].hmask; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(aux, x[i].scales, 12); utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); int8_t * scale = (int8_t *)utmp; for (int j = 0; j < 16; ++j) scale[j] -= 32; size_t vl = 32; uint8_t m = 1; vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); int sum_t = 0; for (int j = 0; j < QK_K; j += 128) { vl = 32; // load Q3 vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl)); vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl)); vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl)); // compute mask for subtraction vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_mu(vmask_0, q3_0, q3_0, 0x4, vl); m <<= 1; vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_mu(vmask_1, q3_1, q3_1, 0x4, vl); m <<= 1; vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_mu(vmask_2, q3_2, q3_2, 0x4, vl); m <<= 1; vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); vbool8_t vmask_3 = 
__riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_mu(vmask_3, q3_3, q3_3, 0x4, vl); m <<= 1; // load Q8 and take product with Q3 vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl); vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl); vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl); vl = 16; // retrieve lane to multiply with scale vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl); vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); q3 += 32; q8 += 128; scale += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d*sum_t; } break; case 128: for (int i = 0; i < nb; ++i) { const uint8_t * restrict q3 = x[i].qs; const uint8_t * restrict qh = x[i].hmask; const int8_t * restrict q8 = y[i].qs; int8_t * scale = (int8_t *)utmp; int tmp, t1, t2, t3, t4, t5, t6, t7; __asm__ __volatile__( "vsetivli zero, 12, e8, m1\n\t" "vle8.v v0, (%[s6b])\n\t" "vmv1r.v v2, v0\n\t" "vsetivli zero, 2, e64, m1\n\t" "vmv.v.x v9, %[sh]\n\t"\ "vslidedown.vi v1, v0, 1\n\t" "vslide1up.vx v8, v9, zero\n\t" // {0, 0, 4, 4} "vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} "vsetivli zero, 4, e32, m1\n\t" "vid.v v9\n\t" "vmv.x.s %[tmp], v1\n\t" "vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} "vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} "vsrl.vv v4, v1, v9\n\t" "vsrl.vv v2, v0, v8\n\t" "vand.vx v5, v4, %[kmask1]\n\t" "vand.vx v3, v2, %[kmask2]\n\t" "vsll.vi v6, v5, 4\n\t" "vor.vv v7, v6, v3\n\t" "vsetivli zero, 16, e8, m1\n\t" "vsub.vx v0, v7, %[c]\n\t" "vse8.v v0, (%[scale])" : [tmp] "=&r" (tmp) : [sh] "r" (0x0000000400000004), [s6b] "r" (x[i].scales), [c] "r" (32) , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); uint8_t m = 1; int isum = 0; for (int j = 0; j < QK_K; j += 128) { __asm__ __volatile__( "lb zero, 31(%[q3])\n\t" "vsetvli zero, %[vl32], e8, m2, ta, mu\n\t" "vle8.v v8, (%[q3])\n\t" "vsrl.vi v10, v8, 2\n\t" "vsrl.vi v12, v8, 4\n\t" "vsrl.vi v14, v8, 6\n\t" "lb zero, 64(%[q8])\n\t" "vand.vi v8, v8, 3\n\t" "vand.vi v10, v10, 3\n\t" 
"vand.vi v12, v12, 3\n\t" "vle8.v v2, (%[qh])\n\t" "lb zero, 127(%[q8])\n\t" "vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "vmseq.vx v0, v4, zero\n\t" "vadd.vi v8, v8, -4, v0.t\n\t" "lb zero, 0(%[q8])\n\t" "vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "vmseq.vx v0, v4, zero\n\t" "vadd.vi v10, v10, -4, v0.t\n\t" "vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "vmseq.vx v0, v4, zero\n\t" "vadd.vi v12, v12, -4, v0.t\n\t" "vand.vx v4, v2, %[m]\n\t" "slli %[m], %[m], 1\n\t" "vmseq.vx v0, v4, zero\n\t" "vadd.vi v14, v14, -4, v0.t\n\t" "vsetvli zero, %[vl128], e8, m8\n\t" "vle8.v v0, (%[q8])\n\t" "lb %[tmp], 0(%[scale])\n\t" "lb %[t1], 1(%[scale])\n\t" "lb %[t2], 2(%[scale])\n\t" "lb %[t3], 3(%[scale])\n\t" "vsetvli zero, %[vl64], e8, m4\n\t" "vwmul.vv v16, v0, v8\n\t" "vwmul.vv v24, v4, v12\n\t" "vsetivli zero, 16, e16, m2\n\t" "vmv.v.x v0, zero\n\t" "vwredsum.vs v8, v16, v0\n\t" "lb %[t4], 4(%[scale])\n\t" "lb %[t5], 5(%[scale])\n\t" "vwredsum.vs v9, v18, v0\n\t" "vwredsum.vs v10, v20, v0\n\t" "vwredsum.vs v11, v22, v0\n\t" "vwredsum.vs v12, v24, v0\n\t" "lb %[t6], 6(%[scale])\n\t" "lb %[t7], 7(%[scale])\n\t" "vwredsum.vs v13, v26, v0\n\t" "vwredsum.vs v14, v28, v0\n\t" "vwredsum.vs v15, v30, v0\n\t" "vsetivli zero, 4, e32, m1\n\t" "vmul.vx v0, v8, %[tmp]\n\t" "vmul.vx v1, v9, %[t1]\n\t" "vmacc.vx v0, %[t2], v10\n\t" "vmacc.vx v1, %[t3], v11\n\t" "vmacc.vx v0, %[t4], v12\n\t" "vmacc.vx v1, %[t5], v13\n\t" "vmacc.vx v0, %[t6], v14\n\t" "vmacc.vx v1, %[t7], v15\n\t" "vmv.x.s %[tmp], v0\n\t" "vmv.x.s %[t1], v1\n\t" "add %[isum], %[isum], %[tmp]\n\t" "add %[isum], %[isum], %[t1]" : [tmp] "=&r" (tmp), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3) , [t4] "=&r" (t4), [t5] "=&r" (t5), [t6] "=&r" (t6), [t7] "=&r" (t7) , [m] "+&r" (m), [isum] "+&r" (isum) : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) , [q3] "r" (q3), [qh] "r" (qh), [scale] "r" (scale), [q8] "r" (q8) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); q3 += 32; q8 += 128; scale += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d * isum; } break; default: assert(false && "Unsupported vector length"); break; } *s = sumf; #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __riscv_xtheadvector const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int tmp, tmp2, sumi; __asm__ __volatile__( "li %[t1], 12\n\t" "th.vsetvli zero, %[t1], e8, m1\n\t" "th.vlb.v v1, (%[s6b])\n\t" // {aux[0], aux[1], aux[2]} "li %[t1], 4\n\t" "th.vsetvli zero, %[t1], e32, m1\n\t" "th.vslidedown.vi v2, v1, 2\n\t" "th.vmv.v.v v3, 
v2\n\t" "th.vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]} "li %[t1], 2\n\t" "th.vsetvli zero, %[t1], e32, m1\n\t" "th.vmv.v.i v4, 4\n\t" "th.vand.vx v8, v1, %[kmask1]\n\t" "th.vslide1up.vx v5, v4, zero\n\t" // {0, 4} "th.vsrl.vi v6, v1, 6\n\t" "th.vsrl.vv v7, v2, v5\n\t" "th.vand.vx v0, v6, %[kmask3]\n\t" "th.vand.vx v2, v7, %[kmask2]\n\t" "th.vsll.vi v6, v0, 4\n\t" "li %[t2], 8\n\t" "addi %[t1], %[utmp], 4\n\t" "th.vor.vv v1, v6, v2\n\t" "th.vssw.v v8, (%[utmp]), %[t2]\n\t" "th.vssw.v v1, (%[t1]), %[t2]\n\t" "th.vsetvli zero, zero, e32, m2\n\t" // vl == 8 "th.vlw.v v2, (%[bsums])\n\t" "th.vsetvli zero, %[t2], e16, m1\n\t" "th.vnsrl.vi v0, v2, 0\n\t" "th.vnsrl.vi v1, v2, 16\n\t" "th.vadd.vv v2, v0, v1\n\t" "th.vlbu.v v4, (%[mins])\n\t" "th.vwmul.vv v6, v4, v2\n\t" "th.vmv.v.x v0, zero\n\t" "th.vsetvli zero, %[t2], e32, m2\n\t" "th.vredsum.vs v0, v6, v0\n\t" "th.vmv.x.s %[sumi], v0" : [t1] "=&r" (tmp), [t2] "=&r" (tmp2), [sumi] "=&r" (sumi) : [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp) , [s6b] "r" (x[i].scales), [kmask1] "r" (kmask1) , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); sumf -= dmin * sumi; const uint8_t * restrict q4 = x[i].qs; const int8_t * restrict q8 = y[i].qs; sumi = 0; const uint8_t * scale = scales; for (int j = 0; j < QK_K/128; ++j) { int vl128 = 128, vl64 = 64, vl32 = 32; __asm__ __volatile__( "th.vsetvli zero, %[vl128], e8, m8\n\t" "th.vlb.v v8, (%[q8])\n\t" "th.vsetvli zero, %[vl64], e8, m4\n\t" "th.vlb.v v0, (%[q4])\n\t" "th.vsrl.vi v4, v0, 4\n\t" "th.vand.vi v0, v0, 0xF\n\t" "th.vsetvli zero, %[vl32], e8, m2\n\t" "th.vwmul.vv v28, v6, v14\n\t" "th.vwmul.vv v20, v4, v10\n\t" "th.vwmul.vv v24, v2, v12\n\t" "th.vwmul.vv v16, v0, v8\n\t" "li %[tmp], 4\n\t" "th.vsetvli zero, %[tmp], e32, m1\n\t" "th.vlbu.v v1, (%[scale])\n\t" "th.vmv.v.x v0, zero\n\t" "th.vsetvli zero, %[vl32], e16, m4\n\t" "th.vwredsum.vs v6, v24, v0\n\t" "th.vwredsum.vs v7, v28, v0\n\t" "th.vwredsum.vs v4, v16, v0\n\t" "th.vwredsum.vs v5, v20, v0\n\t" "th.vsetvli zero, %[tmp], e32, m1\n\t" "th.vslideup.vi v6, v7, 1\n\t" "th.vslideup.vi v4, v5, 1\n\t" "th.vslideup.vi v4, v6, 2\n\t" "th.vmul.vv v8, v4, v1\n\t" "th.vredsum.vs v0, v8, v0\n\t" "th.vmv.x.s %[tmp], v0\n\t" "add %[sumi], %[sumi], %[tmp]" : [tmp] "=&r" (tmp), [sumi] "+&r" (sumi) : [vl128] "r" (vl128), [vl64] "r" (vl64), [vl32] "r" (vl32) , [q4] "r" (q4), [q8] "r" (q8), [scale] "r" (scale) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); q4 += 64; q8 += 128; scale += 4; } sumf += d * sumi; } *s = sumf; #elif defined __riscv_v const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; float sumf = 0; const int vector_length = __riscv_vlenb() * 8; switch (vector_length) { case 256: for (int i = 0; i < nb; ++i) { size_t vl = 8; const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl); memcpy(utmp, x[i].scales, 12); utmp[3] 
= ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl); vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl)); vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl); vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; vl = 32; int32_t sum_1 = 0; int32_t sum_2 = 0; vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); for (int j = 0; j < QK_K/64; ++j) { // load Q4 vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl); // load Q8 and multiply it with lower Q4 nibble vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl)); vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl); vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl); sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0]; // load Q8 and multiply it with upper Q4 nibble vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl); vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl)); vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl); vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl); sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1]; q4 += 32; q8 += 64; } sumf += d*(sum_1 + sum_2); } break; case 128: for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); float ftmp, ft2; const uint8_t * restrict q40; const uint8_t * restrict q41; const uint8_t * restrict q42; const uint8_t * restrict q43; const int8_t * restrict q80; const int8_t * restrict q81; const int8_t * restrict q82; const int8_t * restrict q83; int s0, s1, s2, s3; __asm__ __volatile__( "li %[s1], 8\n\t" "vsetivli zero, 4, e32, m1, ta, ma\n\t" "vle32.v v1, (%[s6b])\n\t" "vslide1down.vx v1, v1, zero\n\t" "vmv.v.x v16, zero\n\t" "vslidedown.vi v2, v1, 2\n\t" "vmv1r.v v3, v2\n\t" "vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]} "vsetivli zero, 2, e32, m1, ta, ma\n\t" "vmv.v.i v4, 4\n\t" "vand.vx v8, v1, %[kmask1]\n\t" "vslide1up.vx v5, v4, zero\n\t" // {0, 4} "vsrl.vi v6, v1, 6\n\t" "vsrl.vv v7, v2, v5\n\t" "vsse32.v v8, (%[utmp]), %[s1]\n\t" "vand.vx v0, v6, %[kmask3]\n\t" "vand.vx v2, v7, %[kmask2]\n\t" "vsll.vi v6, v0, 4\n\t" "addi %[s0], %[utmp], 4\n\t" "vor.vv v1, v6, v2\n\t" "vsse32.v v1, (%[s0]), %[s1]\n\t" "vsetivli zero, 8, e16, m1, ta, ma\n\t" "vle32.v v2, (%[bsums])\n\t" "vnsrl.wi v0, v2, 0\n\t" "vnsrl.wi v1, v2, 16\n\t" "vadd.vv v2, v0, v1\n\t" "vle8.v v3, (%[mins])\n\t" "vzext.vf2 v4, v3\n\t" "vwmul.vv v6, v4, v2\n\t" "vsetivli zero, 4, e32, m1, ta, ma\n\t" "vredsum.vs v0, v6, v16\n\t" "vredsum.vs v0, v7, v0\n\t" "vfcvt.f.x.v v0, v0\n\t" "vfmv.f.s %[ftmp], v0\n\t" "vsetivli zero, 16, e8, m1, ta, ma\n\t" "vle8.v v0, (%[xs])\n\t" "fnmsub.s %[sumf], %[dmin], %[ftmp], %[sumf]\n\t" "addi %[q40], %[xs], 64\n\t" "addi %[q41], %[xs], 16\n\t" "addi %[q42], %[xs], 32\n\t" "addi %[q43], %[xs], 48\n\t" "addi %[q80], %[ys], 64\n\t" "vle8.v v1, (%[q41])\n\t" "vle8.v v2, (%[q42])\n\t" "addi %[q81], %[ys], 16\n\t" "addi %[q41], %[q41], 64\n\t" "addi %[q82], %[ys], 32\n\t" "vle8.v v3, (%[q43])\n\t" "vle8.v v8, (%[ys])\n\t" "addi %[q42], %[q42], 
64\n\t" "addi %[q83], %[ys], 48\n\t" "addi %[q43], %[q43], 64\n\t" "vsrl.vi v4, v0, 4\n\t" "vle8.v v9, (%[q81])\n\t" "vle8.v v10, (%[q82])\n\t" "vand.vi v0, v0, 0xF\n\t" "addi %[q81], %[q81], 64\n\t" "vsrl.vi v5, v1, 4\n\t" "addi %[q82], %[q82], 64\n\t" "vle8.v v11, (%[q83])\n\t" "vle8.v v12, (%[q80])\n\t" "vand.vi v1, v1, 0xF\n\t" "addi %[q83], %[q83], 64\n\t" "vsrl.vi v6, v2, 4\n\t" "addi %[q80], %[q80], 64\n\t" "vle8.v v13, (%[q81])\n\t" "vle8.v v14, (%[q82])\n\t" "vand.vi v2, v2, 0xF\n\t" "addi %[q81], %[q81], 64\n\t" "vsrl.vi v7, v3, 4\n\t" "addi %[q82], %[q82], 64\n\t" "vwmul.vv v16, v0, v8\n\t" "vle8.v v15, (%[q83])\n\t" "vle8.v v0, (%[q40])\n\t" "vand.vi v3, v3, 0xF\n\t" "addi %[q83], %[q83], 64\n\t" "vwmul.vv v24, v2, v12\n\t" "vwmul.vv v20, v4, v10\n\t" "vwmul.vv v28, v6, v14\n\t" "vwmacc.vv v16, v1, v9\n\t" "vle8.v v1, (%[q41])\n\t" "vle8.v v2, (%[q42])\n\t" "vwmacc.vv v24, v3, v13\n\t" "vwmacc.vv v20, v5, v11\n\t" "vwmacc.vv v28, v7, v15\n\t" "addi %[q40], %[q80], 64\n\t" "addi %[q41], %[q81], 64\n\t" "vle8.v v3, (%[q43])\n\t" "vle8.v v8, (%[q80])\n\t" "addi %[q42], %[q82], 64\n\t" "addi %[q43], %[q83], 64\n\t" "vsrl.vi v4, v0, 4\n\t" "vle8.v v9, (%[q81])\n\t" "vle8.v v10, (%[q82])\n\t" "vand.vi v0, v0, 0xF\n\t" "vsrl.vi v5, v1, 4\n\t" "vsrl.vi v7, v3, 4\n\t" "vand.vi v3, v3, 0xF\n\t" "vle8.v v11, (%[q83])\n\t" "vle8.v v12, (%[q40])\n\t" "vand.vi v1, v1, 0xF\n\t" "vsrl.vi v6, v2, 4\n\t" "vand.vi v2, v2, 0xF\n\t" "vwmul.vv v18, v0, v8\n\t" "vle8.v v13, (%[q41])\n\t" "vle8.v v14, (%[q42])\n\t" "vwmul.vv v26, v2, v12\n\t" "vwmul.vv v22, v4, v10\n\t" "vwmul.vv v30, v6, v14\n\t" "vwmacc.vv v18, v1, v9\n\t" "vle8.v v15, (%[q43])\n\t" "vwmacc.vv v26, v3, v13\n\t" "vwmacc.vv v22, v5, v11\n\t" "vwmacc.vv v30, v7, v15\n\t" "vmv.v.x v0, zero\n\t" "vsetivli zero, 16, e16, m2, ta, ma\n\t" "vwredsum.vs v4, v16, v0\n\t" "lbu %[s0], 0(%[scale])\n\t" "vwredsum.vs v5, v20, v0\n\t" "lbu %[s1], 1(%[scale])\n\t" "vwredsum.vs v6, v24, v0\n\t" "lbu %[s2], 2(%[scale])\n\t" "vwredsum.vs v7, v28, v0\n\t" "lbu %[s3], 3(%[scale])\n\t" "vwredsum.vs v8, v18, v0\n\t" "lbu %[q40], 4(%[scale])\n\t" "vwredsum.vs v9, v22, v0\n\t" "lbu %[q41], 5(%[scale])\n\t" "vwredsum.vs v10, v26, v0\n\t" "lbu %[q42], 6(%[scale])\n\t" "vwredsum.vs v11, v30, v0\n\t" "lbu %[q43], 7(%[scale])\n\t" "vsetivli zero, 4, e32, m1, ta, ma\n\t" "vmul.vx v0, v4, %[s0]\n\t" "vmul.vx v1, v8, %[q40]\n\t" "vmacc.vx v0, %[s1], v5\n\t" "vmacc.vx v1, %[q41], v9\n\t" "vmacc.vx v0, %[s2], v6\n\t" "vmacc.vx v1, %[q42], v10\n\t" "vmacc.vx v0, %[s3], v7\n\t" "vmacc.vx v1, %[q43], v11\n\t" "vfcvt.f.x.v v0, v0\n\t" "vfcvt.f.x.v v1, v1\n\t" "vfmv.f.s %[ft2], v0\n\t" "vfmv.f.s %[ftmp], v1\n\t" "fadd.s %[ft2], %[ft2], %[ftmp]\n\t" "fmadd.s %[sumf], %[d], %[ft2], %[sumf]" : [ftmp] "=&f" (ftmp), [sumf] "+&f" (sumf), [ft2] "=&f" (ft2) , [s0] "=&r" (s0), [s1] "=&r" (s1), [s2] "=&r" (s2), [s3] "=&r" (s3) , [q40] "=&r" (q40), [q41] "=&r" (q41), [q42] "=&r" (q42), [q43] "=&r" (q43) , [q80] "=&r" (q80), [q81] "=&r" (q81), [q82] "=&r" (q82), [q83] "=&r" (q83) : [d] "f" (d), [ys] "r" (y[i].qs), [xs] "r" (x[i].qs), [scale] "r" (scales) , [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp) , [s6b] "r" (&x[i]), [kmask1] "r" (kmask1), [dmin] "f" (dmin) , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } break; default: assert(false && 
"Unsupported vector length"); break; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(nb); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __riscv_v const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; float sumf = 0; float sums = 0.0; size_t vl; for (int i = 0; i < nb; ++i) { vl = 8; const uint8_t * GGML_RESTRICT q5 = x[i].qs; const uint8_t * GGML_RESTRICT hm = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; vint16m1_t q8sums_0 = __riscv_vlse16_v_i16m1(y[i].bsums, 4, vl); vint16m1_t q8sums_1 = __riscv_vlse16_v_i16m1(y[i].bsums+1, 4, vl); vint16m1_t q8sums = __riscv_vadd_vv_i16m1(q8sums_0, q8sums_1, vl); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; vuint8mf2_t mins8 = __riscv_vle8_v_u8mf2(mins, vl); vint16m1_t v_mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, v_mins, vl); vint32m1_t sumi = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); vl = 32; int32_t aux32 = 0; int is = 0; uint8_t m = 1; vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); vuint8m2_t vqh = __riscv_vle8_v_u8m2(hm, vl); for (int j = 0; j < QK_K/64; ++j) { // load Q5 and Q8 vuint8m2_t q5_x = __riscv_vle8_v_u8m2(q5, vl); vint8m2_t q8_y1 = __riscv_vle8_v_i8m2(q8, vl); vint8m2_t q8_y2 = __riscv_vle8_v_i8m2(q8+32, vl); // compute mask for addition vint8m2_t q5_a = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vand_vx_u8m2(q5_x, 0x0F, vl)); vuint8m2_t qh_m1 = __riscv_vand_vx_u8m2(vqh, m, vl); vbool4_t vmask_1 = __riscv_vmsne_vx_u8m2_b4(qh_m1, 0, vl); vint8m2_t q5_m1 = __riscv_vadd_vx_i8m2_mu(vmask_1, q5_a, q5_a, 16, vl); m <<= 1; vint8m2_t q5_l = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vsrl_vx_u8m2(q5_x, 0x04, vl)); vuint8m2_t qh_m2 = __riscv_vand_vx_u8m2(vqh, m, vl); vbool4_t vmask_2 = __riscv_vmsne_vx_u8m2_b4(qh_m2, 0, vl); vint8m2_t q5_m2 = __riscv_vadd_vx_i8m2_mu(vmask_2, q5_l, q5_l, 16, vl); m <<= 1; vint16m4_t v0 = __riscv_vwmul_vv_i16m4(q5_m1, q8_y1, vl); vint16m4_t v1 = __riscv_vwmul_vv_i16m4(q5_m2, q8_y2, vl); vint32m8_t vs1 = __riscv_vwmul_vx_i32m8(v0, scales[is++], vl); vint32m8_t vs2 = __riscv_vwmul_vx_i32m8(v1, scales[is++], vl); vint32m1_t vacc1 = __riscv_vredsum_vs_i32m8_i32m1(vs1, vzero, vl); vint32m1_t vacc2 = __riscv_vredsum_vs_i32m8_i32m1(vs2, vacc1, vl); aux32 += __riscv_vmv_x_s_i32m1_i32(vacc2); q5 += 32; q8 += 64; } sums += aux32 * d; } *s = sumf+sums; #else UNUSED(x); UNUSED(y); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(nb); UNUSED(utmp); ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, 
nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __riscv_xtheadvector float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * restrict q6 = x[i].ql; const uint8_t * restrict qh = x[i].qh; const int8_t * restrict q8 = y[i].qs; const int8_t * restrict scale = x[i].scales; int sum_t = 0; int t0; for (int j = 0; j < QK_K/128; ++j) { __asm__ __volatile__( "th.vsetvli zero, %[vl32], e8, m2\n\t" // vl == 32 "th.vlb.v v4, (%[qh])\n\t" "th.vsll.vi v0, v4, 4\n\t" "th.vsll.vi v2, v4, 2\n\t" "th.vsrl.vi v6, v4, 2\n\t" "th.vsetvli zero, %[vl64], e8, m4\n\t" // vl == 64 "th.vlb.v v8, (%[q6])\n\t" "th.vsrl.vi v12, v8, 4\n\t" "th.vand.vi v8, v8, 0xF\n\t" "th.vsetvli zero, %[vl128], e8, m8\n\t" // vl == 128 "th.vand.vx v0, v0, %[mask]\n\t" "th.vor.vv v8, v8, v0\n\t" "th.vlb.v v0, (%[q8])\n\t" "th.vsub.vx v8, v8, %[vl32]\n\t" "th.vsetvli zero, %[vl64], e8, m4\n\t" // vl == 64 "th.vwmul.vv v16, v0, v8\n\t" "th.vwmul.vv v24, v4, v12\n\t" "li %[t0], 16\n\t" "th.vsetvli zero, %[t0], e16, m2\n\t" // vl == 16 "th.vmv.v.x v0, zero\n\t" "th.vwredsum.vs v10, v16, v0\n\t" "th.vwredsum.vs v9, v18, v0\n\t" "th.vwredsum.vs v8, v20, v0\n\t" "th.vwredsum.vs v7, v22, v0\n\t" "th.vwredsum.vs v11, v24, v0\n\t" "th.vwredsum.vs v12, v26, v0\n\t" "th.vwredsum.vs v13, v28, v0\n\t" "th.vwredsum.vs v14, v30, v0\n\t" "li %[t0], 4\n\t" "th.vsetvli zero, %[t0], e32, m1\n\t" // vl == 4 "th.vslideup.vi v10, v9, 1\n\t" "th.vslideup.vi v8, v7, 1\n\t" "th.vslideup.vi v11, v12, 1\n\t" "th.vslideup.vi v13, v14, 1\n\t" "th.vslideup.vi v10, v8, 2\n\t" "th.vslideup.vi v11, v13, 2\n\t" "li %[t0], 8\n\t" "th.vsetvli zero, %[t0], e32, m2\n\t" // vl == 8 "th.vlb.v v4, (%[scale])\n\t" "th.vmul.vv v2, v4, v10\n\t" "th.vredsum.vs v0, v2, v0\n\t" "th.vmv.x.s %[t0], v0\n\t" "add %[sumi], %[sumi], %[t0]" : [sumi] "+&r" (sum_t), [t0] "=&r" (t0) : [qh] "r" (qh), [q6] "r" (q6), [q8] "r" (q8), [scale] "r" (scale) , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) , [mask] "r" (0x30) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); q6 += 64; qh += 32; q8 += 128; scale += 8; } sumf += d * sum_t; } *s = sumf; #elif defined __riscv_v float sumf = 0; const int vector_length = __riscv_vlenb() * 8; switch (vector_length) { case 256: for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const int8_t * GGML_RESTRICT scale = x[i].scales; size_t vl; vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); int sum_t = 0; int is = 0; for (int j = 0; j < QK_K/128; ++j) { vl = 32; // load qh vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl); // load Q6 vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl); vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl); vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl); vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl); vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl); vuint8m1_t q6s_1 = 
__riscv_vsrl_vx_u8m1(q6_1, 0x04, vl); vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl); vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl); vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl); vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl); vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl); vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl); vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl); vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl); vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl); vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl); vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl); vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl); // load Q8 and take product vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl); vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl); vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl); vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl); vl = 16; vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl); vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl); vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl); vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl); vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl); vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl); vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl); vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl); vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl); vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl); vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl); vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl); sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); q6 += 64; qh += 32; q8 += 128; is=8; } sumf += d * sum_t; } break; case 128: for (int i = 0; i < nb; ++i) { __builtin_prefetch(&x[i + 1].d, 0, 1); const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * restrict q6 = x[i].ql; const uint8_t * restrict qh = x[i].qh; const int8_t * restrict q8 = y[i].qs; const int8_t * restrict scale = x[i].scales; int q6h; float ftmp; for (int j = 0; j < QK_K/128; ++j) { __asm__ __volatile__( "addi %[q6h], %[q6], 32\n\t" "ld t0, 0(%[scale])\n\t" "addi %[scale], %[scale], 8\n\t" "slli t6, t0, 1 * 8\n\t" "lb zero, 0(%[q6])\n\t" "slli t5, t0, 2 * 8\n\t" "slli t4, t0, 3 * 8\n\t" "lb zero, 0(%[q6h])\n\t" "slli t3, t0, 4 * 8\n\t" "slli t2, t0, 5 * 8\n\t" "lb zero, 0(%[qh])\n\t" "lb zero, 31(%[q6h])\n\t" "slli t1, t0, 6 * 8\n\t" "srai a7, t0, 56\n\t" "vsetvli zero, %[vl32], e8, m2\n\t" "vle8.v v8, 
(%[q6])\n\t" "srai t6, t6, 56\n\t" "srai t5, t5, 56\n\t" "srai t4, t4, 56\n\t" "srai t3, t3, 56\n\t" "vle8.v v10, (%[q6h])\n\t" "addi %[q6], %[q6], 64\n\t" "slli t0, t0, 7 * 8\n\t" "srai t2, t2, 56\n\t" "srai t1, t1, 56\n\t" "srai t0, t0, 56\n\t" "vle8.v v4, (%[qh])\n\t" "vsrl.vi v12, v8, 4\n\t" "vsrl.vi v14, v10, 4\n\t" "lb zero, 0(%[q8])\n\t" "vand.vi v8, v8, 0xF\n\t" "vand.vi v10, v10, 0xF\n\t" "lb zero, 32(%[q8])\n\t" "vsll.vi v0, v4, 4\n\t" "vsll.vi v2, v4, 2\n\t" "lb zero, 64(%[q8])\n\t" "vsrl.vi v6, v4, 2\n\t" "vand.vx v0, v0, %[mask]\n\t" "lb zero, 96(%[q8])\n\t" "vand.vx v2, v2, %[mask]\n\t" "vand.vx v4, v4, %[mask]\n\t" "vand.vx v6, v6, %[mask]\n\t" "vor.vv v8, v8, v0\n\t" "lb zero, 127(%[q8])\n\t" "vor.vv v10, v10, v2\n\t" "vor.vv v12, v12, v4\n\t" "vor.vv v14, v14, v6\n\t" "vsetvli zero, %[vl128], e8, m8\n\t" "vle8.v v0, (%[q8])\n\t" "vsub.vx v8, v8, %[vl32]\n\t" "vsetvli zero, %[vl64], e8, m4\n\t" "vwmul.vv v16, v0, v8\n\t" "vwmul.vv v24, v4, v12\n\t" "vsetivli zero, 16, e16, m2\n\t" "vmv.v.x v0, zero\n\t" "vwredsum.vs v10, v16, v0\n\t" "vwredsum.vs v9, v18, v0\n\t" "vwredsum.vs v8, v20, v0\n\t" "vwredsum.vs v7, v22, v0\n\t" "vwredsum.vs v11, v24, v0\n\t" "vwredsum.vs v12, v26, v0\n\t" "vwredsum.vs v13, v28, v0\n\t" "vwredsum.vs v14, v30, v0\n\t" "vsetivli zero, 4, e32, m1\n\t" "vmul.vx v0, v10, t0\n\t" "vmul.vx v1, v9, t1\n\t" "vmacc.vx v0, t2, v8\n\t" "vmacc.vx v1, t3, v7\n\t" "vmacc.vx v0, t4, v11\n\t" "vmacc.vx v1, t5, v12\n\t" "vmacc.vx v0, t6, v13\n\t" "vmacc.vx v1, a7, v14\n\t" "vadd.vv v0, v0, v1\n\t" "vfcvt.f.x.v v0, v0\n\t" "vfmv.f.s %[ftmp], v0\n\t" "fmadd.s %[sumf], %[d], %[ftmp], %[sumf]" : [q6] "+&r" (q6), [q6h] "=&r" (q6h) , [scale] "+&r" (scale) , [sumf] "+&f" (sumf), [ftmp] "=&f" (ftmp) : [qh] "r" (qh), [q8] "r" (q8) , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) , [mask] "r" (0x30), [d] "f" (d) : "memory" , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" , "t0", "t1", "t2", "t3", "t4", "t5", "t6", "a7" , "a6", "a5", "a4", "a3" ); qh += 32; q8 += 128; } } break; default: assert(false && "Unsupported vector length"); break; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } ggml-org-ggml-3678254/src/ggml-cpu/arch/riscv/repack.cpp000066400000000000000000000527341512524704700227150ustar00rootroot00000000000000#define GGML_COMMON_IMPL_CPP #define GGML_COMMON_DECL_CPP #include "ggml-common.h" #include "ggml-backend-impl.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" #include "simd-mappings.h" #include "traits.h" #include #include #include #include // for qsort #include // for GGML_ASSERT #define GGML_CPU_CLANG_WORKAROUND #include "../../repack.h" #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" #endif #define UNUSED GGML_UNUSED void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined __riscv_v if (__riscv_vlenb() >= QK4_0) { const size_t vl = QK4_0; const block_q8_0 * a_ptr = 
(const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); vfloat32m1_t sumf = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); for (int l = 0; l < nb; l++) { const int64_t a0 = *(const int64_t *)&a_ptr[l].qs[0]; const int64_t a1 = *(const int64_t *)&a_ptr[l].qs[8]; const int64_t a2 = *(const int64_t *)&a_ptr[l].qs[16]; const int64_t a3 = *(const int64_t *)&a_ptr[l].qs[24]; __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment constraints const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a0, vl / 4)); const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a1, vl / 4)); const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a2, vl / 4)); const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a3, vl / 4)); const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4); const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4); const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4); const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0); const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1); const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0); const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1); const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_hi_m)); const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); // vector version needs Zvfhmin extension const float a_scale = GGML_CPU_FP16_TO_FP32(a_ptr[l].d); const float b_scales[8] = { GGML_CPU_FP16_TO_FP32(b_ptr[l].d[0]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[1]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[2]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[3]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[4]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[5]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[6]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[7]) }; const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, 
a_scale, vl / 4); sumf = __riscv_vfmacc_vv_f32m1(sumf, tmp1, b_scales_vec, vl / 4); } __riscv_vse32_v_f32m1(s + x * ncols_interleaved, sumf, vl / 4); } return; } #endif ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined __riscv_v if (__riscv_vlenb() >= QK4_0) { const size_t vl = QK4_0; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); vfloat32m1_t sumf0 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); vfloat32m1_t sumf1 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); vfloat32m1_t sumf2 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); vfloat32m1_t sumf3 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); for (int l = 0; l < nb; l++) { const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4); const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4); const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4); const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0); const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1); const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0); const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1); // vector version needs Zvfhmin extension const float a_scales[4] = { GGML_CPU_FP16_TO_FP32(a_ptr[l].d[0]), GGML_CPU_FP16_TO_FP32(a_ptr[l].d[1]), GGML_CPU_FP16_TO_FP32(a_ptr[l].d[2]), GGML_CPU_FP16_TO_FP32(a_ptr[l].d[3]) }; const float b_scales[8] = { GGML_CPU_FP16_TO_FP32(b_ptr[l].d[0]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[1]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[2]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[3]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[4]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[5]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[6]), GGML_CPU_FP16_TO_FP32(b_ptr[l].d[7]) }; const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); const int64_t A0 = *(const int64_t *)&a_ptr[l].qs[0]; const int64_t A4 = *(const int64_t *)&a_ptr[l].qs[32]; const int64_t A8 = *(const int64_t *)&a_ptr[l].qs[64]; const int64_t Ac = *(const int64_t *)&a_ptr[l].qs[96]; __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment vint16m4_t sumi_l0; { const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A0, vl / 4)); const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A4, vl / 4)); const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A8, vl / 4)); const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ac, vl / 4)); const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); sumi_l0 = 
sumi_hi_m; } { const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l0)); const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[0], vl / 4); sumf0 = __riscv_vfmacc_vv_f32m1(sumf0, tmp1, b_scales_vec, vl / 4); } const int64_t A1 = *(const int64_t *)&a_ptr[l].qs[8]; const int64_t A5 = *(const int64_t *)&a_ptr[l].qs[40]; const int64_t A9 = *(const int64_t *)&a_ptr[l].qs[72]; const int64_t Ad = *(const int64_t *)&a_ptr[l].qs[104]; __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment vint16m4_t sumi_l1; { const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A1, vl / 4)); const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A5, vl / 4)); const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A9, vl / 4)); const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ad, vl / 4)); const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); sumi_l1 = sumi_hi_m; } { const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l1)); const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, 
a_scales[1], vl / 4); sumf1 = __riscv_vfmacc_vv_f32m1(sumf1, tmp1, b_scales_vec, vl / 4); } const int64_t A2 = *(const int64_t *)&a_ptr[l].qs[16]; const int64_t A6 = *(const int64_t *)&a_ptr[l].qs[48]; const int64_t Aa = *(const int64_t *)&a_ptr[l].qs[80]; const int64_t Ae = *(const int64_t *)&a_ptr[l].qs[112]; __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment vint16m4_t sumi_l2; { const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A2, vl / 4)); const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A6, vl / 4)); const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Aa, vl / 4)); const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ae, vl / 4)); const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); sumi_l2 = sumi_hi_m; } { const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l2)); const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[2], vl / 4); sumf2 = __riscv_vfmacc_vv_f32m1(sumf2, tmp1, b_scales_vec, vl / 4); } const int64_t A3 = *(const int64_t *)&a_ptr[l].qs[24]; const int64_t A7 = *(const int64_t *)&a_ptr[l].qs[56]; const int64_t Ab = *(const int64_t *)&a_ptr[l].qs[88]; const int64_t Af = *(const int64_t *)&a_ptr[l].qs[120]; __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment vint16m4_t sumi_l3; { const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A3, vl / 4)); const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A7, vl / 4)); const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ab, vl / 4)); const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Af, vl / 4)); const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, 
lhs_3_8, vl * 2); sumi_l3 = sumi_hi_m; } { const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l3)); const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[3], vl / 4); sumf3 = __riscv_vfmacc_vv_f32m1(sumf3, tmp1, b_scales_vec, vl / 4); } } __riscv_vse32_v_f32m1(&s[(y * 4 + 0) * bs + x * ncols_interleaved], sumf0, vl / 4); __riscv_vse32_v_f32m1(&s[(y * 4 + 1) * bs + x * ncols_interleaved], sumf1, vl / 4); __riscv_vse32_v_f32m1(&s[(y * 4 + 2) * bs + x * ncols_interleaved], sumf2, vl / 4); __riscv_vse32_v_f32m1(&s[(y * 4 + 3) * bs + x * ncols_interleaved], sumf3, vl / 4); } } return; } #endif ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } ggml-org-ggml-3678254/src/ggml-cpu/arch/s390/000077500000000000000000000000001512524704700203015ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/s390/cpu-feats.cpp000066400000000000000000000017711512524704700227020ustar00rootroot00000000000000#include "ggml-backend-impl.h" #if defined(__s390x__) #include // find hwcap bits in asm/elf.h #ifndef HWCAP_VXRS_EXT2 #define HWCAP_VXRS_EXT2 (1 << 15) #endif #ifndef HWCAP_NNPA #define HWCAP_NNPA (1 << 20) #endif struct s390x_features { bool has_vxe2 = false; bool has_nnpa = false; s390x_features() { uint32_t hwcap = getauxval(AT_HWCAP); // NOTE: use hwcap2 with DFLT for z17 and later // uint32_t hwcap2 = getauxval(AT_HWCAP2); has_vxe2 = !!(hwcap & HWCAP_VXRS_EXT2); has_nnpa = !!(hwcap & HWCAP_NNPA); } }; static int ggml_backend_cpu_s390x_score() { int score = 1; s390x_features sf; // IBM z15 / LinuxONE 3 #ifdef GGML_USE_VXE2 if (!sf.has_vxe2) { return 0; } score += 1 << 1; #endif // IBM z16 / LinuxONE 4 and z17 / LinuxONE 5 #ifdef GGML_USE_NNPA if (!sf.has_nnpa) { return 0; } score += 1 << 2; #endif return score; } GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_s390x_score) #endif // __s390x__ ggml-org-ggml-3678254/src/ggml-cpu/arch/s390/quants.c000066400000000000000000001516041512524704700217670ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" #include #include #include #include #include // for qsort #include // for GGML_ASSERT #define GROUP_MAX_EPS 1e-15f #define GROUP_MAX_EPS_IQ3_XXS 1e-8f #define GROUP_MAX_EPS_IQ2_S 1e-8f #define GROUP_MAX_EPS_IQ1_M 1e-7f #define GROUP_MAX_EPS_IQ1_S 1e-12f #define UNUSED GGML_UNUSED #if defined(__VXE__) || defined(__VXE2__) 
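// The B1..B8 macros just below build two 256-entry lookup tables at compile time:
// for a byte b, byte j of the value table_b2b_0[b] is 0x10 when bit j of b is set
// (0x00 otherwise), and table_b2b_1[b] is the complement (0x10 when bit j is clear).
// The q5_0/q5_1 kernels use them to expand the packed fifth-bit word qh into
// per-nibble masks that are OR-ed into (q5_1) or subtracted from (q5_0) the low
// 4-bit values. As an illustrative reference only (nothing below calls it, and the
// name b2b_expand_ref is ad hoc), the same expansion written at run time would be:
static inline uint64_t b2b_expand_ref(uint8_t b, int complement) {
    uint64_t r = 0;
    for (int j = 0; j < 8; ++j) {
        const int bit = (b >> j) & 1;
        if (complement ? !bit : bit) {
            r |= 0x10ull << (8*j); // byte j of the result mirrors bit j of b
        }
    }
    return r; // == table_b2b_0[b] for complement == 0, table_b2b_1[b] for complement == 1
}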
#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) #define B8(c,s ) B7(c,s, c), B7(c,s, s) // precomputed tables for expanding 8bits to 8 bytes: static const __attribute__((aligned(16))) uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b ) << 4 static const __attribute__((aligned(16))) uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 // permute mask for byteswapping static const uint8x16_t v_kperm = (const uint8x16_t){ 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; #endif void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0 * GGML_RESTRICT y = vy; #if defined(__VXE__) || defined(__VXE2__) for (int i = 0; i < nb; i++) { float32x4_t srcv [8]; float32x4_t asrcv[8]; float32x4_t amaxv[8]; for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); const float amax = MAX(MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)), MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3))); const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f / d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const float32x4_t v = vec_mul(srcv[j], vec_splats(id)); /* Uses non-default rounding for vec_signed or vec_round */ const int32x4_t vi = vec_signed(__builtin_s390_vfisb(v, 4, 1)); y[i].qs[4*j + 0] = vec_extract(vi, 0); y[i].qs[4*j + 1] = vec_extract(vi, 1); y[i].qs[4*j + 2] = vec_extract(vi, 2); y[i].qs[4*j + 3] = vec_extract(vi, 3); } } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined(__VXE__) || defined(__VXE2__) for (int i = 0; i < nb; i++) { float32x4_t srcv [8]; float32x4_t asrcv[8]; float32x4_t amaxv[8]; for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); const float amax = MAX(MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)), MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3))); const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f / d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); int32x4_t acc = vec_splats(0); for (int j = 0; j < 8; j++) { const float32x4_t v = vec_mul(srcv[j], vec_splats(id)); /* Uses non-default rounding for vec_signed or vec_round */ const int32x4_t vi = vec_signed(__builtin_s390_vfisb(v, 4, 1)); y[i].qs[4*j + 0] = vec_extract(vi, 0); y[i].qs[4*j + 1] = vec_extract(vi, 1); y[i].qs[4*j + 2] = vec_extract(vi, 2); y[i].qs[4*j + 3] = vec_extract(vi, 3); acc = vec_add(acc, vi); } y[i].s = GGML_CPU_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3])); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__VXE__) || defined(__VXE2__) float32x4_t acc = vec_splats(0.0f); const uint8x16_t v_m = vec_splats((const uint8_t)0x0F); const int8x16_t v_s = vec_splats( (const int8_t)0x08); for (; ib < nb; ++ib) { const uint8x16_t v_x = vec_xl(0, x[ib].qs); const int8x16_t v_xl = (const int8x16_t)(v_x & v_m); const int8x16_t v_xh = (const int8x16_t)(v_x >> 4); const int8x16_t v_xls = vec_sub(v_xl, v_s); const int8x16_t v_xhs = vec_sub(v_xh, v_s); const int8x16_t v_yl = vec_xl(0 , y[ib].qs); const int8x16_t v_yh = vec_xl(QK8_0/2, y[ib].qs); const int16x8_t v_xylso = vec_mulo(v_xls, v_yl); const int16x8_t v_xylse = vec_mule(v_xls, v_yl); const int16x8_t v_xyhso = vec_mulo(v_xhs, v_yh); const int16x8_t v_xyhse = vec_mule(v_xhs, v_yh); int16x8_t v_xy_ = v_xylso + v_xylse + v_xyhso + v_xyhse; v_xy_ += vec_reve(v_xy_); const float32x4_t v_xy = vec_float(vec_unpackh(v_xy_)); const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); acc = vec_madd(v_xy, v_d, acc); } sumf = vec_hsum_f32x4(acc); *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q4_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__VXE__) || defined(__VXE2__) float summs = 0; float32x4_t acc = vec_splats(0.0f); const uint8x16_t v_m = vec_splat_u8(0x0F); #pragma GCC unroll 4 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); const uint8x16_t v_x = vec_xl(0, x[ib].qs); const int8x16_t v_xl = (const int8x16_t)(v_x & v_m); const int8x16_t v_xh = (const int8x16_t)(v_x >> 4); const int8x16_t v_yl = vec_xl(0 , y[ib].qs); const int8x16_t v_yh = vec_xl(QK8_1/2, y[ib].qs); const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); const float32x4_t v_xy = vec_float(v_xy_); const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); acc = vec_madd(v_xy, v_d, 
acc); } sumf = vec_hsum_f32x4(acc) + summs; *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_MXFP4 == 0); static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); const int qk = QK_MXFP4; const int nb = n / qk; const block_mxfp4 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0.0f; #if defined(__VXE__) || defined(__VXE2__) const int8x16_t v_k = vec_xl(0, kvalues_mxfp4); const uint8x16_t v_m = vec_splats((const uint8_t)0x0F); float32x4_t v_acc = vec_splats(0.0f); #pragma GCC unroll 8 for (; ib + 1 < nb; ib += 2) { const block_mxfp4 * GGML_RESTRICT x0 = &x[ib + 0]; const block_mxfp4 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; const uint8x16_t v_x0 = vec_xl(0, x0->qs); const uint8x16_t v_x1 = vec_xl(0, x1->qs); int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, v_m); int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); v_x0l = vec_perm(v_k, v_k, (uchar8x16_t)v_x0l); v_x0h = vec_perm(v_k, v_k, (uchar8x16_t)v_x0h); v_x1l = vec_perm(v_k, v_k, (uchar8x16_t)v_x1l); v_x1h = vec_perm(v_k, v_k, (uchar8x16_t)v_x1h); const int8x16_t v_y0l = vec_xl(0, y0->qs); const int8x16_t v_y0h = vec_xl(QK8_0/2, y0->qs); const int8x16_t v_y1l = vec_xl(0, y1->qs); const int8x16_t v_y1h = vec_xl(QK8_0/2, y1->qs); const int32x4_t v_xy0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0l, v_y0l), v_x0h, v_y0h); const int32x4_t v_xy1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1l, v_y1l), v_x1h, v_y1h); const float32x4_t v_xy0f = vec_float(v_xy0); const float32x4_t v_xy1f = vec_float(v_xy1); const float32x4_t v_d0 = vec_splats(GGML_E8M0_TO_FP32_HALF(x0->e) * GGML_CPU_FP16_TO_FP32(y0->d)); const float32x4_t v_d1 = vec_splats(GGML_E8M0_TO_FP32_HALF(x1->e) * GGML_CPU_FP16_TO_FP32(y1->d)); v_acc = vec_madd(v_xy0f, v_d0, v_acc); v_acc = vec_madd(v_xy1f, v_d1, v_acc); } for (; ib < nb; ++ib) { const block_mxfp4 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const uint8x16_t v_x = vec_xl(0, x0->qs); int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); v_xl = vec_perm(v_k, v_k, (uchar8x16_t)v_xl); v_xh = vec_perm(v_k, v_k, (uchar8x16_t)v_xh); const int8x16_t v_yl = vec_xl(0, y0->qs); const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs); const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); const float32x4_t v_xyf = vec_float(v_xy); const float32x4_t v_d = vec_splats(GGML_E8M0_TO_FP32_HALF(x0->e) * GGML_CPU_FP16_TO_FP32(y0->d)); v_acc = vec_madd(v_xyf, v_d, v_acc); } sumf = vec_hsum_f32x4(v_acc); *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_mxfp4_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * 
GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0.0f; #if defined(__VXE__) || defined(__VXE2__) float32x4_t v_sum0 = vec_splats(0.0f); float32x4_t v_sum1 = vec_splats(0.0f); uint32_t qh0, qh1; uint64_t tmp0[4], tmp1[4]; const uint8x16_t v_m = vec_splats((uint8_t)0x0F); #pragma GCC unroll 4 for (; ib + 1 < nb; ib += 2) { const block_q5_0 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q5_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; memcpy(&qh0, x0->qh, sizeof(qh0)); memcpy(&qh1, x1->qh, sizeof(qh1)); tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; tmp0[3] = table_b2b_1[(qh0 >> 24) ]; tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; tmp1[3] = table_b2b_1[(qh1 >> 24) ]; int8x16_t v_qh0l = vec_xl(0, (const int8_t *)(tmp0 + 0)); int8x16_t v_qh0h = vec_xl(0, (const int8_t *)(tmp0 + 2)); int8x16_t v_qh1l = vec_xl(0, (const int8_t *)(tmp1 + 0)); int8x16_t v_qh1h = vec_xl(0, (const int8_t *)(tmp1 + 2)); // required for fixing the byteorder v_qh0l = vec_perm(v_qh0l, v_qh0l, v_kperm); v_qh0h = vec_perm(v_qh0h, v_qh0h, v_kperm); v_qh1l = vec_perm(v_qh1l, v_qh1l, v_kperm); v_qh1h = vec_perm(v_qh1h, v_qh1h, v_kperm); const uint8x16_t v_x0 = vec_xl(0, (const uint8_t *)x0->qs); const uint8x16_t v_x1 = vec_xl(0, (const uint8_t *)x1->qs); int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, v_m); int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); const int8x16_t v_x0lf = vec_sub(v_x0l, v_qh0l); const int8x16_t v_x0hf = vec_sub(v_x0h, v_qh0h); const int8x16_t v_x1lf = vec_sub(v_x1l, v_qh1l); const int8x16_t v_x1hf = vec_sub(v_x1h, v_qh1h); const int8x16_t v_y0l = vec_xl(0, (const int8_t *)y0->qs); const int8x16_t v_y0h = vec_xl(QK8_0/2, (const int8_t *)y0->qs); const int8x16_t v_y1l = vec_xl(0, (const int8_t *)y1->qs); const int8x16_t v_y1h = vec_xl(QK8_0/2, (const int8_t *)y1->qs); const int32x4_t v_xy0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0lf, v_y0l), v_x0hf, v_y0h); const int32x4_t v_xy1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1lf, v_y1l), v_x1hf, v_y1h); const float32x4_t v_xy0f = vec_float(v_xy0); const float32x4_t v_xy1f = vec_float(v_xy1); const float32x4_t v_d0 = vec_splats(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)); const float32x4_t v_d1 = vec_splats(GGML_CPU_FP16_TO_FP32(x1->d) * GGML_CPU_FP16_TO_FP32(y1->d)); v_sum0 = vec_madd(v_xy0f, v_d0, v_sum0); v_sum1 = vec_madd(v_xy1f, v_d1, v_sum1); } sumf += vec_hsum_f32x4(v_sum0) + vec_hsum_f32x4(v_sum1); #pragma GCC unroll 4 for (; ib < nb; ++ib) { const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; uint32_t qh; memcpy(&qh, x0->qh, sizeof(qh)); uint64_t tmp[4]; tmp[0] = table_b2b_1[(qh >> 0) & 0xFF]; tmp[1] = table_b2b_1[(qh >> 8) & 0xFF]; tmp[2] = table_b2b_1[(qh >> 16) & 0xFF]; tmp[3] = table_b2b_1[(qh >> 24) ]; int8x16_t v_qhl = vec_xl(0, (const int8_t *)(tmp + 0)); int8x16_t v_qhh = vec_xl(0, (const int8_t *)(tmp + 2)); // required for fixing the byteorder v_qhl = vec_perm(v_qhl, v_qhl, v_kperm); v_qhh = vec_perm(v_qhh, v_qhh, v_kperm); const uint8x16_t v_x = vec_xl(0, (const uint8_t *)x0->qs); int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); const int8x16_t v_xlf = vec_sub(v_xl, 
v_qhl); const int8x16_t v_xhf = vec_sub(v_xh, v_qhh); const int8x16_t v_yl = vec_xl(0, (const int8_t *)y0->qs); const int8x16_t v_yh = vec_xl(QK8_0/2, (const int8_t *)y0->qs); const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xlf, v_yl), v_xhf, v_yh); const float32x4_t v_xyf = vec_float(v_xy); const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)); const float32x4_t v_acc = vec_madd(v_xyf, v_d, vec_splats(0.0f)); sumf += vec_hsum_f32x4(v_acc); } *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0.0f; #if defined(__VXE__) || defined(__VXE2__) float32x4_t v_sum0 = vec_splats(0.0f); float32x4_t v_sum1 = vec_splats(0.0f); float summs0 = 0.0f; float summs1 = 0.0f; uint32_t qh0; uint32_t qh1; uint64_t tmp0[4]; uint64_t tmp1[4]; const uint8x16_t v_m = vec_splats((uint8_t)0x0F); #pragma GCC unroll 4 for (; ib + 1 < nb; ib += 2) { const block_q5_1 * GGML_RESTRICT x0 = &x[ib + 0]; const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; summs0 += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s); summs1 += GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s); memcpy(&qh0, x0->qh, sizeof(qh0)); memcpy(&qh1, x1->qh, sizeof(qh1)); tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; tmp0[3] = table_b2b_0[(qh0 >> 24) ]; tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; tmp1[3] = table_b2b_0[(qh1 >> 24) ]; int8x16_t v_qh0l = vec_xl(0, (const int8_t *)(tmp0 + 0)); int8x16_t v_qh0h = vec_xl(0, (const int8_t *)(tmp0 + 2)); int8x16_t v_qh1l = vec_xl(0, (const int8_t *)(tmp1 + 0)); int8x16_t v_qh1h = vec_xl(0, (const int8_t *)(tmp1 + 2)); // required for fixing the byteorder v_qh0l = vec_perm(v_qh0l, v_qh0l, v_kperm); v_qh0h = vec_perm(v_qh0h, v_qh0h, v_kperm); v_qh1l = vec_perm(v_qh1l, v_qh1l, v_kperm); v_qh1h = vec_perm(v_qh1h, v_qh1h, v_kperm); const uint8x16_t v_x0 = vec_xl(0, x0->qs); const uint8x16_t v_x1 = vec_xl(0, x1->qs); const int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); const int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); const int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, v_m); const int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); const int8x16_t v_x0lf = vec_or(v_x0l, v_qh0l); const int8x16_t v_x0hf = vec_or(v_x0h, v_qh0h); const int8x16_t v_x1lf = vec_or(v_x1l, v_qh1l); const int8x16_t v_x1hf = vec_or(v_x1h, v_qh1h); const int8x16_t v_y0l = vec_xl(0 , y0->qs); const int8x16_t v_y0h = vec_xl(QK8_1/2, y0->qs); const int8x16_t v_y1l = vec_xl(0 , y1->qs); const int8x16_t v_y1h = vec_xl(QK8_1/2, y1->qs); const int32x4_t v_xy0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0lf, v_y0l), v_x0hf, v_y0h); const int32x4_t v_xy1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1lf, v_y1l), v_x1hf, v_y1h); const float32x4_t v_xy0f = vec_float(v_xy0); const float32x4_t v_xy1f = 
vec_float(v_xy1); const float32x4_t v_d0 = vec_splats(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)); const float32x4_t v_d1 = vec_splats(GGML_CPU_FP16_TO_FP32(x1->d) * GGML_CPU_FP16_TO_FP32(y1->d)); v_sum0 = vec_madd(v_xy0f, v_d0, v_sum0); v_sum1 = vec_madd(v_xy1f, v_d1, v_sum1); } sumf += vec_hsum_f32x4(v_sum0) + vec_hsum_f32x4(v_sum1) + summs0 + summs1; #pragma GCC unroll 4 for (; ib < nb; ++ib) { const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; float summs = GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s); uint32_t qh; memcpy(&qh, x0->qh, sizeof(qh)); uint64_t tmp[4]; tmp[0] = table_b2b_0[(qh >> 0) & 0xFF]; tmp[1] = table_b2b_0[(qh >> 8) & 0xFF]; tmp[2] = table_b2b_0[(qh >> 16) & 0xFF]; tmp[3] = table_b2b_0[(qh >> 24) ]; int8x16_t v_qhl = vec_xl(0, (const int8_t *)(tmp + 0)); int8x16_t v_qhh = vec_xl(0, (const int8_t *)(tmp + 2)); // required for fixing the byteorder v_qhl = vec_perm(v_qhl, v_qhl, v_kperm); v_qhh = vec_perm(v_qhh, v_qhh, v_kperm); const uint8x16_t v_x = vec_xl(0, x0->qs); const int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); const int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); const int8x16_t v_xlf = vec_or(v_xl, v_qhl); const int8x16_t v_xhf = vec_or(v_xh, v_qhh); const int8x16_t v_yl = vec_xl(0 , y0->qs); const int8x16_t v_yh = vec_xl(QK8_1/2, y0->qs); const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xlf, v_yl), v_xhf, v_yh); const float32x4_t v_xyf = vec_float(v_xy); const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)); const float32x4_t v_acc = vec_madd(v_xyf, v_d, v_acc); sumf += vec_hsum_f32x4(v_acc) + summs; } *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__VXE__) || defined(__VXE2__) float32x4_t acc = vec_splats(0.0f); #pragma GCC unroll 8 for (; ib < nb; ++ib) { __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); const int8x16_t v_xl = vec_xl(0 , x[ib].qs); const int8x16_t v_xh = vec_xl(QK8_0/2, x[ib].qs); const int8x16_t v_yl = vec_xl(0 , y[ib].qs); const int8x16_t v_yh = vec_xl(QK8_0/2, y[ib].qs); const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); const float32x4_t v_xy = vec_float(v_xy_); const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); acc = vec_madd(v_xy, v_d, acc); } sumf = vec_hsum_f32x4(acc); *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__VXE__) || defined(__VXE2__) 
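// The kmask1/kmask2 shuffle below unpacks the 16 signed 6-bit sub-block scales of a
// q3_K block from the 12 packed bytes in x[i].scales. A scalar sketch of the same
// layout (as implied by the masks below; kept as a comment since it is illustration
// only, with sc standing for x[i].scales):
//
//     for (int j = 0; j < 16; ++j) {
//         const int lo = j < 8 ? (sc[j] & 0xF) : (sc[j - 8] >> 4);   // low 4 bits
//         const int hi = (sc[8 + (j & 3)] >> (2 * (j >> 2))) & 0x3;  // high 2 bits
//         scale[j] = (int8_t) ((lo | (hi << 4)) - 32);               // recenter to signed
//     }
//
// The aux/utmp dance below produces the same 16 values four at a time without
// byte-by-byte loads.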
uint32_t aux[3]; uint32_t utmp[4]; const int32x4_t v_z = vec_splat_s32(0); const uint8x16_t v_3m = vec_splat_u8(0x03); const uint8x16_t v_0c = vec_splat_u8(1); const uint8x16_t v_1c = vec_sl(v_0c, 1); const uint8x16_t v_2c = vec_sl(v_0c, 2); const uint8x16_t v_3c = vec_sl(v_0c, 3); uint8x16_t q3h[4]; uint8x16_t q3b[2]; int8x16_t q3bytes[4]; int8x16_t q8bytes[8]; uint8x16_t qhbits[2]; float sum = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * restrict x0l = x[i].qs; const uint8_t * restrict x0h = x[i].hmask; const int8_t * restrict y0 = y[i].qs; qhbits[0] = vec_xl(0 , x0h); qhbits[1] = vec_xl(16, x0h); int32_t isum = 0; memcpy(aux, x[i].scales, 12); utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); int8_t * scale = (int8_t *)utmp; for (int j = 0; j < 16; ++j) scale[j] -= 32; for (int j = 0; j < QK_K/128; ++j) { int32x4_t isum0, isum1, isum2, isum3; q3b[0] = vec_xl(0 , x0l); q3b[1] = vec_xl(16, x0l); x0l += 32; q8bytes[0] = vec_xl(0 , y0); q8bytes[1] = vec_xl(16 , y0); q8bytes[2] = vec_xl(32 , y0); q8bytes[3] = vec_xl(48 , y0); q8bytes[4] = vec_xl(64 , y0); q8bytes[5] = vec_xl(80 , y0); q8bytes[6] = vec_xl(96 , y0); q8bytes[7] = vec_xl(112, y0); y0 += 128; q3h[0] = vec_sl(vec_andc(v_0c, qhbits[0]), 2); q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2); q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1); q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1); q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]); q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]); q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]); q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]); isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[0]); isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[1]); isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[2]); isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[3]); isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; scale += 4; q3h[0] = vec_andc(v_2c, qhbits[0]); q3h[1] = vec_andc(v_2c, qhbits[1]); q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1); q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1); q3bytes[0] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]); q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]); q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]); q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]); isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[4]); isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[5]); isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[6]); isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[7]); isum += vec_hsum_i32x4(isum0) * scale[0]; isum += vec_hsum_i32x4(isum1) * scale[1]; isum += vec_hsum_i32x4(isum2) * scale[2]; isum += vec_hsum_i32x4(isum3) * scale[3]; scale += 4; if (j == 0) { qhbits[0] = vec_sr(qhbits[0], 4); qhbits[1] = vec_sr(qhbits[1], 4); } } sum += d * isum; } *s = sum; #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, 
vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined(__VXE__) || defined(__VXE2__) const uint8x16_t v_lm = vec_splat_u8(0x0F); const int32x4_t v_z = vec_splat_s32(0); uint8x16_t v_x[2]; int8x16_t v_xl[2]; int8x16_t v_y[2]; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh); memcpy(utmp, x[i].scales, 12); uint32x4_t v_mins8 = { 0 }; v_mins8 = vec_insert(utmp[1] & kmask1, v_mins8, 0); v_mins8 = vec_insert(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), v_mins8, 1); utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[0] &= kmask1; const int16x8_t v_minsh = (int16x8_t)vec_unpackh((uint8x16_t)v_mins8); const int32x4_t v_minso = vec_mulo(v_ysums, v_minsh); const int32x4_t v_minse = vec_mule(v_ysums, v_minsh); const int32x4_t v_mins = v_minso + v_minse; sumf -= dmin * (v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]); const uint8_t * scales = (const uint8_t *)utmp; const uint8_t * GGML_RESTRICT x0 = x[i].qs; const int8_t * GGML_RESTRICT y0 = y[i].qs; int32_t sumi1 = 0; int32_t sumi2 = 0; for (int j = 0; j < QK_K/64; ++j) { v_x[0] = vec_xl(0 , x0); v_x[1] = vec_xl(16, x0); x0 += 32; v_y[0] = vec_xl(0 , y0); v_y[1] = vec_xl(16, y0); y0 += 32; v_xl[0] = (int8x16_t)vec_and(v_x[0], v_lm); v_xl[1] = (int8x16_t)vec_and(v_x[1], v_lm); const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]); sumi1 += vec_hsum_i32x4(p1) * scales[2*j+0]; v_y[0] = vec_xl(0 , y0); v_y[1] = vec_xl(16, y0); y0 += 32; v_xl[0] = (int8x16_t)vec_sr(v_x[0], 4); v_xl[1] = (int8x16_t)vec_sr(v_x[1], 4); const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]); sumi2 += vec_hsum_i32x4(p2) * scales[2*j+1]; } sumf += d * (sumi1 + sumi2); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined(__VXE__) || defined(__VXE2__) const uint8x16_t v_lm = vec_splat_u8(0x0F); const uint8x16_t v_1m = vec_splat_u8(0x01); const uint8x16_t v_2m = vec_splat_u8(0x02); const int32x4_t v_z = vec_splat_s32(0); const uchar8x16_t v_minsm = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int8x16_t q5b[4]; uint8x16_t 
q5h[4]; uint8x16_t v_xl[2]; uint8x16_t v_xh[2]; int8x16_t v_y[4]; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const uint8x16_t v_mins16 = vec_xl(0, (const uint8_t *)utmp); const uint8x16_t v_mins8 = vec_perm(v_mins16, v_mins16, v_minsm); const int16x8_t v_minsh = (int16x8_t)vec_unpackh(v_mins8); const int32x4_t v_minsho = vec_mulo(v_ysums, v_minsh); const int32x4_t v_minshe = vec_mule(v_ysums, v_minsh); const int32x4_t v_mins = vec_add(v_minsho, v_minshe); const int32_t mins = vec_hsum_i32x4(v_mins); const uint8_t * scales = (const uint8_t *)utmp; const uint8_t * GGML_RESTRICT x0l = x[i].qs; const uint8_t * GGML_RESTRICT x0h = x[i].qh; const int8_t * GGML_RESTRICT y0 = y[i].qs; v_xh[0] = vec_xl(0 , x0h); v_xh[1] = vec_xl(16, x0h); int32_t sumi = 0; for (int j = 0; j < QK_K/64; ++j) { v_xl[0] = vec_xl(0 , x0l); v_xl[1] = vec_xl(16, x0l); x0l += 32; v_y[0] = vec_xl(0 , y0); v_y[1] = vec_xl(16, y0); v_y[2] = vec_xl(32, y0); v_y[3] = vec_xl(48, y0); y0 += 64; q5h[0] = vec_sl(vec_and(v_1m, v_xh[0]), 4); q5h[1] = vec_sl(vec_and(v_1m, v_xh[1]), 4); q5h[2] = vec_sl(vec_and(v_2m, v_xh[0]), 3); q5h[3] = vec_sl(vec_and(v_2m, v_xh[1]), 3); v_xh[0] = vec_sr(v_xh[0], 2); v_xh[1] = vec_sr(v_xh[1], 2); q5b[0] = (int8x16_t)vec_or(vec_and(v_xl[0], v_lm), q5h[0]); q5b[1] = (int8x16_t)vec_or(vec_and(v_xl[1], v_lm), q5h[1]); q5b[2] = (int8x16_t)vec_or(vec_sr(v_xl[0], 4), q5h[2]); q5b[3] = (int8x16_t)vec_or(vec_sr(v_xl[1], 4), q5h[3]); int32x4_t sumi0 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[0], v_y[0]), q5b[1], v_y[1]); int32x4_t sumi1 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[2], v_y[2]), q5b[3], v_y[3]); sumi += vec_hsum_i32x4(sumi0) * *scales++; sumi += vec_hsum_i32x4(sumi1) * *scales++; } sumf += d * sumi - dmin * mins; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__VXE__) || defined(__VXE2__) float sum = 0; // Lower 4-bit and upper 2-bit masks const uint8x16_t v_lm = vec_splat_u8(0x0F); const uint8x16_t v_um = vec_splat_u8(0x03); const int32x4_t v_z = vec_splat_s32(0); int8x16_t q6b[4]; uint8x16_t q6h[4]; uint8x16_t v_xl[4]; uint8x16_t v_xh[2]; int8x16_t v_y[4]; for (int i = 0; i < nb; ++i) { const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT x0l = x[i].ql; const uint8_t * GGML_RESTRICT x0h = x[i].qh; const int8_t * GGML_RESTRICT y0 = y[i].qs; const int8_t * GGML_RESTRICT scale = x[i].scales; const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); const int8x16_t v_scale = vec_xl(0, scale); const int16x8_t v_scalel = 
vec_unpackh(v_scale); const int16x8_t v_scaleh = vec_unpackl(v_scale); const int32x4_t v_minslo = vec_mulo(v_ysumsl, v_scalel); const int32x4_t v_minsle = vec_mule(v_ysumsl, v_scalel); const int32x4_t v_minsho = vec_mulo(v_ysumsh, v_scaleh); const int32x4_t v_minshe = vec_mule(v_ysumsh, v_scaleh); const int32x4_t v_mins = v_minslo + v_minsle + v_minsho + v_minshe; const int32_t mins = vec_hsum_i32x4(v_mins); int32_t isum = 0; for (int j = 0; j < QK_K/128; ++j) { // Load model upper 2 bits v_xh[0] = vec_xl(0 , x0h); v_xh[1] = vec_xl(16, x0h); x0h += 32; // Load model lower 4 bits v_xl[0] = vec_xl(0 , x0l); v_xl[1] = vec_xl(16, x0l); v_xl[2] = vec_xl(32, x0l); v_xl[3] = vec_xl(48, x0l); x0l += 64; // Load activation quants v_y[0] = vec_xl(0 , y0); v_y[1] = vec_xl(16, y0); v_y[2] = vec_xl(32, y0); v_y[3] = vec_xl(48, y0); y0 += 64; q6h[0] = vec_sl(vec_and(v_um, v_xh[0]), 4); q6h[1] = vec_sl(vec_and(v_um, v_xh[1]), 4); uint8x16_t shifted = vec_sr(v_xh[0], 2); q6h[2] = vec_sl(vec_and(v_um, shifted), 4); shifted = vec_sr(v_xh[1], 2); q6h[3] = vec_sl(vec_and(v_um, shifted), 4); q6b[0] = (int8x16_t)(vec_or(vec_and(v_xl[0], v_lm), q6h[0])); q6b[1] = (int8x16_t)(vec_or(vec_and(v_xl[1], v_lm), q6h[1])); q6b[2] = (int8x16_t)(vec_or(vec_and(v_xl[2], v_lm), q6h[2])); q6b[3] = (int8x16_t)(vec_or(vec_and(v_xl[3], v_lm), q6h[3])); int32x4_t summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]); int32x4_t summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]); int32x4_t summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]); int32x4_t summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]); isum += vec_hsum_i32x4(summs0) * scale[0] + vec_hsum_i32x4(summs1) * scale[1] + vec_hsum_i32x4(summs2) * scale[2] + vec_hsum_i32x4(summs3) * scale[3]; scale += 4; // Load activation quants v_y[0] = vec_xl(0 , y0); v_y[1] = vec_xl(16, y0); v_y[2] = vec_xl(32, y0); v_y[3] = vec_xl(48, y0); y0 += 64; shifted = vec_sr(v_xh[0], 4); q6h[0] = vec_sl(vec_and(v_um, shifted), 4); shifted = vec_sr(v_xh[1], 4); q6h[1] = vec_sl(vec_and(v_um, shifted), 4); shifted = vec_sr(v_xh[0], 6); q6h[2] = vec_sl(vec_and(v_um, shifted), 4); shifted = vec_sr(v_xh[1], 6); q6h[3] = vec_sl(vec_and(v_um, shifted), 4); q6b[0] = (int8x16_t)(vec_or(vec_sr(v_xl[0], 4), q6h[0])); q6b[1] = (int8x16_t)(vec_or(vec_sr(v_xl[1], 4), q6h[1])); q6b[2] = (int8x16_t)(vec_or(vec_sr(v_xl[2], 4), q6h[2])); q6b[3] = (int8x16_t)(vec_or(vec_sr(v_xl[3], 4), q6h[3])); summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]); summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]); summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]); summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]); isum += vec_hsum_i32x4(summs0) * scale[0] + vec_hsum_i32x4(summs1) * scale[1] + vec_hsum_i32x4(summs2) * scale[2] + vec_hsum_i32x4(summs3) * scale[3]; scale += 4; } sum += d_all * y[i].d * (isum - 32 * mins); } *s = sum; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } // #if defined(__VXE__) || defined(__VXE2__) // static const int8_t keven_signs_q2xs[1024] = { // 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, // 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, // 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, // 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, // 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, // 1, 1, -1, 1, -1, 1, 1, 
1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, // 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, // 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, // 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, // 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, // 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, // 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, // 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, // 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, // 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, // 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, // 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, // 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, // 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, // 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, // 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, // 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, // 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, // 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, // 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, // 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, // 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, // 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, // 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, // 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, // 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, // 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, // }; // #endif // void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { // assert(n % QK_K == 0); // assert(nrc == 1); // UNUSED(nrc); // UNUSED(bx); // UNUSED(by); // UNUSED(bs); // const block_iq2_xxs * GGML_RESTRICT x = vx; // const block_q8_K * GGML_RESTRICT y = vy; // const int nb = n / QK_K; // #if defined(__VXE__) || 
defined(__VXE2__) // const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; // uint32_t aux32[4]; // const uint8_t * aux8 = (const uint8_t *)aux32; // float sumf = 0; // for (int i = 0; i < nb; ++i) { // const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; // const uint16_t * GGML_RESTRICT q2 = x[i].qs; // const int8_t * GGML_RESTRICT q8 = y[i].qs; // float sumf1 = 0, sumf2 = 0; // for (int ib32 = 0; ib32 < QK_K/32; ib += 2) { // int8x16_t q8b0 = vec_xl( 0, q8); // int8x16_t qb81 = vec_xl(16, q8); // int8x16_t q8b2 = vec_xl(32, q8); // int8x16_t q8b3 = vec_xl(48, q8); // q8 += 64; // memcpy(aux32, q2, 4 * sizeof(uint32_t)); // q2 += 8; // int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) }; // int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) }; // int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) }; // int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) }; // int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127)) }; // int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) }; // int8x16_t q2s2 = { *(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127)) }; // int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) }; // q2u0 = vec_mul(q2u0, q2s0); // q2u1 = vec_mul(q2u1, q2s1); // q2u2 = vec_mul(q2u2, q2s2); // q2u3 = vec_mul(q2u3, q2s3); // const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1); // const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3); // sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28)); // sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28)); // } // sumf += d * (sumf1 + sumf2); // } // *s = 0.25f * sumf; // #else // uint32_t aux32[2]; // const uint8_t * aux8 = (const uint8_t *)aux32; // float sumf = 0.f; // for (int i = 0; i < nb; ++i) { // const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; // const uint16_t * GGML_RESTRICT q2 = x[i].qs; // const int8_t * GGML_RESTRICT q8 = y[i].qs; // int32_t bsum = 0; // for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { // memcpy(aux32, q2, 2*sizeof(uint32_t)); // q2 += 4; // const uint32_t ls = 2*(aux32[1] >> 28) + 1; // int32_t sumi = 0; // for (int l = 0; l < 4; ++l) { // const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); // const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; // for (int j = 0; j < 8; ++j) { // sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); // } // q8 += 8; // } // bsum += sumi * ls; // } // sumf += d * bsum; // } // *s = 0.125f * sumf; // #endif // } void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK4_NL == 0); static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); const block_iq4_nl * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK4_NL; int ib = 0; float sumf = 0; #if defined(__VXE__) || defined(__VXE2__) const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); const uint8x16_t v_m = vec_splat_u8(0x0F); for (; ib < nb; ++ib) { const block_iq4_nl * GGML_RESTRICT x0 = &x[ib]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; const uint8x16_t v_x = vec_xl(0, x0->qs); int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); v_xl = vec_perm(v_k, v_k, (uchar8x16_t)v_xl); v_xh = vec_perm(v_k, v_k, (uchar8x16_t)v_xh); const int8x16_t v_yl = vec_xl(0 , y0->qs); const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs); const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); sumf += GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d) * vec_hsum_i32x4(v_xy); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_iq4_nl_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_K == 0); const block_iq4_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__VXE__) || defined(__VXE2__) const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); const uint8x16_t v_m = vec_splat_u8(0x0F); float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; const int8_t * GGML_RESTRICT q8 = y[ibl].qs; uint16_t h = x[ibl].scales_h; int sumi1 = 0, sumi2 = 0; for (int ib = 0; ib < QK_K/64; ++ib) { const uint8x16_t v_x0 = vec_xl(0 , q4); const uint8x16_t v_x1 = vec_xl(QK4_NL/2, q4); q4 += 32; int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, v_m); int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); v_x0l = vec_perm(v_k, v_k, (uchar8x16_t)v_x0l); v_x0h = vec_perm(v_k, v_k, (uchar8x16_t)v_x0h); v_x1l = vec_perm(v_k, v_k, (uchar8x16_t)v_x1l); v_x1h = vec_perm(v_k, v_k, (uchar8x16_t)v_x1h); const int8x16_t v_y0 = vec_xl( 0, q8); const int8x16_t v_y1 = vec_xl(16, q8); const int8x16_t v_y2 = vec_xl(32, q8); const int8x16_t v_y3 = vec_xl(48, q8); q8 += 64; int32x4_t vsumi0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0l, v_y0), v_x0h, v_y1); int32x4_t vsumi1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1l, v_y2), v_x1h, v_y3); int ls1 = ((x[ibl].scales_l[ib] & 0xF) | ((h << 4) & 0x30)) - 32; int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; h >>= 4; sumi1 += vec_hsum_i32x4(vsumi0) * ls1; sumi2 += vec_hsum_i32x4(vsumi1) * ls2; } sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } 
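// Reference sketch (illustrative only, not part of the build): the VXE path in
// ggml_vec_dot_iq4_nl_q8_0 above decodes each 4-bit index through the
// kvalues_iq4nl codebook with vec_perm, accumulates the integer dot product
// against the q8_0 activations, and only then applies the two fp16 block
// scales. Below is a minimal scalar sketch of the same computation, assuming
// the block layouts used above (block_iq4_nl: fp16 scale d + QK4_NL/2 packed
// nibbles; block_q8_0: fp16 scale d + QK8_0 int8 quants) and the headers
// already included by this file; the helper name is ours, not part of ggml.
static float iq4_nl_q8_0_dot_scalar_sketch(const block_iq4_nl * GGML_RESTRICT x,
                                           const block_q8_0  * GGML_RESTRICT y,
                                           int nb) {
    float sumf = 0.0f;
    for (int ib = 0; ib < nb; ++ib) {
        int sumi = 0;
        for (int j = 0; j < QK4_NL/2; ++j) {
            // low nibble -> first half of the block, high nibble -> second half
            const int lo = kvalues_iq4nl[x[ib].qs[j] & 0x0F];
            const int hi = kvalues_iq4nl[x[ib].qs[j] >>   4];
            sumi += lo * y[ib].qs[j] + hi * y[ib].qs[j + QK4_NL/2];
        }
        sumf += GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) * (float) sumi;
    }
    return sumf;
}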
ggml-org-ggml-3678254/src/ggml-cpu/arch/wasm/000077500000000000000000000000001512524704700205525ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/wasm/quants.c000066400000000000000000001264021512524704700222360ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" #include #include #include #include #include // for qsort #include // for GGML_ASSERT #define GROUP_MAX_EPS 1e-15f #define GROUP_MAX_EPS_IQ3_XXS 1e-8f #define GROUP_MAX_EPS_IQ2_S 1e-8f #define GROUP_MAX_EPS_IQ1_M 1e-7f #define GROUP_MAX_EPS_IQ1_S 1e-12f #define UNUSED GGML_UNUSED #if defined(__wasm_simd128__) #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) #define B8(c,s ) B7(c,s, c), B7(c,s, s) // precomputed tables for expanding 8bits to 8 bytes: static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 #endif void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0 * GGML_RESTRICT y = vy; #if defined __wasm_simd128__ for (int i = 0; i < nb; i++) { v128_t srcv [8]; v128_t asrcv[8]; v128_t amaxv[8]; for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)), MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3))); const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); } } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined __wasm_simd128__ for (int i = 0; i < nb; i++) { v128_t srcv [8]; v128_t asrcv[8]; v128_t amaxv[8]; for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)), MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3))); const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_CPU_FP32_TO_FP16(d); v128_t accv = wasm_i32x4_splat(0); for (int j = 0; j < 8; j++) { const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); accv = wasm_i32x4_add(accv, vi); } y[i].s = GGML_CPU_FP32_TO_FP16( d * (wasm_i32x4_extract_lane(accv, 0) + wasm_i32x4_extract_lane(accv, 1) + wasm_i32x4_extract_lane(accv, 2) + wasm_i32x4_extract_lane(accv, 3))); } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } //===================================== Q8_K ============================================== void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { #ifdef __wasm_simd128__ assert(k % QK_K == 0); const int64_t nb = k / QK_K; block_q8_K * GGML_RESTRICT yc = y; // Cast to proper type for (int i = 0; i < nb; i++) { const float * x_block = x + i * QK_K; v128_t min_vec = wasm_v128_load(x_block); v128_t max_vec = min_vec; for (int j = 4; j < QK_K; j += 4) { v128_t x_vec = wasm_v128_load(x_block + j); max_vec = wasm_f32x4_pmax(max_vec, x_vec); min_vec = wasm_f32x4_pmin(min_vec, x_vec); } max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 2, 3, 0, 1)); max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 1, 0, 3, 2)); min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 2, 3, 0, 1)); min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 1, 0, 3, 2)); float max = wasm_f32x4_extract_lane(max_vec, 0); float min = wasm_f32x4_extract_lane(min_vec, 0); float amax = -min > max ? 
min : max; if (amax == 0.0f) { yc[i].d = 0.0f; const v128_t zero = wasm_i8x16_splat(0); for (int j = 0; j < QK_K; j += 16) { wasm_v128_store(yc[i].qs + j, zero); } continue; } const float iscale = -127.0f / amax; const v128_t scale_vec = wasm_f32x4_splat(iscale); // Process 16 elements per iteration for (int j = 0, jb = 0; j < QK_K; j += 16, jb++) { // Load and quantize 16 floats v128_t x0 = wasm_v128_load(x_block + j); v128_t x1 = wasm_v128_load(x_block + j + 4); v128_t x2 = wasm_v128_load(x_block + j + 8); v128_t x3 = wasm_v128_load(x_block + j + 12); v128_t q0 = wasm_f32x4_nearest(wasm_f32x4_mul(x0, scale_vec)); v128_t q1 = wasm_f32x4_nearest(wasm_f32x4_mul(x1, scale_vec)); v128_t q2 = wasm_f32x4_nearest(wasm_f32x4_mul(x2, scale_vec)); v128_t q3 = wasm_f32x4_nearest(wasm_f32x4_mul(x3, scale_vec)); // Convert to i32 with saturation v128_t i0 = wasm_i32x4_trunc_sat_f32x4(q0); v128_t i1 = wasm_i32x4_trunc_sat_f32x4(q1); v128_t i2 = wasm_i32x4_trunc_sat_f32x4(q2); v128_t i3 = wasm_i32x4_trunc_sat_f32x4(q3); // Pack into 16 i8 values v128_t i8 = wasm_i8x16_narrow_i16x8( wasm_i16x8_narrow_i32x4(i0, i1), wasm_i16x8_narrow_i32x4(i2, i3) ); wasm_v128_store(yc[i].qs + j, i8); // Calculate bsums using SIMD v128_t sum16 = wasm_i16x8_add( wasm_i16x8_extend_low_i8x16(i8), wasm_i16x8_extend_high_i8x16(i8) ); v128_t sum32 = wasm_i32x4_add( wasm_i32x4_extend_low_i16x8(sum16), wasm_i32x4_extend_high_i16x8(sum16) ); sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 2, 3, 0, 1)); sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 1, 0, 3, 2)); yc[i].bsums[jb] = wasm_i32x4_extract_lane(sum32, 0); } yc[i].d = 1.0f / iscale; } #else quantize_row_q8_K_ref(x, y, k); #endif } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined __wasm_simd128__ v128_t sumv = wasm_f32x4_splat(0.0f); const v128_t m4b = wasm_i8x16_splat(0x0F); const v128_t s8b = wasm_i8x16_splat(0x8); for (; ib + 1 < nb; ib += 2) { const block_q4_0 * GGML_RESTRICT x0 = &x[ib]; const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; // Load and process x0 v128_t v0_0 = wasm_v128_load(x0->qs); v128_t v0_0l = wasm_v128_and(v0_0, m4b); v128_t v0_0h = wasm_u8x16_shr(v0_0, 4); v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b); v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b); // Load y0 vectors v128_t y0_l = wasm_v128_load(y0->qs); v128_t y0_h = wasm_v128_load(y0->qs + 16); // Extend to i16x8 and compute dot products v128_t dx0l = wasm_i16x8_extend_low_i8x16(v0_0ls); v128_t dx0h = wasm_i16x8_extend_high_i8x16(v0_0ls); v128_t dx0hl = wasm_i16x8_extend_low_i8x16(v0_0hs); v128_t dx0hh = wasm_i16x8_extend_high_i8x16(v0_0hs); v128_t dy0ll = wasm_i16x8_extend_low_i8x16(y0_l); v128_t dy0lh = wasm_i16x8_extend_high_i8x16(y0_l); v128_t dy0hl = wasm_i16x8_extend_low_i8x16(y0_h); v128_t dy0hh = wasm_i16x8_extend_high_i8x16(y0_h); v128_t dp0 = wasm_i32x4_add( wasm_i32x4_add( wasm_i32x4_dot_i16x8(dx0l, dy0ll), wasm_i32x4_dot_i16x8(dx0h, dy0lh) ), wasm_i32x4_add( wasm_i32x4_dot_i16x8(dx0hl, dy0hl), 
wasm_i32x4_dot_i16x8(dx0hh, dy0hh) ) ); // Load and process x1 v128_t v0_1 = wasm_v128_load(x1->qs); v128_t v0_1l = wasm_v128_and(v0_1, m4b); v128_t v0_1h = wasm_u8x16_shr(v0_1, 4); v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b); v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b); // Load y1 vectors v128_t y1_l = wasm_v128_load(y1->qs); v128_t y1_h = wasm_v128_load(y1->qs + 16); // Extend to i16x8 and compute dot products v128_t dx1l = wasm_i16x8_extend_low_i8x16(v0_1ls); v128_t dx1h = wasm_i16x8_extend_high_i8x16(v0_1ls); v128_t dx1hl = wasm_i16x8_extend_low_i8x16(v0_1hs); v128_t dx1hh = wasm_i16x8_extend_high_i8x16(v0_1hs); v128_t dy1ll = wasm_i16x8_extend_low_i8x16(y1_l); v128_t dy1lh = wasm_i16x8_extend_high_i8x16(y1_l); v128_t dy1hl = wasm_i16x8_extend_low_i8x16(y1_h); v128_t dy1hh = wasm_i16x8_extend_high_i8x16(y1_h); v128_t dp1 = wasm_i32x4_add( wasm_i32x4_add( wasm_i32x4_dot_i16x8(dx1l, dy1ll), wasm_i32x4_dot_i16x8(dx1h, dy1lh) ), wasm_i32x4_add( wasm_i32x4_dot_i16x8(dx1hl, dy1hl), wasm_i32x4_dot_i16x8(dx1hh, dy1hh) ) ); // Accumulate results with scaling float scale0 = GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d); float scale1 = GGML_CPU_FP16_TO_FP32(x1->d) * GGML_CPU_FP16_TO_FP32(y1->d); sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp0), wasm_f32x4_splat(scale0))); sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp1), wasm_f32x4_splat(scale1))); } sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F) - 8; const int v1 = (x[ib].qs[j] >> 4) - 8; sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined __wasm_simd128__ v128_t sumv = wasm_f32x4_splat(0.0f); uint32_t qh_; uint64_t tmp[4]; // TODO: check if unrolling this is better for (; ib < nb; ++ib) { const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; const v128_t m4b = wasm_i8x16_splat(0x0F); // extract the 5th bit memcpy(&qh_, x0->qh, sizeof(qh_)); tmp[0] = table_b2b_1[(qh_ >> 0) & 0xFF]; tmp[1] = table_b2b_1[(qh_ >> 8) & 0xFF]; tmp[2] = table_b2b_1[(qh_ >> 16) & 0xFF]; tmp[3] = table_b2b_1[(qh_ >> 24) ]; const v128_t qhl = wasm_v128_load(tmp + 0); const v128_t qhh = wasm_v128_load(tmp + 2); const v128_t v0 = wasm_v128_load(x0->qs); // 4-bit -> 8-bit const v128_t v0l = wasm_v128_and (v0, m4b); const v128_t v0h = wasm_u8x16_shr(v0, 4); // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); // load y const v128_t v1l = wasm_v128_load(y0->qs); const v128_t v1h = wasm_v128_load(y0->qs + 16); // int8x16 -> int16x8 const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); const v128_t v0hfl = 
wasm_i16x8_extend_low_i8x16 (v0hf); const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); // dot product sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( wasm_i32x4_add( wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)))); } sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); *s = sumf; #else UNUSED(nb); UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined __wasm_simd128__ v128_t sumv = wasm_f32x4_splat(0.0f); float summs = 0.0f; uint32_t qh_; uint64_t tmp[4]; // TODO: check if unrolling this is better for (; ib < nb; ++ib) { const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; summs += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s); const v128_t m4b = wasm_i8x16_splat(0x0F); // extract the 5th bit memcpy(&qh_, x0->qh, sizeof(qh_)); tmp[0] = table_b2b_0[(qh_ >> 0) & 0xFF]; tmp[1] = table_b2b_0[(qh_ >> 8) & 0xFF]; tmp[2] = table_b2b_0[(qh_ >> 16) & 0xFF]; tmp[3] = table_b2b_0[(qh_ >> 24) ]; const v128_t qhl = wasm_v128_load(tmp + 0); const v128_t qhh = wasm_v128_load(tmp + 2); const v128_t v0 = wasm_v128_load(x0->qs); // 4-bit -> 8-bit const v128_t v0l = wasm_v128_and (v0, m4b); const v128_t v0h = wasm_u8x16_shr(v0, 4); // add high bit const v128_t v0lf = wasm_v128_or(v0l, qhl); const v128_t v0hf = wasm_v128_or(v0h, qhh); // load y const v128_t v1l = wasm_v128_load(y0->qs); const v128_t v1h = wasm_v128_load(y0->qs + 16); // int8x16 -> int16x8 const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); // dot product sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)))); } sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; *s = sumf; #else UNUSED(nb); UNUSED(ib); UNUSED(sumf); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, 
by, nrc); #endif } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined __wasm_simd128__ v128_t sumv = wasm_f32x4_splat(0.0f); for (; ib < nb; ++ib) { const block_q8_0 * GGML_RESTRICT x0 = &x[ib]; const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; const v128_t x0_0 = wasm_v128_load(x0->qs); const v128_t x0_1 = wasm_v128_load(x0->qs + 16); const v128_t y0_0 = wasm_v128_load(y0->qs); const v128_t y0_1 = wasm_v128_load(y0->qs + 16); // Extend 8-bit to 16-bit const v128_t x0_0l = wasm_i16x8_extend_low_i8x16(x0_0); const v128_t x0_0h = wasm_i16x8_extend_high_i8x16(x0_0); const v128_t x0_1l = wasm_i16x8_extend_low_i8x16(x0_1); const v128_t x0_1h = wasm_i16x8_extend_high_i8x16(x0_1); const v128_t y0_0l = wasm_i16x8_extend_low_i8x16(y0_0); const v128_t y0_0h = wasm_i16x8_extend_high_i8x16(y0_0); const v128_t y0_1l = wasm_i16x8_extend_low_i8x16(y0_1); const v128_t y0_1h = wasm_i16x8_extend_high_i8x16(y0_1); // Compute dot products const v128_t dx0_0 = wasm_i32x4_dot_i16x8(x0_0l, y0_0l); const v128_t dx0_1 = wasm_i32x4_dot_i16x8(x0_0h, y0_0h); const v128_t dx1_0 = wasm_i32x4_dot_i16x8(x0_1l, y0_1l); const v128_t dx1_1 = wasm_i32x4_dot_i16x8(x0_1h, y0_1h); // Sum all dot products const v128_t sum_dots = wasm_i32x4_add(wasm_i32x4_add(dx0_0, dx0_1), wasm_i32x4_add(dx1_0, dx1_1)); // Convert to float and accumulate const float scale = GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d); sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(sum_dots), wasm_f32x4_splat(scale))); } sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); *s = sumf; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); UNUSED(sumf); ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __wasm_simd128__ float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; // Vectorized summs calculation v128_t summs_vec = wasm_i32x4_splat(0); { v128_t sc_vec = wasm_v128_load(sc); v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4); v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper); v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper); v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]); v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]); summs_vec = wasm_i32x4_add( wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1), wasm_i32x4_dot_i16x8(sc_high, bsums2)), summs_vec ); summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1)); summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2)); } int32_t summs = wasm_i32x4_extract_lane(summs_vec, 0); // Vectorized isum calculation int32_t isum = 0; const uint8_t * sc_ptr = sc; const int k_iters = QK_K/128; for (int k = 0; k < 
k_iters; ++k) { v128_t isum_vec = wasm_i32x4_splat(0); int shift = 0; for (int j = 0; j < 4; ++j) { const int d0 = (sc_ptr[0] & 0xF); const int d1 = (sc_ptr[1] & 0xF); sc_ptr += 2; // Process first 16 elements v128_t q2_0 = wasm_v128_load(q2); v128_t q8_0 = wasm_v128_load(q8); v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift); v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03)); // Process next 16 elements v128_t q2_1 = wasm_v128_load(q2 + 16); v128_t q8_1 = wasm_v128_load(q8 + 16); v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift); v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03)); // Calculate dot products v128_t p0 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q8_0), wasm_i16x8_extend_low_i8x16(q2_bits_0) ); v128_t p1 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q8_0), wasm_i16x8_extend_high_i8x16(q2_bits_0) ); v128_t p2 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q8_1), wasm_i16x8_extend_low_i8x16(q2_bits_1) ); v128_t p3 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q8_1), wasm_i16x8_extend_high_i8x16(q2_bits_1) ); // Accumulate scaled results v128_t scaled = wasm_i32x4_add( wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)), wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1)) ); isum_vec = wasm_i32x4_add(isum_vec, scaled); q8 += 32; shift += 2; } q2 += 32; // Horizontal sum of isum_vec isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1)); isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2)); isum += wasm_i32x4_extract_lane(isum_vec, 0); } const float dall = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf += dall * isum - dmin * summs; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __wasm_simd128__ int8_t aux8[QK_K]; float sums[8] = {0}; uint32_t auxs[4]; float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT hm = x[i].hmask; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Process blocks with SIMD int8_t * a = aux8; uint8_t m = 1; for (int j = 0; j < QK_K; j += 128) { for (int shift = 0; shift <= 6; shift += 2) { v128_t v_m = wasm_i8x16_splat(m); for (int l = 0; l < 32; l += 16) { v128_t v_q3 = wasm_v128_load(q3 + l); v128_t v_shift = wasm_i8x16_shr(v_q3, shift); v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03)); v128_t v_hm = wasm_v128_load(hm + l); v128_t v_mask = wasm_v128_and(v_hm, v_m); v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0)); v_low2 = wasm_i8x16_sub(v_low2, wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask))); wasm_v128_store(a + l, v_low2); } a += 32; m <<= 1; } q3 += 32; } // Extract scales memcpy(auxs, x[i].scales, 12); uint32_t tmp = auxs[2]; auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); auxs[1] = (auxs[1] & 
kmask2) | (((tmp >> 2) & kmask1) << 4); const int8_t * scales = (const int8_t *)auxs; // SIMD dot product with register accumulators v128_t v_acc0 = wasm_i32x4_splat(0); v128_t v_acc1 = wasm_i32x4_splat(0); a = aux8; for (int j = 0; j < QK_K/16; ++j) { const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32); // Process 16 elements per iteration for (int k = 0; k < 2; ++k) { const v128_t v_q8 = wasm_i16x8_load8x8(q8); const v128_t v_a = wasm_i16x8_load8x8(a); v128_t v_prod = wasm_i16x8_mul(v_q8, v_a); v_prod = wasm_i16x8_mul(v_prod, v_scale); v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod)); v_acc1 = wasm_i32x4_add(v_acc1, wasm_i32x4_extend_high_i16x8(v_prod)); q8 += 8; a += 8; } } // Accumulate results const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const v128_t v_d = wasm_f32x4_splat(d); v128_t v_sum = wasm_f32x4_add( wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d) ); // Accumulate into sums vector wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum)); } // Horizontal sum v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4)); sumf = wasm_f32x4_extract_lane(v_sum, 0) + wasm_f32x4_extract_lane(v_sum, 1) + wasm_f32x4_extract_lane(v_sum, 2) + wasm_f32x4_extract_lane(v_sum, 3); *s = sumf; #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __wasm_simd128__ const uint8_t * scales = (const uint8_t*)&utmp[0]; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); // Corrected sign const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Process scales and mins memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; // Sum mins * q8sums int32_t sumi = 0; const int16_t * GGML_RESTRICT q8sums = y[i].bsums; const uint8_t * m = (const uint8_t *)&utmp[2]; for (int j = 0; j < 16; j += 2) { sumi += (q8sums[j] + q8sums[j+1]) * m[j/2]; } sumf -= dmin * sumi; int32_t sumi1 = 0; int32_t sumi2 = 0; for (int j = 0; j < QK_K/64; ++j) { // Load 64 4-bit weights (32 bytes) const v128_t q4x0 = wasm_v128_load(q4); const v128_t q4x1 = wasm_v128_load(q4 + 16); q4 += 32; // Split into low/high nibbles const v128_t q4l0 = wasm_v128_and(q4x0, wasm_i8x16_splat(0x0F)); const v128_t q4h0 = wasm_u8x16_shr(q4x0, 4); const v128_t q4l1 = wasm_v128_and(q4x1, wasm_i8x16_splat(0x0F)); const v128_t q4h1 = wasm_u8x16_shr(q4x1, 4); // Load 64 8-bit values (64 bytes) const v128_t q8x0 = wasm_v128_load(q8); const v128_t q8x1 = wasm_v128_load(q8 + 16); const v128_t q8x2 = wasm_v128_load(q8 + 32); const v128_t q8x3 = wasm_v128_load(q8 + 48); q8 += 64; // Low nibble products v128_t vacc1 
= wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q4l0), wasm_i16x8_extend_low_i8x16(q8x0) ); vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q4l0), wasm_i16x8_extend_high_i8x16(q8x0) )); vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q4l1), wasm_i16x8_extend_low_i8x16(q8x1) )); vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q4l1), wasm_i16x8_extend_high_i8x16(q8x1) )); // High nibble products v128_t vacc2 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q4h0), wasm_i16x8_extend_low_i8x16(q8x2) ); vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q4h0), wasm_i16x8_extend_high_i8x16(q8x2) )); vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q4h1), wasm_i16x8_extend_low_i8x16(q8x3) )); vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q4h1), wasm_i16x8_extend_high_i8x16(q8x3) )); // Accumulate scaled results int32_t vacc1_sum = wasm_i32x4_extract_lane(vacc1, 0) + wasm_i32x4_extract_lane(vacc1, 1) + wasm_i32x4_extract_lane(vacc1, 2) + wasm_i32x4_extract_lane(vacc1, 3); sumi1 += vacc1_sum * scales[2*j]; int32_t vacc2_sum = wasm_i32x4_extract_lane(vacc2, 0) + wasm_i32x4_extract_lane(vacc2, 1) + wasm_i32x4_extract_lane(vacc2, 2) + wasm_i32x4_extract_lane(vacc2, 3); sumi2 += vacc2_sum * scales[2*j+1]; } sumf += d * (sumi1 + sumi2); } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __wasm_simd128__ //const uint8_t * scales = (const uint8_t*)&utmp[0]; float sumf = 0; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); // Fixed sign const uint8_t * GGML_RESTRICT q5 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Process scales and mins memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; // Sum mins * q8sums int32_t sumi_mins = 0; const int16_t * GGML_RESTRICT q8sums = y[i].bsums; const uint8_t * m = (const uint8_t *)&utmp[2]; for (int j = 0; j < 16; j += 2) { sumi_mins += (q8sums[j] + q8sums[j+1]) * m[j/2]; } sumf -= dmin * sumi_mins; // Correct subtraction v128_t qh0 = wasm_v128_load(qh); v128_t qh1 = wasm_v128_load(qh + 16); const uint8_t * sc = (const uint8_t *)utmp; int32_t sumi = 0; for (int j = 0; j < QK_K/64; ++j) { const int shift = j * 2; v128_t qh_shift0 = wasm_u8x16_shr(qh0, shift); v128_t qh_shift1 = wasm_u8x16_shr(qh1, shift); v128_t qh_low0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x01)), 4); v128_t qh_high0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x02)), 3); v128_t qh_low1 
= wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x01)), 4); v128_t qh_high1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x02)), 3); v128_t q5_0 = wasm_v128_load(q5); v128_t q5_1 = wasm_v128_load(q5 + 16); q5 += 32; v128_t q5l_0 = wasm_v128_or(wasm_v128_and(q5_0, wasm_i8x16_splat(0x0F)), qh_low0); v128_t q5h_0 = wasm_v128_or(wasm_u8x16_shr(q5_0, 4), qh_high0); v128_t q5l_1 = wasm_v128_or(wasm_v128_and(q5_1, wasm_i8x16_splat(0x0F)), qh_low1); v128_t q5h_1 = wasm_v128_or(wasm_u8x16_shr(q5_1, 4), qh_high1); v128_t q8_0 = wasm_v128_load(q8); v128_t q8_1 = wasm_v128_load(q8 + 16); v128_t q8_2 = wasm_v128_load(q8 + 32); v128_t q8_3 = wasm_v128_load(q8 + 48); q8 += 64; // Process low quants v128_t pl0 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q5l_0), wasm_i16x8_extend_low_i8x16(q8_0) ); pl0 = wasm_i32x4_add(pl0, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q5l_0), wasm_i16x8_extend_high_i8x16(q8_0) )); v128_t pl1 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q5l_1), wasm_i16x8_extend_low_i8x16(q8_1) ); pl1 = wasm_i32x4_add(pl1, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q5l_1), wasm_i16x8_extend_high_i8x16(q8_1) )); v128_t sum_low = wasm_i32x4_add(pl0, pl1); // Process high quants v128_t ph0 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q5h_0), wasm_i16x8_extend_low_i8x16(q8_2) ); ph0 = wasm_i32x4_add(ph0, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q5h_0), wasm_i16x8_extend_high_i8x16(q8_2) )); v128_t ph1 = wasm_i32x4_dot_i16x8( wasm_i16x8_extend_low_i8x16(q5h_1), wasm_i16x8_extend_low_i8x16(q8_3) ); ph1 = wasm_i32x4_add(ph1, wasm_i32x4_dot_i16x8( wasm_i16x8_extend_high_i8x16(q5h_1), wasm_i16x8_extend_high_i8x16(q8_3) )); v128_t sum_high = wasm_i32x4_add(ph0, ph1); // Accumulate with scale factors int32_t sl = wasm_i32x4_extract_lane(sum_low, 0) + wasm_i32x4_extract_lane(sum_low, 1) + wasm_i32x4_extract_lane(sum_low, 2) + wasm_i32x4_extract_lane(sum_low, 3); int32_t sh = wasm_i32x4_extract_lane(sum_high, 0) + wasm_i32x4_extract_lane(sum_high, 1) + wasm_i32x4_extract_lane(sum_high, 2) + wasm_i32x4_extract_lane(sum_high, 3); sumi += sl * sc[2*j] + sh * sc[2*j+1]; } sumf += d * sumi; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __wasm_simd128__ int8_t aux8[QK_K] __attribute__((aligned(16))); int32_t aux32[8] __attribute__((aligned(16))) = {0}; float sums[8] __attribute__((aligned(16))) = {0}; for (int i = 0; i < nb; ++i) { // Unpack 6-bit quantized data into aux8 (unchanged) const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; int8_t * a = aux8; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; } a += 128; q4 += 64; qh += 32; } const int8_t * GGML_RESTRICT a_ptr = aux8; const int8_t 
* GGML_RESTRICT q8 = y[i].qs; v128_t acc0 = wasm_i32x4_splat(0); v128_t acc1 = wasm_i32x4_splat(0); for (int j = 0; j < QK_K/16; ++j) { const int scale = x[i].scales[j]; const v128_t vscale = wasm_i32x4_splat(scale); // Load 16 elements from a and q8 const v128_t a_vec = wasm_v128_load(a_ptr); const v128_t q8_vec = wasm_v128_load(q8); // Process low 8 elements v128_t a_low = wasm_i16x8_extend_low_i8x16(a_vec); v128_t q8_low = wasm_i16x8_extend_low_i8x16(q8_vec); v128_t prod_low = wasm_i16x8_mul(a_low, q8_low); v128_t prod_lo_lo = wasm_i32x4_extend_low_i16x8(prod_low); v128_t prod_lo_hi = wasm_i32x4_extend_high_i16x8(prod_low); // Process high 8 elements v128_t a_high = wasm_i16x8_extend_high_i8x16(a_vec); v128_t q8_high = wasm_i16x8_extend_high_i8x16(q8_vec); v128_t prod_high = wasm_i16x8_mul(a_high, q8_high); v128_t prod_hi_lo = wasm_i32x4_extend_low_i16x8(prod_high); v128_t prod_hi_hi = wasm_i32x4_extend_high_i16x8(prod_high); // Scale and accumulate prod_lo_lo = wasm_i32x4_mul(prod_lo_lo, vscale); prod_lo_hi = wasm_i32x4_mul(prod_lo_hi, vscale); prod_hi_lo = wasm_i32x4_mul(prod_hi_lo, vscale); prod_hi_hi = wasm_i32x4_mul(prod_hi_hi, vscale); acc0 = wasm_i32x4_add(acc0, wasm_i32x4_add(prod_lo_lo, prod_hi_lo)); acc1 = wasm_i32x4_add(acc1, wasm_i32x4_add(prod_lo_hi, prod_hi_hi)); a_ptr += 16; q8 += 16; } // Store accumulated results wasm_v128_store(&aux32[0], acc0); wasm_v128_store(&aux32[4], acc1); const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) { sums[l] += d * aux32[l]; } } // Sum final results float sumf = 0; for (int l = 0; l < 8; ++l) { sumf += sums[l]; } *s = sumf; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } ggml-org-ggml-3678254/src/ggml-cpu/arch/x86/000077500000000000000000000000001512524704700202305ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/arch/x86/cpu-feats.cpp000066400000000000000000000247471512524704700226410ustar00rootroot00000000000000#include "ggml-backend-impl.h" #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) #ifdef _MSC_VER #include #endif #include #include #include #include #include // ref: https://cdrdv2-public.intel.com/782156/325383-sdm-vol-2abcd.pdf struct cpuid_x86 { bool SSE3(void) { return f_1_ecx[0]; } bool PCLMULQDQ(void) { return f_1_ecx[1]; } bool MONITOR(void) { return f_1_ecx[3]; } bool SSSE3(void) { return f_1_ecx[9]; } bool FMA(void) { return f_1_ecx[12]; } bool CMPXCHG16B(void) { return f_1_ecx[13]; } bool SSE41(void) { return f_1_ecx[19]; } bool SSE42(void) { return f_1_ecx[20]; } bool MOVBE(void) { return f_1_ecx[22]; } bool POPCNT(void) { return f_1_ecx[23]; } bool AES(void) { return f_1_ecx[25]; } bool XSAVE(void) { return f_1_ecx[26]; } bool OSXSAVE(void) { return f_1_ecx[27]; } bool AVX(void) { return f_1_ecx[28]; } bool F16C(void) { return f_1_ecx[29]; } bool RDRAND(void) { return f_1_ecx[30]; } bool MSR(void) { return f_1_edx[5]; } bool CX8(void) { return f_1_edx[8]; } bool SEP(void) { return f_1_edx[11]; } bool CMOV(void) { return f_1_edx[15]; } bool CLFSH(void) { return f_1_edx[19]; } bool MMX(void) { return f_1_edx[23]; } bool FXSR(void) { return f_1_edx[24]; } bool SSE(void) { return f_1_edx[25]; } bool SSE2(void) { return f_1_edx[26]; } bool FSGSBASE(void) { return f_7_ebx[0]; } bool BMI1(void) { return f_7_ebx[3]; } bool HLE(void) { return is_intel && f_7_ebx[4]; } bool AVX2(void) { return f_7_ebx[5]; } bool BMI2(void) { return f_7_ebx[8]; } bool ERMS(void) { return f_7_ebx[9]; } bool 
INVPCID(void) { return f_7_ebx[10]; }
    bool RTM(void) { return is_intel && f_7_ebx[11]; }
    bool AVX512F(void) { return f_7_ebx[16]; }
    bool AVX512DQ(void) { return f_7_ebx[17]; }
    bool RDSEED(void) { return f_7_ebx[18]; }
    bool ADX(void) { return f_7_ebx[19]; }
    bool AVX512PF(void) { return f_7_ebx[26]; }
    bool AVX512ER(void) { return f_7_ebx[27]; }
    bool AVX512CD(void) { return f_7_ebx[28]; }
    bool AVX512BW(void) { return f_7_ebx[30]; }
    bool AVX512VL(void) { return f_7_ebx[31]; }
    bool SHA(void) { return f_7_ebx[29]; }
    bool PREFETCHWT1(void) { return f_7_ecx[0]; }
    bool LAHF(void) { return f_81_ecx[0]; }
    bool LZCNT(void) { return is_intel && f_81_ecx[5]; }
    bool ABM(void) { return is_amd && f_81_ecx[5]; }
    bool SSE4a(void) { return is_amd && f_81_ecx[6]; }
    bool XOP(void) { return is_amd && f_81_ecx[11]; }
    bool TBM(void) { return is_amd && f_81_ecx[21]; }
    bool SYSCALL(void) { return is_intel && f_81_edx[11]; }
    bool MMXEXT(void) { return is_amd && f_81_edx[22]; }
    bool RDTSCP(void) { return is_intel && f_81_edx[27]; }
    bool _3DNOWEXT(void) { return is_amd && f_81_edx[30]; }
    bool _3DNOW(void) { return is_amd && f_81_edx[31]; }
    bool AVX512_VBMI(void) { return f_7_ecx[1]; }
    bool AVX512_VNNI(void) { return f_7_ecx[11]; }
    bool AVX512_FP16(void) { return f_7_edx[23]; }
    bool AVX512_BF16(void) { return f_7_1_eax[5]; }
    bool AVX_VNNI(void) { return f_7_1_eax[4]; }
    bool AMX_TILE(void) { return f_7_edx[24]; }
    bool AMX_INT8(void) { return f_7_edx[25]; }
    bool AMX_FP16(void) { return f_7_1_eax[21]; }
    bool AMX_BF16(void) { return f_7_edx[22]; }

#ifdef _MSC_VER
    static void cpuid(int cpu_info[4], int eax) {
        __cpuid(cpu_info, eax);
    }
    static void cpuidex(int cpu_info[4], int eax, int ecx) {
        __cpuidex(cpu_info, eax, ecx);
    }
#else
    static void cpuid(int cpu_info[4], int eax) {
        __asm__ __volatile__(
            "cpuid"
            : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
            : "a"(eax), "c"(0));
    }
    static void cpuidex(int cpu_info[4], int eax, int ecx) {
        __asm__ __volatile__(
            "cpuid"
            : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
            : "a"(eax), "c"(ecx));
    }
#endif

    cpuid_x86() {
        std::array<int, 4> cpui;
        std::vector<std::array<int, 4>> data;

        // calling __cpuid with 0x0 as the function_id argument
        // gets the number of the highest valid function ID.
        cpuid(cpui.data(), 0);
        int n_ids = cpui[0];

        for (int i = 0; i <= n_ids; ++i) {
            cpuidex(cpui.data(), i, 0);
            data.push_back(cpui);
        }

        // capture vendor string
        char vendor[0x20] = {};
        *reinterpret_cast<int *>(vendor)     = data[0][1];
        *reinterpret_cast<int *>(vendor + 4) = data[0][3];
        *reinterpret_cast<int *>(vendor + 8) = data[0][2];
        this->vendor = vendor;
        if (this->vendor == "GenuineIntel") {
            is_intel = true;
        } else if (this->vendor == "AuthenticAMD") {
            is_amd = true;
        }

        // load bitset with flags for function 0x00000001
        if (n_ids >= 1) {
            f_1_ecx = data[1][2];
            f_1_edx = data[1][3];
        }

        // load bitset with flags for function 0x00000007
        if (n_ids >= 7) {
            f_7_ebx = data[7][1];
            f_7_ecx = data[7][2];
            f_7_edx = data[7][3];
            cpuidex(cpui.data(), 7, 1);
            f_7_1_eax = cpui[0];
        }

        // calling __cpuid with 0x80000000 as the function_id argument
        // gets the number of the highest valid extended ID.
cpuid(cpui.data(), 0x80000000); unsigned int n_ex_ids = cpui[0]; std::vector> ext_data; for (unsigned int i = 0x80000000; i <= n_ex_ids; ++i) { cpuidex(cpui.data(), i, 0); ext_data.push_back(cpui); } // load bitset with flags for function 0x80000001 if (n_ex_ids >= 0x80000001) { f_81_ecx = ext_data[1][2]; f_81_edx = ext_data[1][3]; } // interpret CPU brand string if reported char brand[0x40] = {}; if (n_ex_ids >= 0x80000004) { std::memcpy(brand, ext_data[2].data(), sizeof(cpui)); std::memcpy(brand + 16, ext_data[3].data(), sizeof(cpui)); std::memcpy(brand + 32, ext_data[4].data(), sizeof(cpui)); this->brand = brand; } } bool is_intel = false; bool is_amd = false; std::string vendor; std::string brand; std::bitset<32> f_1_ecx; std::bitset<32> f_1_edx; std::bitset<32> f_7_ebx; std::bitset<32> f_7_ecx; std::bitset<32> f_7_edx; std::bitset<32> f_7_1_eax; std::bitset<32> f_81_ecx; std::bitset<32> f_81_edx; }; #if 0 void test_x86_is() { cpuid_x86 is; printf("CPU Vendor: %s\n", is.vendor.c_str()); printf("Brand: %s\n", is.brand.c_str()); printf("is_intel: %d\n", is.is_intel); printf("is_amd: %d\n", is.is_amd); printf("sse3: %d\n", is.SSE3()); printf("pclmulqdq: %d\n", is.PCLMULQDQ()); printf("ssse3: %d\n", is.SSSE3()); printf("fma: %d\n", is.FMA()); printf("cmpxchg16b: %d\n", is.CMPXCHG16B()); printf("sse41: %d\n", is.SSE41()); printf("sse42: %d\n", is.SSE42()); printf("movbe: %d\n", is.MOVBE()); printf("popcnt: %d\n", is.POPCNT()); printf("aes: %d\n", is.AES()); printf("xsave: %d\n", is.XSAVE()); printf("osxsave: %d\n", is.OSXSAVE()); printf("avx: %d\n", is.AVX()); printf("f16c: %d\n", is.F16C()); printf("rdrand: %d\n", is.RDRAND()); printf("msr: %d\n", is.MSR()); printf("cx8: %d\n", is.CX8()); printf("sep: %d\n", is.SEP()); printf("cmov: %d\n", is.CMOV()); printf("clflush: %d\n", is.CLFSH()); printf("mmx: %d\n", is.MMX()); printf("fxsr: %d\n", is.FXSR()); printf("sse: %d\n", is.SSE()); printf("sse2: %d\n", is.SSE2()); printf("fsgsbase: %d\n", is.FSGSBASE()); printf("bmi1: %d\n", is.BMI1()); printf("hle: %d\n", is.HLE()); printf("avx2: %d\n", is.AVX2()); printf("bmi2: %d\n", is.BMI2()); printf("erms: %d\n", is.ERMS()); printf("invpcid: %d\n", is.INVPCID()); printf("rtm: %d\n", is.RTM()); printf("avx512f: %d\n", is.AVX512F()); printf("rdseed: %d\n", is.RDSEED()); printf("adx: %d\n", is.ADX()); printf("avx512pf: %d\n", is.AVX512PF()); printf("avx512er: %d\n", is.AVX512ER()); printf("avx512cd: %d\n", is.AVX512CD()); printf("sha: %d\n", is.SHA()); printf("prefetchwt1: %d\n", is.PREFETCHWT1()); printf("lahf: %d\n", is.LAHF()); printf("lzcnt: %d\n", is.LZCNT()); printf("abm: %d\n", is.ABM()); printf("sse4a: %d\n", is.SSE4a()); printf("xop: %d\n", is.XOP()); printf("tbm: %d\n", is.TBM()); printf("syscall: %d\n", is.SYSCALL()); printf("mmxext: %d\n", is.MMXEXT()); printf("rdtscp: %d\n", is.RDTSCP()); printf("3dnowext: %d\n", is._3DNOWEXT()); printf("3dnow: %d\n", is._3DNOW()); printf("avx512_vbmi: %d\n", is.AVX512_VBMI()); printf("avx512_vnni: %d\n", is.AVX512_VNNI()); printf("avx512_fp16: %d\n", is.AVX512_FP16()); printf("avx512_bf16: %d\n", is.AVX512_BF16()); printf("amx_tile: %d\n", is.AMX_TILE()); printf("amx_int8: %d\n", is.AMX_INT8()); printf("amx_fp16: %d\n", is.AMX_FP16()); printf("amx_bf16: %d\n", is.AMX_BF16()); } #endif static int ggml_backend_cpu_x86_score() { // FIXME: this does not check for OS support int score = 1; cpuid_x86 is; #ifdef GGML_FMA if (!is.FMA()) { return 0; } score += 1; #endif #ifdef GGML_F16C if (!is.F16C()) { return 0; } score += 1<<1; #endif #ifdef GGML_SSE42 if 
static int ggml_backend_cpu_x86_score() {
    // FIXME: this does not check for OS support

    int score = 1;
    cpuid_x86 is;

#ifdef GGML_FMA
    if (!is.FMA()) { return 0; }
    score += 1;
#endif
#ifdef GGML_F16C
    if (!is.F16C()) { return 0; }
    score += 1<<1;
#endif
#ifdef GGML_SSE42
    if (!is.SSE42()) { return 0; }
    score += 1<<2;
#endif
#ifdef GGML_BMI2
    if (!is.BMI2()) { return 0; }
    score += 1<<3;
#endif
#ifdef GGML_AVX
    if (!is.AVX()) { return 0; }
    score += 1<<4;
#endif
#ifdef GGML_AVX2
    if (!is.AVX2()) { return 0; }
    score += 1<<5;
#endif
#ifdef GGML_AVX_VNNI
    if (!is.AVX_VNNI()) { return 0; }
    score += 1<<6;
#endif
#ifdef GGML_AVX512
    if (!is.AVX512F()) { return 0; }
    if (!is.AVX512CD()) { return 0; }
    if (!is.AVX512VL()) { return 0; }
    if (!is.AVX512DQ()) { return 0; }
    if (!is.AVX512BW()) { return 0; }
    score += 1<<7;
#endif
#ifdef GGML_AVX512_VBMI
    if (!is.AVX512_VBMI()) { return 0; }
    score += 1<<8;
#endif
#ifdef GGML_AVX512_BF16
    if (!is.AVX512_BF16()) { return 0; }
    score += 1<<9;
#endif
#ifdef GGML_AVX512_VNNI
    if (!is.AVX512_VNNI()) { return 0; }
    score += 1<<10;
#endif
#ifdef GGML_AMX_INT8
    if (!is.AMX_INT8()) { return 0; }
    score += 1<<11;
#endif

    return score;
}

GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_x86_score)

#endif // defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
ggml-org-ggml-3678254/src/ggml-cpu/arch/x86/quants.c000066400000000000000000005461151512524704700217210ustar00rootroot00000000000000
#define GGML_COMMON_IMPL_C
#include "ggml-common.h"

#include "ggml-quants.h"
#include "ggml-impl.h"
#include "ggml-cpu.h"
#include "simd-mappings.h"

#include "../../quants.h"
#include "../../ggml-cpu-impl.h"

#include <string.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#define GROUP_MAX_EPS 1e-15f
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
#define GROUP_MAX_EPS_IQ2_S 1e-8f
#define GROUP_MAX_EPS_IQ1_M 1e-7f
#define GROUP_MAX_EPS_IQ1_S 1e-12f

#define UNUSED GGML_UNUSED

// some compilers don't provide _mm256_set_m128i, e.g. gcc 7
#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}

#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32  = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32  = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

#if defined(__AVX2__) || defined(__AVX512F__)
static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) {
    const __m256i ax = _mm256_sign_epi8(x, x);
    const __m256i sy = _mm256_sign_epi8(y,
x); return _mm256_maddubs_epi16(ax, sy); } // spread 32 bits to 32 bytes { 0x00, 0xFF } static inline __m256i bytes_from_bits_32(const uint8_t * x) { uint32_t x32; memcpy(&x32, x, sizeof(uint32_t)); const __m256i shuf_mask = _mm256_set_epi64x( 0x0303030303030303, 0x0202020202020202, 0x0101010101010101, 0x0000000000000000); __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); bytes = _mm256_or_si256(bytes, bit_mask); return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); } // Unpack 32 4-bit fields into 32 bytes // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); const __m256i lowMask = _mm256_set1_epi8( 0xF ); return _mm256_and_si256(lowMask, bytes); } // add int16_t pairwise and return as float vector static inline __m256 sum_i16_pairs_float(const __m256i x) { const __m256i ones = _mm256_set1_epi16(1); const __m256i summed_pairs = _mm256_madd_epi16(ones, x); return _mm256_cvtepi32_ps(summed_pairs); } static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { #if defined(__AVX512VNNI__) && defined(__AVX512VL__) const __m256i zero = _mm256_setzero_si256(); const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); return _mm256_cvtepi32_ps(summed_pairs); #elif defined(__AVXVNNI__) const __m256i zero = _mm256_setzero_si256(); const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); return _mm256_cvtepi32_ps(summed_pairs); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); return sum_i16_pairs_float(dot); #endif } // multiply int8_t, add results pairwise twice and return as float vector static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { #if __AVXVNNIINT8__ const __m256i zero = _mm256_setzero_si256(); const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); return _mm256_cvtepi32_ps(summed_pairs); #else // Get absolute values of x vectors const __m256i ax = _mm256_sign_epi8(x, x); // Sign the values of the y vectors const __m256i sy = _mm256_sign_epi8(y, x); return mul_sum_us8_pairs_float(ax, sy); #endif } static inline __m128i packNibbles( __m256i bytes ) { // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh #if __AVX512F__ const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh return _mm256_cvtepi16_epi8(bytes); // abcd_efgh #else const __m256i lowByte = _mm256_set1_epi16( 0xFF ); __m256i high = _mm256_andnot_si256( lowByte, bytes ); __m256i low = _mm256_and_si256( lowByte, bytes ); high = _mm256_srli_epi16( high, 4 ); bytes = _mm256_or_si256( low, high ); // Compress uint16_t lanes into bytes __m128i r0 = _mm256_castsi256_si128( bytes ); __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); return _mm_packus_epi16( r0, r1 ); #endif } #elif defined(__AVX__) static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) { // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh const __m128i lowByte = _mm_set1_epi16( 0xFF ); __m128i high = _mm_andnot_si128( lowByte, bytes1 ); __m128i low = _mm_and_si128( lowByte, bytes1 ); high = _mm_srli_epi16( high, 4 ); bytes1 = _mm_or_si128( low, high ); high = _mm_andnot_si128( 
lowByte, bytes2 ); low = _mm_and_si128( lowByte, bytes2 ); high = _mm_srli_epi16( high, 4 ); bytes2 = _mm_or_si128( low, high ); return _mm_packus_epi16( bytes1, bytes2); } static inline __m128i mul_add_epi8_sse(const __m128i x, const __m128i y) { const __m128i ax = _mm_sign_epi8(x, x); const __m128i sy = _mm_sign_epi8(y, x); return _mm_maddubs_epi16(ax, sy); } // spread 32 bits to 32 bytes { 0x00, 0xFF } static inline __m256i bytes_from_bits_32(const uint8_t * x) { uint32_t x32; memcpy(&x32, x, sizeof(uint32_t)); const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); bytesl = _mm_or_si128(bytesl, bit_mask); bytesh = _mm_or_si128(bytesh, bit_mask); bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); return MM256_SET_M128I(bytesh, bytesl); } // Unpack 32 4-bit fields into 32 bytes // The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { // Load 16 bytes from memory __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); __m128i tmph = _mm_srli_epi16(tmpl, 4); const __m128i lowMask = _mm_set1_epi8(0xF); tmpl = _mm_and_si128(lowMask, tmpl); tmph = _mm_and_si128(lowMask, tmph); return MM256_SET_M128I(tmph, tmpl); } // add int16_t pairwise and return as float vector static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { const __m128i ones = _mm_set1_epi16(1); const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); return _mm256_cvtepi32_ps(summed_pairs); } static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { const __m128i axl = _mm256_castsi256_si128(ax); const __m128i axh = _mm256_extractf128_si256(ax, 1); const __m128i syl = _mm256_castsi256_si128(sy); const __m128i syh = _mm256_extractf128_si256(sy, 1); // Perform multiplication and create 16-bit values const __m128i dotl = _mm_maddubs_epi16(axl, syl); const __m128i doth = _mm_maddubs_epi16(axh, syh); return sum_i16_pairs_float(doth, dotl); } // multiply int8_t, add results pairwise twice and return as float vector static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { const __m128i xl = _mm256_castsi256_si128(x); const __m128i xh = _mm256_extractf128_si256(x, 1); const __m128i yl = _mm256_castsi256_si128(y); const __m128i yh = _mm256_extractf128_si256(y, 1); // Get absolute values of x vectors const __m128i axl = _mm_sign_epi8(xl, xl); const __m128i axh = _mm_sign_epi8(xh, xh); // Sign the values of the y vectors const __m128i syl = _mm_sign_epi8(yl, xl); const __m128i syh = _mm_sign_epi8(yh, xh); // Perform multiplication and create 16-bit values const __m128i dotl = _mm_maddubs_epi16(axl, syl); const __m128i doth = _mm_maddubs_epi16(axh, syh); return sum_i16_pairs_float(doth, dotl); } // larger version of mul_sum_i8_pairs_float where x and y are each represented by four 128-bit vectors static inline __m256 mul_sum_i8_quad_float(const __m128i x_1_0, const __m128i x_1_1, const __m128i x_2_0, const __m128i x_2_1, const __m128i y_1_0, const __m128i y_1_1, const __m128i y_2_0, const __m128i y_2_1) { 
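// Illustrative summary added by the editor (not original commentary): each of
// the two 32-byte blocks is handled as two 16-byte halves. mul_add_epi8_sse
// widens the signed i8*i8 products to pairwise i16 sums, _mm_madd_epi16 then
// reduces them to i32 partial sums, and the two per-block results are packed
// into one __m256 of floats so the caller can apply both block scales
// (e.g. via quad_fp16_delta_float) with a single multiply.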
const __m128i mone = _mm_set1_epi16(1); const __m128i p16_1_0 = mul_add_epi8_sse(x_1_0, y_1_0); const __m128i p16_1_1 = mul_add_epi8_sse(x_1_1, y_1_1); const __m128i p16_2_0 = mul_add_epi8_sse(x_2_0, y_2_0); const __m128i p16_2_1 = mul_add_epi8_sse(x_2_1, y_2_1); const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, mone); const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, mone); const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, mone); const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, mone); const __m128i p_1 = _mm_add_epi32(p_1_0, p_1_1); const __m128i p_2 = _mm_add_epi32(p_2_0, p_2_1); return _mm256_cvtepi32_ps(MM256_SET_M128I(p_2, p_1)); } // quad fp16 delta calculation static inline __m256 quad_fp16_delta_float(const float x0, const float y0, const float x1, const float y1) { // GGML_CPU_FP16_TO_FP32 is faster than Intel F16C return _mm256_set_m128(_mm_set1_ps(GGML_CPU_FP16_TO_FP32(x1) * GGML_CPU_FP16_TO_FP32(y1)), _mm_set1_ps(GGML_CPU_FP16_TO_FP32(x0) * GGML_CPU_FP16_TO_FP32(y0))); } static inline __m256 quad_mx_delta_float(const int8_t x0, const float y0, const int8_t x1, const float y1) { return _mm256_set_m128(_mm_set1_ps(GGML_E8M0_TO_FP32_HALF(x1) * GGML_CPU_FP16_TO_FP32(y1)), _mm_set1_ps(GGML_E8M0_TO_FP32_HALF(x0) * GGML_CPU_FP16_TO_FP32(y0))); } #endif #elif defined(__SSSE3__) // horizontally add 4x4 floats static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { __m128 res_0 =_mm_hadd_ps(a, b); __m128 res_1 =_mm_hadd_ps(c, d); __m128 res =_mm_hadd_ps(res_0, res_1); res =_mm_hadd_ps(res, res); res =_mm_hadd_ps(res, res); return _mm_cvtss_f32(res); } #endif // __AVX__ || __AVX2__ || __AVX512F__ #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0 * GGML_RESTRICT y = vy; #if defined(__AVX2__) || defined(__AVX__) for (int i = 0; i < nb; i++) { // Load elements into 4 AVX vectors __m256 v0 = _mm256_loadu_ps( x ); __m256 v1 = _mm256_loadu_ps( x + 8 ); __m256 v2 = _mm256_loadu_ps( x + 16 ); __m256 v3 = _mm256_loadu_ps( x + 24 ); x += 32; // Compute max(abs(e)) for the block const __m256 signBit = _mm256_set1_ps( -0.0f ); __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); const float maxScalar = _mm_cvtss_f32( max4 ); // Quantize these floats const float d = maxScalar / 127.f; y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); // Apply the multiplier v0 = _mm256_mul_ps( v0, mul ); v1 = _mm256_mul_ps( v1, mul ); v2 = _mm256_mul_ps( v2, mul ); v3 = _mm256_mul_ps( v3, mul ); // Round to nearest integer v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); // Convert floats to integers __m256i i0 = _mm256_cvtps_epi32( v0 ); __m256i i1 = _mm256_cvtps_epi32( v1 ); __m256i i2 = _mm256_cvtps_epi32( v2 ); __m256i i3 = _mm256_cvtps_epi32( v3 ); #if defined(__AVX2__) // Convert int32 to int16 i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 // Convert int16 to int8 i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 // We got our precious signed bytes, but the order is now wrong // These AVX2 pack instructions process 16-byte pieces independently // The following instruction is fixing the order const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); i0 = _mm256_permutevar8x32_epi32( i0, perm ); _mm256_storeu_si256((__m256i *)y[i].qs, i0); #else // Since we don't have in AVX some necessary functions, // we split the registers in half and call AVX2 analogs from SSE __m128i ni0 = _mm256_castsi256_si128( i0 ); __m128i ni1 = _mm256_extractf128_si256( i0, 1); __m128i ni2 = _mm256_castsi256_si128( i1 ); __m128i ni3 = _mm256_extractf128_si256( i1, 1); __m128i ni4 = _mm256_castsi256_si128( i2 ); __m128i ni5 = _mm256_extractf128_si256( i2, 1); __m128i ni6 = _mm256_castsi256_si128( i3 ); __m128i ni7 = _mm256_extractf128_si256( i3, 1); // Convert int32 to int16 ni0 = _mm_packs_epi32( ni0, ni1 ); ni2 = _mm_packs_epi32( ni2, ni3 ); ni4 = _mm_packs_epi32( ni4, ni5 ); ni6 = _mm_packs_epi32( ni6, ni7 ); // Convert int16 to int8 ni0 = _mm_packs_epi16( ni0, ni2 ); ni4 = _mm_packs_epi16( ni4, ni6 ); _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); #endif } #else GGML_UNUSED(nb); // scalar quantize_row_q8_0_ref(x, y, k); #endif } void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK8_1 == 0); const int nb = k / QK8_1; block_q8_1 * GGML_RESTRICT y = vy; #if defined(__AVX2__) || defined(__AVX__) for (int i = 0; i < nb; i++) { // Load elements into 4 AVX vectors __m256 v0 = _mm256_loadu_ps( x ); __m256 v1 = _mm256_loadu_ps( x + 8 ); __m256 v2 = _mm256_loadu_ps( x + 16 ); __m256 v3 = _mm256_loadu_ps( x + 24 ); x += 32; // Compute max(abs(e)) for the block const __m256 signBit = _mm256_set1_ps( -0.0f ); __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); const float max_scalar = _mm_cvtss_f32( max4 ); // Quantize these floats const float d = max_scalar / 127.f; y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); // Apply the multiplier v0 = _mm256_mul_ps( v0, mul ); v1 = _mm256_mul_ps( v1, mul ); v2 = _mm256_mul_ps( v2, mul ); v3 = _mm256_mul_ps( v3, mul ); // Round to nearest integer v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); // Convert floats to integers __m256i i0 = _mm256_cvtps_epi32( v0 ); __m256i i1 = _mm256_cvtps_epi32( v1 ); __m256i i2 = _mm256_cvtps_epi32( v2 ); __m256i i3 = _mm256_cvtps_epi32( v3 ); #if defined(__AVX2__) // Compute the sum of the quants and set y[i].s y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); // Convert int32 to int16 i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 // Convert int16 to int8 i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 // We got our precious signed bytes, but the order is now wrong // These AVX2 pack instructions process 16-byte pieces independently // The following instruction is fixing the order const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); i0 = _mm256_permutevar8x32_epi32( i0, perm ); _mm256_storeu_si256((__m256i *)y[i].qs, i0); #else // Since we don't have in AVX some necessary functions, // we split the registers in half and call AVX2 analogs from SSE __m128i ni0 = _mm256_castsi256_si128( i0 ); __m128i ni1 = _mm256_extractf128_si256( i0, 1); __m128i ni2 = _mm256_castsi256_si128( i1 ); __m128i ni3 = _mm256_extractf128_si256( i1, 1); __m128i ni4 = _mm256_castsi256_si128( i2 ); __m128i ni5 = _mm256_extractf128_si256( i2, 1); __m128i ni6 = _mm256_castsi256_si128( i3 ); __m128i ni7 = _mm256_extractf128_si256( i3, 1); // Compute the sum of the quants and set y[i].s const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); // Convert int32 to int16 ni0 = _mm_packs_epi32( ni0, ni1 ); ni2 = _mm_packs_epi32( ni2, ni3 ); ni4 = _mm_packs_epi32( ni4, ni5 ); ni6 = _mm_packs_epi32( ni6, ni7 ); // Convert int16 to int8 ni0 = _mm_packs_epi16( ni0, ni2 ); ni4 = _mm_packs_epi16( ni4, ni6 ); _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); #endif } #else GGML_UNUSED(nb); // scalar quantize_row_q8_1_ref(x, y, k); #endif } // placeholder implementation for Apple targets void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_K_ref(x, y, k); } //===================================== Dot products ================================= // // Helper functions // #if __AVX__ || __AVX2__ || __AVX512F__ // shuffles to pick the required scales in dot products static inline __m256i get_scale_shuffle_q3k(int i) { static const uint8_t k_shuffle[128] = { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, 
12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, }; return _mm256_loadu_si256((const __m256i*)k_shuffle + i); } static inline __m256i get_scale_shuffle_k4(int i) { static const uint8_t k_shuffle[256] = { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 }; return _mm256_loadu_si256((const __m256i*)k_shuffle + i); } static inline __m128i get_scale_shuffle(int i) { static const uint8_t k_shuffle[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 }; return _mm_loadu_si128((const __m128i*)k_shuffle + i); } #endif void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__AVX2__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ const __m256 d = _mm256_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); __m256i qx = bytes_from_nibbles_32(x[ib].qs); // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
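// Worked example of the mapping (added for clarity, per the usual q4_0
// layout): a stored nibble n decodes to d * (n - 8), so
//   n = 0 -> -8*d,   n = 8 -> 0,   n = 15 -> +7*d.
// Subtracting 8 here lets the signed int8 dot product below absorb the offset
// instead of handling it per element, exactly as the scalar tail loop does
// with (qs[j] & 0x0F) - 8.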
const __m256i off = _mm256_set1_epi8( 8 ); qx = _mm256_sub_epi8( qx, off ); __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_i8_pairs_float(qx, qy); /* Multiply q with scale and accumulate */ acc = _mm256_fmadd_ps( d, q, acc ); } sumf = hsum_float_8(acc); #elif defined(__AVX__) __m256 accum = _mm256_setzero_ps(); for (; ib + 1 < nb; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); const __m128i q4b_1_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_1), _mm_set1_epi8(8)); const __m128i q4b_1_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_1, 4)), _mm_set1_epi8(8)); const __m128i q4b_2_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_2), _mm_set1_epi8(8)); const __m128i q4b_2_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_2, 4)), _mm_set1_epi8(8)); const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); const __m128i p_1 = _mm_add_epi16(p16_1_0, p16_1_1); const __m128i p_2 = _mm_add_epi16(p16_2_0, p16_2_1); const __m256 p = sum_i16_pairs_float(p_2, p_1); const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); } sumf = hsum_float_8(accum); #elif defined(__SSSE3__) // set constants const __m128i lowMask = _mm_set1_epi8(0xF); const __m128i off = _mm_set1_epi8(8); // Initialize accumulator with zeros __m128 acc_0 = _mm_setzero_ps(); __m128 acc_1 = _mm_setzero_ps(); __m128 acc_2 = _mm_setzero_ps(); __m128 acc_3 = _mm_setzero_ps(); for (; ib + 1 < nb; ib += 2) { _mm_prefetch(&x[ib] + sizeof(block_q4_0), _MM_HINT_T0); _mm_prefetch(&y[ib] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 0 and 1 const __m128 d_0_1 = _mm_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[ib].qs); __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); __m128i by_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); bx_0 = _mm_sub_epi8(bx_0, off); const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[ib].qs + 16)); bx_1 = _mm_sub_epi8(bx_1, off); const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); _mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); _mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 const __m128 d_2_3 = _mm_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib + 1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) ); const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); __m128i by_2 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); bx_2 = _mm_sub_epi8(bx_2, off); const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); __m128i by_3 = _mm_loadu_si128((const __m128i 
*)(y[ib + 1].qs + 16)); bx_3 = _mm_sub_epi8(bx_3, off); const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); // Convert int32_t to float __m128 p0 = _mm_cvtepi32_ps(i32_0); __m128 p1 = _mm_cvtepi32_ps(i32_1); __m128 p2 = _mm_cvtepi32_ps(i32_2); __m128 p3 = _mm_cvtepi32_ps(i32_3); // Apply the scale __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); // Acummulate acc_0 = _mm_add_ps(p0_d, acc_0); acc_1 = _mm_add_ps(p1_d, acc_1); acc_2 = _mm_add_ps(p2_d, acc_2); acc_3 = _mm_add_ps(p3_d, acc_3); } sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); #endif for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F) - 8; const int v1 = (x[ib].qs[j] >> 4) - 8; sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; } void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; #if defined(__AVX2__) || defined(__AVX__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); float summs = 0; // Main loop for (; ib < nb; ++ib) { const float d0 = GGML_CPU_FP16_TO_FP32(x[ib].d); const float d1 = GGML_CPU_FP16_TO_FP32(y[ib].d); summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); const __m256 d0v = _mm256_set1_ps( d0 ); const __m256 d1v = _mm256_set1_ps( d1 ); // Compute combined scales const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes const __m256i qx = bytes_from_nibbles_32(x[ib].qs); const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[ib].qs ); const __m256 xy = mul_sum_us8_pairs_float(qx, qy); // Accumulate d0*d1*x*y #if defined(__AVX2__) acc = _mm256_fmadd_ps( d0d1, xy, acc ); #else acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); #endif } *s = hsum_float_8(acc) + summs; #else UNUSED(nb); UNUSED(x); UNUSED(y); UNUSED(ib); ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_MXFP4 == 0); static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); const block_mxfp4 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK_MXFP4; int ib = 0; float sumf = 0; #if defined __AVX2__ const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_mxfp4); const __m128i m4b = _mm_set1_epi8(0x0f); const __m256i mone = _mm256_set1_epi16(1); __m256 accum1 = _mm256_setzero_ps(); __m256 accum2 = _mm256_setzero_ps(); for (; ib + 1 < nb; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); const __m256i q4b_1 = 
MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_E8M0_TO_FP32_HALF(x[ib + 0].e)), _mm256_cvtepi32_ps(p_1), accum1); accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_E8M0_TO_FP32_HALF(x[ib + 1].e)), _mm256_cvtepi32_ps(p_2), accum2); } sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); #elif defined __AVX__ const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_mxfp4); const __m128i m4b = _mm_set1_epi8(0x0f); __m256 accum = _mm256_setzero_ps(); for (; ib + 1 < nb; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); const __m256 deltas = quad_mx_delta_float(x[ib].e, y[ib].d, x[ib + 1].e, y[ib + 1].d); accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); } sumf = hsum_float_8(accum); #endif for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e); int sumi1 = 0; int sumi2 = 0; for (int j = 0; j < QK_MXFP4/2; ++j) { sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; #if defined(__AVX2__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); qx = _mm256_or_si256(qx, bxhi); __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_i8_pairs_float(qx, qy); /* Multiply q with scale and 
accumulate */ acc = _mm256_fmadd_ps(d, q, acc); } *s = hsum_float_8(acc); #elif defined(__AVX__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); __m128i mask = _mm_set1_epi8((char)0xF0); // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); const __m256i bxhi = bytes_from_bits_32(x[ib].qh); __m128i bxhil = _mm256_castsi256_si128(bxhi); __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); bxhil = _mm_andnot_si128(bxhil, mask); bxhih = _mm_andnot_si128(bxhih, mask); __m128i bxl = _mm256_castsi256_si128(bx_0); __m128i bxh = _mm256_extractf128_si256(bx_0, 1); bxl = _mm_or_si128(bxl, bxhil); bxh = _mm_or_si128(bxh, bxhih); bx_0 = MM256_SET_M128I(bxh, bxl); const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0); /* Multiply q with scale and accumulate */ acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); } *s = hsum_float_8(acc); #else UNUSED(nb); UNUSED(ib); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; #if defined(__AVX2__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); float summs = 0.0f; // Main loop for (; ib < nb; ++ib) { const __m256 dx = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d)); summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); qx = _mm256_or_si256(qx, bxhi); const __m256 dy = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib].d)); const __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_us8_pairs_float(qx, qy); acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); } *s = hsum_float_8(acc) + summs; #elif defined(__AVX__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); __m128i mask = _mm_set1_epi8(0x10); float summs = 0.0f; // Main loop for (; ib < nb; ++ib) { const __m256 dx = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d)); summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); const __m256i bxhi = bytes_from_bits_32(x[ib].qh); __m128i bxhil = _mm256_castsi256_si128(bxhi); __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); bxhil = _mm_and_si128(bxhil, mask); bxhih = _mm_and_si128(bxhih, mask); __m128i bxl = _mm256_castsi256_si128(bx_0); __m128i bxh = _mm256_extractf128_si256(bx_0, 1); bxl = _mm_or_si128(bxl, bxhil); bxh = _mm_or_si128(bxh, bxhih); bx_0 = MM256_SET_M128I(bxh, bxl); const __m256 dy = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib].d)); const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); } *s = hsum_float_8(acc) + summs; #else UNUSED(nb); UNUSED(ib); UNUSED(x); UNUSED(y); ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, 
nrc); #endif } void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; #if defined(__AVX2__) // Initialize accumulator with zeros __m256 acc = _mm256_setzero_ps(); // Main loop for (; ib < nb; ++ib) { // Compute combined scale for the block const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i qx = _mm256_loadu_si256((const __m256i *)x[ib].qs); __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_i8_pairs_float(qx, qy); // Multiply q with scale and accumulate acc = _mm256_fmadd_ps( d, q, acc ); } sumf = hsum_float_8(acc); #elif defined(__AVX__) __m256 accum = _mm256_setzero_ps(); for (; ib + 1 < nb; ib += 2) { const __m128i qx_1_0 = _mm_loadu_si128((const __m128i *)x[ib].qs); const __m128i qx_1_1 = _mm_loadu_si128((const __m128i *)x[ib].qs + 1); const __m128i qx_2_0 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); const __m128i qx_2_1 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs + 1); const __m128i qy_1_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); const __m128i qy_1_1 = _mm_loadu_si128((const __m128i *)y[ib].qs + 1); const __m128i qy_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); const __m128i qy_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); const __m256 p = mul_sum_i8_quad_float(qx_1_0, qx_1_1, qx_2_0, qx_2_1, qy_1_0, qy_1_1, qy_2_0, qy_2_1); const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); } sumf = hsum_float_8(accum); #endif for (; ib < nb; ++ib) { int sumi = 0; for (int j = 0; j < qk; j++) { sumi += x[ib].qs[j]*y[ib].qs[j]; } sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; } void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_tq1_0 * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__AVX2__) __m256 sumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { // 16-bit sums __m256i sumi0 = _mm256_setzero_si256(); __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); // first 32 bytes of 5 elements { __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs)); // 8-bit multiplies with shifts, masks and adds __m256i qx1 = _mm256_add_epi8(qx0, _mm256_add_epi8(qx0, qx0)); // 1 * 3 __m256i qx2 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx0, 3), _mm256_set1_epi8(-8)), qx0); // 1 * 9 __m256i qx3 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx1, 3), _mm256_set1_epi8(-8)), qx1); // 3 * 9 __m256i qx4 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx2, 3), _mm256_set1_epi8(-8)), qx2); // 9 * 9 // TODO: can _mm256_mulhi_epu16 be faster even if 16-bits? 
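// Note added for clarity: the code below implements "digit = (3*q) >> 8" for
// unsigned bytes without an 8-bit multiply. subs_epu8(q, 1) followed by two
// _mm256_avg_epu8 calls computes roughly 3*q/4, whose top two bits (>> 6,
// mask 3) equal the top two bits of 3*q, i.e. a trit in {0, 1, 2}. The
// shift-and-add ladder above (x3, x9, x27, x81 via the masked adds) first
// moves each packed base-3 digit of x[i].qs into position. The {0, 1, 2}
// values are re-centered to {-1, 0, +1} later by subtracting y[i].bsums from
// the accumulated sums.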
// Cancel the +1 from avg so that it behaves like a halving add qx0 = _mm256_subs_epu8(qx0, _mm256_set1_epi8(1)); qx1 = _mm256_subs_epu8(qx1, _mm256_set1_epi8(1)); qx2 = _mm256_subs_epu8(qx2, _mm256_set1_epi8(1)); qx3 = _mm256_subs_epu8(qx3, _mm256_set1_epi8(1)); qx4 = _mm256_subs_epu8(qx4, _mm256_set1_epi8(1)); // Multiply by 3 and get the top 2 bits qx0 = _mm256_avg_epu8(qx0, _mm256_avg_epu8(qx0, _mm256_setzero_si256())); qx1 = _mm256_avg_epu8(qx1, _mm256_avg_epu8(qx1, _mm256_setzero_si256())); qx2 = _mm256_avg_epu8(qx2, _mm256_avg_epu8(qx2, _mm256_setzero_si256())); qx3 = _mm256_avg_epu8(qx3, _mm256_avg_epu8(qx3, _mm256_setzero_si256())); qx4 = _mm256_avg_epu8(qx4, _mm256_avg_epu8(qx4, _mm256_setzero_si256())); qx0 = _mm256_and_si256(_mm256_srli_epi16(qx0, 6), _mm256_set1_epi8(3)); qx1 = _mm256_and_si256(_mm256_srli_epi16(qx1, 6), _mm256_set1_epi8(3)); qx2 = _mm256_and_si256(_mm256_srli_epi16(qx2, 6), _mm256_set1_epi8(3)); qx3 = _mm256_and_si256(_mm256_srli_epi16(qx3, 6), _mm256_set1_epi8(3)); qx4 = _mm256_and_si256(_mm256_srli_epi16(qx4, 6), _mm256_set1_epi8(3)); const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 0)); const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 32)); const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 64)); const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 96)); const __m256i qy4 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 128)); qx0 = _mm256_maddubs_epi16(qx0, qy0); qx1 = _mm256_maddubs_epi16(qx1, qy1); qx2 = _mm256_maddubs_epi16(qx2, qy2); qx3 = _mm256_maddubs_epi16(qx3, qy3); qx4 = _mm256_maddubs_epi16(qx4, qy4); sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); sumi2 = _mm256_add_epi16(sumi2, qx4); } // last 16 bytes of 5-element, along with the 4 bytes of 4 elements { __m128i qx0 = _mm_loadu_si128((const __m128i *) (x[i].qs + 32)); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned __m256i qx5_l = _mm256_cvtepu8_epi16(_mm_set1_epi32(qh)); __m128i qx1 = _mm_add_epi8(qx0, _mm_add_epi8(qx0, qx0)); // 1 * 3 __m128i qx2 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx0, 3), _mm_set1_epi8(-8)), qx0); // 1 * 9 __m128i qx3 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx1, 3), _mm_set1_epi8(-8)), qx1); // 3 * 9 __m128i qx4 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx2, 3), _mm_set1_epi8(-8)), qx2); // 9 * 9 __m256i qx01 = MM256_SET_M128I(qx1, qx0); __m256i qx23 = MM256_SET_M128I(qx3, qx2); // avx2 does not have 8-bit multiplies, so 16-bit it is. 
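// (Clarifying note added by the editor) x[i].qh packs the last 16 ternary
// values, 4 per byte. Widening those bytes to 16 bits and multiplying by the
// per-digit powers of three {1, 3, 9, 27} plays the same role as the
// shift-and-add ladder used for qs; after masking back to 8 bits and
// repacking, the bytes go through the identical "multiply by 3, take the top
// 2 bits" extraction below.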
qx5_l = _mm256_mullo_epi16(qx5_l, _mm256_set_epi16(27, 27, 27, 27, 9, 9, 9, 9, 3, 3, 3, 3, 1, 1, 1, 1)); qx5_l = _mm256_and_si256(qx5_l, _mm256_set1_epi16(0xFF)); __m128i qx5 = _mm_packus_epi16(_mm256_castsi256_si128(qx5_l), _mm256_extracti128_si256(qx5_l, 1)); __m256i qx45 = MM256_SET_M128I(qx5, qx4); // Cancel the +1 from avg so that it behaves like a halving add qx01 = _mm256_subs_epu8(qx01, _mm256_set1_epi8(1)); qx23 = _mm256_subs_epu8(qx23, _mm256_set1_epi8(1)); qx45 = _mm256_subs_epu8(qx45, _mm256_set1_epi8(1)); // Multiply by 3 and get the top 2 bits qx01 = _mm256_avg_epu8(qx01, _mm256_avg_epu8(qx01, _mm256_setzero_si256())); qx23 = _mm256_avg_epu8(qx23, _mm256_avg_epu8(qx23, _mm256_setzero_si256())); qx45 = _mm256_avg_epu8(qx45, _mm256_avg_epu8(qx45, _mm256_setzero_si256())); qx01 = _mm256_and_si256(_mm256_srli_epi16(qx01, 6), _mm256_set1_epi8(3)); qx23 = _mm256_and_si256(_mm256_srli_epi16(qx23, 6), _mm256_set1_epi8(3)); qx45 = _mm256_and_si256(_mm256_srli_epi16(qx45, 6), _mm256_set1_epi8(3)); const __m256i qy01 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 160)); const __m256i qy23 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 192)); const __m256i qy45 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 224)); qx01 = _mm256_maddubs_epi16(qx01, qy01); qx23 = _mm256_maddubs_epi16(qx23, qy23); qx45 = _mm256_maddubs_epi16(qx45, qy45); sumi0 = _mm256_add_epi16(sumi0, qx01); sumi1 = _mm256_add_epi16(sumi1, qx23); sumi2 = _mm256_add_epi16(sumi2, qx45); } const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d)); sumi0 = _mm256_sub_epi16(sumi0, ysum); sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(sumi1, sumi2)); sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); } *s = hsum_float_8(sumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_tq2_0 * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__AVX2__) __m256 sumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { // 16-bit sums, because 256*127 still fits __m256i sumi0 = _mm256_setzero_si256(); __m256i sumi1 = _mm256_setzero_si256(); for (size_t j = 0; j < sizeof(x->qs); j += 32) { __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs + j)); __m256i qx1 = _mm256_srli_epi16(qx0, 2); __m256i qx2 = _mm256_srli_epi16(qx0, 4); __m256i qx3 = _mm256_srli_epi16(qx0, 6); // 0, 1, 2 (should not be 3) qx0 = _mm256_and_si256(qx0, _mm256_set1_epi8(3)); qx1 = _mm256_and_si256(qx1, _mm256_set1_epi8(3)); qx2 = _mm256_and_si256(qx2, _mm256_set1_epi8(3)); qx3 = _mm256_and_si256(qx3, _mm256_set1_epi8(3)); const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 0)); const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 32)); const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 64)); const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 96)); qx0 = _mm256_maddubs_epi16(qx0, qy0); qx1 = _mm256_maddubs_epi16(qx1, qy1); qx2 = _mm256_maddubs_epi16(qx2, qy2); qx3 = _mm256_maddubs_epi16(qx3, qy3); sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, 
qx1)); sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); } const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d)); sumi0 = _mm256_add_epi16(sumi0, sumi1); sumi0 = _mm256_sub_epi16(sumi0, ysum); sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); } *s = hsum_float_8(sumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __AVX2__ const __m256i m3 = _mm256_set1_epi8(3); const __m128i m4 = _mm_set1_epi8(0xF); __m256 acc = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); const __m256i mins = _mm256_cvtepi8_epi16(mins8); const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums)); acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; __m256i sumi = _mm256_setzero_si256(); for (int j = 0; j < QK_K/128; ++j) { const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32; const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q2_0 = _mm256_and_si256(q2bits, m3); const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); p0 = _mm256_add_epi32(p0, p1); p2 = _mm256_add_epi32(p2, p3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } *s = hsum_float_8(acc); #elif defined __AVX__ const __m128i m3 = _mm_set1_epi8(0x3); const __m128i m4 = 
_mm_set1_epi8(0xF); const __m128i m2 = _mm_set1_epi8(0x2); __m256 acc = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; // load mins and scales from block_q2_K.scales[QK_K/16] const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0])); const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8])); // sumf += -dmin * summs in 32bits*8 acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); const __m128i scales[2] = { scales_0, scales_1 }; __m128i sumi_0 = _mm_setzero_si128(); __m128i sumi_1 = _mm_setzero_si128(); for (int j = 0; j < QK_K/128; ++j) { // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; // load 2bits*16*8 from block_q2_K.qs[QK_K/4] __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; const __m128i q2_0 = _mm_and_si128(q2bits, m3); const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; const __m128i q2_1 = _mm_and_si128(q2bits, m3); const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4); __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 __m128i shuffle = _mm_set1_epi16(0x0100); p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); shuffle = _mm_add_epi16(shuffle, m2); p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); shuffle = _mm_add_epi16(shuffle, m2); p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); shuffle = 
_mm_add_epi16(shuffle, m2); p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); shuffle = _mm_add_epi16(shuffle, m2); p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); shuffle = _mm_add_epi16(shuffle, m2); p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); shuffle = _mm_add_epi16(shuffle, m2); p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); shuffle = _mm_add_epi16(shuffle, m2); p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); p0 = _mm_add_epi32(p0, p1); p2 = _mm_add_epi32(p2, p3); p4 = _mm_add_epi32(p4, p5); p6 = _mm_add_epi32(p6, p7); // isum in 32bits*4*2 sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); } // sumf += dall * isum - dmin * summs in 32bits __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc); } *s = hsum_float_8(acc); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __AVX2__ const __m256i m3 = _mm256_set1_epi8(3); const __m256i mone = _mm256_set1_epi8(1); const __m128i m32 = _mm_set1_epi8(32); __m256 acc = _mm256_setzero_ps(); uint32_t aux[3]; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Set up scales memcpy(aux, x[i].scales, 12); __m128i scales128 = _mm_set_epi32( ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); scales128 = _mm_sub_epi8(scales128, m32); const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; // high bit const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask); // integer accumulator __m256i sumi = _mm256_setzero_si256(); int bit = 0; int is = 0; for (int j = 0; j < QK_K/128; ++j) { // load low 2 bits const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; // prepare low and high bits const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); const __m256i q3h_3 = 
_mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; // load Q8 quants const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, // and 2 if the high bit was set) __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); p16_0 = _mm256_sub_epi16(p16_0, q8s_0); p16_1 = _mm256_sub_epi16(p16_1, q8s_1); p16_2 = _mm256_sub_epi16(p16_2, q8s_2); p16_3 = _mm256_sub_epi16(p16_3, q8s_3); // multiply with scales p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); // accumulate p16_0 = _mm256_add_epi32(p16_0, p16_1); p16_2 = _mm256_add_epi32(p16_2, p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); } // multiply with block scale and accumulate acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } *s = hsum_float_8(acc); #elif defined __AVX__ const __m128i m3 = _mm_set1_epi8(3); const __m128i mone = _mm_set1_epi8(1); const __m128i m32 = _mm_set1_epi8(32); const __m128i m2 = _mm_set1_epi8(2); __m256 acc = _mm256_setzero_ps(); const uint32_t *aux; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Set up scales aux = (const uint32_t *)x[i].scales; __m128i scales128 = _mm_set_epi32( ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); scales128 = _mm_sub_epi8(scales128, m32); const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); const __m128i scales[2] = { scales_0, scales_1 }; // high bit *128*2 from block_q3_K.hmask[QK_K/8] const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]); const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]); // integer accumulator __m128i sumi_0 = _mm_setzero_si128(); __m128i sumi_1 = _mm_setzero_si128(); for (int j = 0; j < QK_K/128; ++j) { // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; // prepare low and high bits const int bit = j << 2; const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); const 
__m128i q3l_1 = _mm_and_si128(q3bits_1, m3); const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2); const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2); const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2); const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2); const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2); const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2); // load Q8 quants from block_q8_K.qs[QK_K] const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, // and 2 if the high bit was set) __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); p16_0 = _mm_sub_epi16(p16_0, q8s_0); p16_1 = _mm_sub_epi16(p16_1, q8s_1); p16_2 = _mm_sub_epi16(p16_2, q8s_2); p16_3 = _mm_sub_epi16(p16_3, q8s_3); p16_4 = _mm_sub_epi16(p16_4, q8s_4); p16_5 = _mm_sub_epi16(p16_5, q8s_5); p16_6 = _mm_sub_epi16(p16_6, q8s_6); p16_7 = _mm_sub_epi16(p16_7, q8s_7); // multiply with scales __m128i shuffle = _mm_set1_epi16(0x0100); p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); shuffle = _mm_add_epi16(shuffle, m2); p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); shuffle = _mm_add_epi16(shuffle, m2); p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); shuffle = _mm_add_epi16(shuffle, m2); p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); shuffle = _mm_add_epi16(shuffle, m2); p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); shuffle = _mm_add_epi16(shuffle, m2); p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); shuffle = _mm_add_epi16(shuffle, m2); p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); shuffle = _mm_add_epi16(shuffle, m2); p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); // accumulate p16_0 = _mm_add_epi32(p16_0, p16_1); p16_2 = _mm_add_epi32(p16_2, p16_3); p16_4 = _mm_add_epi32(p16_4, p16_5); p16_6 = _mm_add_epi32(p16_6, p16_7); sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); } // multiply with block scale and accumulate __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); } *s = hsum_float_8(acc); #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __AVX2__ const __m256i m4 = _mm256_set1_epi8(0xF); __m256 acc = _mm256_setzero_ps(); __m128 acc_m = _mm_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = 
((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); const __m256i scales = MM256_SET_M128I(sc128, sc128); __m256i sumi = _mm256_setzero_si256(); for (int j = 0; j < QK_K/64; ++j) { const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; const __m256i q4l = _mm256_and_si256(q4bits, m4); const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); p16l = _mm256_madd_epi16(scale_l, p16l); const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); p16h = _mm256_madd_epi16(scale_h, p16h); const __m256i sumj = _mm256_add_epi32(p16l, p16h); sumi = _mm256_add_epi32(sumi, sumj); } __m256 vd = _mm256_set1_ps(d); acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); } acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); #elif defined __AVX__ const __m128i m4 = _mm_set1_epi8(0xF); const __m128i m2 = _mm_set1_epi8(0x2); __m256 acc = _mm256_setzero_ps(); __m128 acc_m = _mm_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); const __m128i scales = _mm_cvtepu8_epi16(utmps); const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); const __m128i prod = _mm_madd_epi16(mins, q8s); acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m); __m128i sumi_0 = _mm_setzero_si128(); __m128i sumi_1 = _mm_setzero_si128(); __m128i shuffle = _mm_set1_epi16(0x0100); for (int j = 0; j < QK_K/64; ++j) { const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle); shuffle = _mm_add_epi16(shuffle, m2); const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle); shuffle = _mm_add_epi16(shuffle, m2); __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; const __m128i q4l_0 = 
_mm_and_si128(q4bits, m4); const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; const __m128i q4l_1 = _mm_and_si128(q4bits, m4); const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0); p16l = _mm_madd_epi16(scale_l, p16l); sumi_0 = _mm_add_epi32(sumi_0, p16l); const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; p16l = _mm_maddubs_epi16(q4l_1, q8l_1); p16l = _mm_madd_epi16(scale_l, p16l); sumi_1 = _mm_add_epi32(sumi_1, p16l); const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0); p16h = _mm_madd_epi16(scale_h, p16h); sumi_0 = _mm_add_epi32(sumi_0, p16h); const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; p16h = _mm_maddubs_epi16(q4h_1, q8h_1); p16h = _mm_madd_epi16(scale_h, p16h); sumi_1 = _mm_add_epi32(sumi_1, p16h); } __m256 vd = _mm256_set1_ps(d); __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); } acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; #if defined __AVX2__ const __m256i m4 = _mm256_set1_epi8(0xF); const __m128i mzero = _mm_setzero_si128(); const __m256i mone = _mm256_set1_epi8(1); __m256 acc = _mm256_setzero_ps(); float summs = 0.f; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); summs += dmin * _mm_extract_epi32(hsum, 0); const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); const __m256i scales = MM256_SET_M128I(sc128, sc128); const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh); __m256i hmask = mone; __m256i sumi = _mm256_setzero_si256(); int bit = 0; for (int j = 0; j < QK_K/64; ++j) { const __m256i scale_0 = _mm256_shuffle_epi8(scales, 
get_scale_shuffle_k4(2*j+0)); const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32; const __m256i q5l_0 = _mm256_and_si256(q5bits, m4); const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0); hmask = _mm256_slli_epi16(hmask, 1); const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1); hmask = _mm256_slli_epi16(hmask, 1); const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); p16_0 = _mm256_madd_epi16(scale_0, p16_0); p16_1 = _mm256_madd_epi16(scale_1, p16_1); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); } __m256 vd = _mm256_set1_ps(d); acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); } *s = hsum_float_8(acc) + summs; #elif defined __AVX__ const __m128i m4 = _mm_set1_epi8(0xF); const __m128i mzero = _mm_setzero_si128(); const __m128i mone = _mm_set1_epi8(1); const __m128i m2 = _mm_set1_epi8(2); __m256 acc = _mm256_setzero_ps(); float summs = 0.f; for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); const __m128i scales = _mm_cvtepu8_epi16(utmps); const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); const __m128i prod = _mm_madd_epi16(mins, q8s); const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); summs += dmin * _mm_extract_epi32(hsum, 0); const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]); const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]); __m128i hmask = mone; __m128i sumi_0 = _mm_setzero_si128(); __m128i sumi_1 = _mm_setzero_si128(); int bit = 0; __m128i shuffle = _mm_set1_epi16(0x0100); for (int j = 0; j < QK_K/64; ++j) { const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); shuffle = _mm_add_epi16(shuffle, m2); const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); shuffle = _mm_add_epi16(shuffle, m2); const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; __m128i q5l_0 = _mm_and_si128(q5bits_0, m4); __m128i q5l_1 = _mm_and_si128(q5bits_1, m4); __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0); __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1); hmask = _mm_slli_epi16(hmask, 1); __m128i q8_0 
= _mm_loadu_si128((const __m128i*)q8); q8 += 16; __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0); __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1); p16_0 = _mm_madd_epi16(scale_0, p16_0); p16_1 = _mm_madd_epi16(scale_0, p16_1); q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4); q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4); q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); q5_0 = _mm_add_epi8(q5l_0, q5h_0); q5_1 = _mm_add_epi8(q5l_1, q5h_1); hmask = _mm_slli_epi16(hmask, 1); q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0); __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1); p16_2 = _mm_madd_epi16(scale_1, p16_2); p16_3 = _mm_madd_epi16(scale_1, p16_3); sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); } __m256 vd = _mm256_set1_ps(d); __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); } *s = hsum_float_8(acc) + summs; #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); UNUSED(utmp); ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __AVX2__ const __m256i m4 = _mm256_set1_epi8(0xF); const __m256i m2 = _mm256_set1_epi8(3); const __m256i m32s = _mm256_set1_epi8(32); __m256 acc = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); __m256i sumi = _mm256_setzero_si256(); int is = 0; for (int j = 0; j < QK_K/128; ++j) { const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); is += 4; const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32; const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); const __m256i q4_3 = 
_mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); p16_0 = _mm256_sub_epi16(p16_0, q8s_0); p16_1 = _mm256_sub_epi16(p16_1, q8s_1); p16_2 = _mm256_sub_epi16(p16_2, q8s_2); p16_3 = _mm256_sub_epi16(p16_3, q8s_3); p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } *s = hsum_float_8(acc); #elif defined __AVX__ const __m128i m3 = _mm_set1_epi8(3); const __m128i m15 = _mm_set1_epi8(15); __m256 acc = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; // handle the q6_k -32 offset separately using bsums const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums); const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1); const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales); const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8)); const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5); const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5); __m128i sumi_0 = _mm_setzero_si128(); __m128i sumi_1 = _mm_setzero_si128(); int is = 0; for (int j = 0; j < QK_K/128; ++j) { const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16; const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16; const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2); const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2); const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48)); const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48)); const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2); const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2); const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; const __m128i q4_0 = 
_mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0); const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1); const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2); const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3); const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4); const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5); const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6); const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7); const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2); __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); is += 4; p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1); p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3); p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5); p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7); sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); } sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0); sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1); const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc); } *s = hsum_float_8(acc); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } #if defined (__AVX__) || defined (__AVX2__) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 
1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, }; #endif void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if 
defined(__AVX2__) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[4]; const uint8_t * aux8 = (const uint8_t *)aux32; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); const uint16_t ls1 = aux32[1] >> 28; const uint16_t ls2 = aux32[3] >> 28; const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); sumi1 = _mm256_add_epi32(sumi1, p1); sumi2 = _mm256_add_epi32(sumi2, p2); } accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); } *s = 0.125f * hsum_float_8(accumf); #elif defined(__AVX__) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[4]; const uint8_t * aux8 = (const uint8_t *)aux32; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]); const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]); const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], 
signs64[(aux32[3] >> 14) & 127]); const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); const uint16_t ls1 = aux32[1] >> 28; const uint16_t ls2 = aux32[3] >> 28; const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); } accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); } *s = 0.125f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__AVX2__) const __m256i mone = _mm256_set1_epi8(1); static const char block_sign_shuffle_mask_1[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, }; static const char block_sign_shuffle_mask_2[32] = { 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, }; static const uint8_t bit_selector_mask_bytes[32] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes); const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1); const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2); static const uint8_t k_bit_helper[32] = { 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, }; const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper); const __m256i m511 = _mm256_set1_epi16(511); const __m128i m4 = _mm_set1_epi8(0xf); const __m128i m1 = _mm_set1_epi8(1); uint64_t aux64; // somewhat hacky, but gives a significant boost in performance __m256i aux_gindex; const uint16_t * gindex = (const uint16_t *)&aux_gindex; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = 
x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(&aux64, x[i].scales, 8); __m128i stmp = _mm_set1_epi64x(aux64); stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16; aux_gindex = _mm256_and_si256(q2_data, m511); const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9); const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13); const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper); const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting); const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits); const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits); const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1); const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l); const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h); __m256i signs; signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1); signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone)); signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2); signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone)); signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1); signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone)); signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2); signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone)); const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3); const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4); const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0))); const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1))); const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2))); const __m256i sc4 = 
_mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3))); sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1)); sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2)); sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3)); sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4)); } accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); } *s = 0.125f * hsum_float_8(accumf); #elif defined(__AVX__) const __m128i mone = _mm_set1_epi8(1); static const char block_sign_shuffle_mask_1[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, }; static const char block_sign_shuffle_mask_2[32] = { 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, }; static const uint8_t bit_selector_mask_bytes[32] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m128i bit_selector_mask_0 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes); const __m128i bit_selector_mask_1 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes + 1); const __m128i block_sign_shuffle_1_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1); const __m128i block_sign_shuffle_1_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1 + 1); const __m128i block_sign_shuffle_2_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2); const __m128i block_sign_shuffle_2_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2 + 1); static const uint8_t k_bit_helper[32] = { 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, }; const __m128i bit_helper_0 = _mm_loadu_si128((const __m128i*)k_bit_helper); const __m128i bit_helper_1 = _mm_loadu_si128((const __m128i*)k_bit_helper + 1); const __m128i m511 = _mm_set1_epi16(511); const __m128i m4 = _mm_set1_epi8(0xf); const __m128i m1 = _mm_set1_epi8(1); uint64_t aux64; // somewhat hacky, but gives a significant boost in performance __m256i aux_gindex; const uint16_t * gindex = (const uint16_t *)&aux_gindex; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(&aux64, x[i].scales, 8); __m128i stmp = _mm_set1_epi64x(aux64); stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { const __m128i q2_data_0 = _mm_loadu_si128((const __m128i*)q2); const __m128i q2_data_1 = _mm_loadu_si128((const __m128i*)q2 + 1); q2 += 16; aux_gindex = MM256_SET_M128I(_mm_and_si128(q2_data_1, m511), _mm_and_si128(q2_data_0, m511)); const __m128i partial_sign_bits_0 = _mm_srli_epi16(q2_data_0, 9); const __m128i partial_sign_bits_1 = _mm_srli_epi16(q2_data_1, 9); 
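/*
 * Descriptive note on the sign reconstruction just below (added commentary; the
 * scalar sketch is illustrative only, uses hypothetical variable names, and is
 * not part of the build):
 *
 * Each 16-bit entry of x[i].qs packs a 9-bit index into iq2xs_grid (the & m511
 * above) plus 7 explicit sign bits in bits 9..15. The 8th sign of each group of
 * 8 is implicit: the number of negative signs per group is kept even (hence the
 * keven_signs naming). The xor of (code >> 9) with (code >> 13) folds the upper
 * sign bits into the low nibble so the byte shuffle on k_bit_helper can look up
 * the parity of all 7 explicit bits; k_bit_helper yields 0x80 for odd parity,
 * and or-ing that in restores the missing top sign bit. Roughly, per 16-bit code:
 *
 *     uint16_t code   = q2[k];
 *     uint16_t grid_i = code & 511;             // 9-bit grid index
 *     uint8_t  signs7 = code >> 9;              // 7 explicit sign bits
 *     uint8_t  parity = __builtin_popcount(signs7) & 1;
 *     uint8_t  signs8 = signs7 | (parity << 7); // implied bit keeps the count even
 *
 * full_sign_bits_{0,1} computed below are the vectorized equivalent of signs8,
 * later expanded per byte via block_sign_shuffle_* and bit_selector_mask_*.
 */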
const __m128i partial_sign_bits_upper_0 = _mm_srli_epi16(q2_data_0, 13); const __m128i partial_sign_bits_upper_1 = _mm_srli_epi16(q2_data_1, 13); const __m128i partial_sign_bits_for_counting_0 = _mm_xor_si128(partial_sign_bits_0, partial_sign_bits_upper_0); const __m128i partial_sign_bits_for_counting_1 = _mm_xor_si128(partial_sign_bits_1, partial_sign_bits_upper_1); const __m128i odd_bits_0 = _mm_shuffle_epi8(bit_helper_0, partial_sign_bits_for_counting_0); const __m128i odd_bits_1 = _mm_shuffle_epi8(bit_helper_1, partial_sign_bits_for_counting_1); const __m128i full_sign_bits_0 = _mm_or_si128(partial_sign_bits_0, odd_bits_0); const __m128i full_sign_bits_1 = _mm_or_si128(partial_sign_bits_1, odd_bits_1); const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_3_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_3_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_4_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_4_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q2_1_0 = _mm_set_epi64x(iq2xs_grid[gindex[1]], iq2xs_grid[gindex[0]]); const __m128i q2_1_1 = _mm_set_epi64x(iq2xs_grid[gindex[3]], iq2xs_grid[gindex[2]]); const __m128i q2_2_0 = _mm_set_epi64x(iq2xs_grid[gindex[5]], iq2xs_grid[gindex[4]]); const __m128i q2_2_1 = _mm_set_epi64x(iq2xs_grid[gindex[7]], iq2xs_grid[gindex[6]]); const __m128i q2_3_0 = _mm_set_epi64x(iq2xs_grid[gindex[9]], iq2xs_grid[gindex[8]]); const __m128i q2_3_1 = _mm_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]]); const __m128i q2_4_0 = _mm_set_epi64x(iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); const __m128i q2_4_1 = _mm_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]]); // AVX2 full_signs_1 is full_sign_bits_0 here // AVX2 full_signs_2 is full_sign_bits_1 here __m128i signs_0, signs_1; signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_0); signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_1); signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, _mm_or_si128(signs_0, mone)); const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, _mm_or_si128(signs_1, mone)); signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_0); signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_1); signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, _mm_or_si128(signs_0, mone)); const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, _mm_or_si128(signs_1, mone)); signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_0); signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_1); signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); const __m128i q8s_3_0 = _mm_sign_epi8(q8_3_0, _mm_or_si128(signs_0, mone)); const __m128i q8s_3_1 = _mm_sign_epi8(q8_3_1, _mm_or_si128(signs_1, mone)); signs_0 = 
_mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_0); signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_1); signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); const __m128i q8s_4_0 = _mm_sign_epi8(q8_4_0, _mm_or_si128(signs_0, mone)); const __m128i q8s_4_1 = _mm_sign_epi8(q8_4_1, _mm_or_si128(signs_1, mone)); const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); const __m128i dot3_0 = _mm_maddubs_epi16(q2_3_0, q8s_3_0); const __m128i dot3_1 = _mm_maddubs_epi16(q2_3_1, q8s_3_1); const __m128i dot4_0 = _mm_maddubs_epi16(q2_4_0, q8s_4_0); const __m128i dot4_1 = _mm_maddubs_epi16(q2_4_1, q8s_4_1); __m128i sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)); const __m128i sc1_0 = _mm_cvtepi8_epi16(sc_tmp); const __m128i sc1_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)); const __m128i sc2_0 = _mm_cvtepi8_epi16(sc_tmp); const __m128i sc2_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)); const __m128i sc3_0 = _mm_cvtepi8_epi16(sc_tmp); const __m128i sc3_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)); const __m128i sc4_0 = _mm_cvtepi8_epi16(sc_tmp); const __m128i sc4_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot1_0, sc1_0)); sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot1_1, sc1_1)); sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot2_0, sc2_0)); sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot2_1, sc2_1)); sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot3_0, sc3_0)); sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot3_1, sc3_1)); sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot4_0, sc4_0)); sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot4_1, sc4_1)); } accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); } *s = 0.125f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__AVX2__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m128i m4 = _mm_set1_epi8(0xf); const __m128i m1 = _mm_set1_epi8(1); const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); const __m256i mask2 = _mm256_loadu_si256((const 
__m256i*)k_mask2); uint64_t aux64; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(&aux64, x[i].scales, 8); const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q2_1 = _mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); qs += 8; __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); signs += 4; const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0))); const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1))); sumi1 = _mm256_add_epi32(sumi1, p1); sumi2 = _mm256_add_epi32(sumi2, p2); } accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); } *s = 0.125f * hsum_float_8(accumf); #elif defined(__AVX__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m128i m4 = _mm_set1_epi8(0xf); const __m128i m1 = _mm_set1_epi8(1); const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); uint64_t aux64; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT 
qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); const int8_t * GGML_RESTRICT q8 = y[i].qs; memcpy(&aux64, x[i].scales, 8); const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); const __m128i scales16_0 = _mm_cvtepi8_epi16(scales8); const __m128i scales16_1 = _mm_cvtepi8_epi16(_mm_srli_si128(scales8, 8)); __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q2_1_0 = _mm_set_epi64x(iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); const __m128i q2_1_1 = _mm_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)]); const __m128i q2_2_0 = _mm_set_epi64x(iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); const __m128i q2_2_1 = _mm_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)]); qs += 8; __m128i aux128_0 = _mm_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); __m128i aux128_1 = aux128_0; aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); aux128_0 = _mm_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); aux128_1 = aux128_0; aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); signs += 4; const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 0))); const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 1))); const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 0))); const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 1))); sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); } accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), 
_mm_add_epi32(sumi1_0, sumi2_0)))), accumf); } *s = 0.125f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__AVX2__) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[2]; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); q3 += 8; const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); q3 += 8; memcpy(aux32, gas, 8); gas += 8; const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); const uint16_t ls1 = aux32[0] >> 28; const uint16_t ls2 = aux32[1] >> 28; const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); sumi1 = _mm256_add_epi32(sumi1, p1); sumi2 = _mm256_add_epi32(sumi2, p2); } accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); } *s = 0.25f * hsum_float_8(accumf); #elif defined(__AVX__) const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; uint32_t aux32[2]; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q2_1_0 = 
_mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); const __m128i q2_1_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); q3 += 8; const __m128i q2_2_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); const __m128i q2_2_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); q3 += 8; memcpy(aux32, gas, 8); gas += 8; const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127]); const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); const uint16_t ls1 = aux32[0] >> 28; const uint16_t ls2 = aux32[1] >> 28; const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); } accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); } *s = 0.25f * hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined(__AVX2__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); const __m256i idx_mask = _mm256_set1_epi32(256); typedef union { __m256i vec[2]; uint32_t index[16]; } index_t; index_t idx; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; 
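// Overview of the AVX2 path below (as read from the code itself): for each pair of 32-value
// sub-blocks, 9-bit grid indices are formed by widening the 8-bit qs bytes and OR-ing in one
// extra bit taken from qh (idx_shift/idx_mask place that bit at value 256), after which the
// corresponding iq3s_grid rows are gathered with _mm256_set_epi32. The packed sign bits in
// `signs` are expanded to per-byte masks s (0x00 or 0xFF) via shuffle/and/cmpeq, and the q8
// activations are conditionally negated with the identity (q8 ^ s) - s:
//   s == 0x00: (q8 ^ 0x00) - 0x00 = q8
//   s == 0xFF: (q8 ^ 0xFF) - 0xFF = ~q8 + 1 = -q8   (0xFF acts as -1 in 8-bit arithmetic)
// maddubs then pairs the unsigned grid bytes with the signed q8 bytes, each pair of
// sub-blocks is weighted by its 4-bit scale as (2*ls + 1), and the block total is folded
// into accumf with the combined scale d.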
const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16; idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]); idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]); idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask); idx.vec[1] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask); idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l))); idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1))); // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); const __m256i q2_1 = _mm256_set_epi32( iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] ); const __m256i q2_2 = _mm256_set_epi32( iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] ); __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16)); aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16)); aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); signs += 4; const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; const uint16_t ls2 = x[i].scales[ib32/2] >> 4; const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); sumi1 = _mm256_add_epi32(sumi1, p1); sumi2 = _mm256_add_epi32(sumi2, p2); } accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); } *s = hsum_float_8(accumf); #elif defined(__AVX__) static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, }; const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); const __m128i mask2_1 =
_mm_loadu_si128((const __m128i*)k_mask2 + 1); const __m128i idx_mul_0 = _mm_set_epi32(32, 64, 128, 256); const __m128i idx_mul_1 = _mm_set_epi32(2, 4, 8, 16); const __m128i idx_mask = _mm_set1_epi32(256); typedef union { __m128i vec[4]; uint32_t index[16]; } index_t; index_t idx; __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i qs_tmp = _mm_loadu_si128((const __m128i *)qs); const __m128i idx_l_0 = _mm_cvtepu8_epi16(qs_tmp); const __m128i idx_l_1 = _mm_cvtepu8_epi16(_mm_srli_si128(qs_tmp, 8)); qs += 16; idx.vec[0] = _mm_set1_epi32(qh[ib32+0]); idx.vec[1] = idx.vec[0]; idx.vec[2] = _mm_set1_epi32(qh[ib32+1]); idx.vec[3] = idx.vec[2]; idx.vec[0] = _mm_and_si128(_mm_mullo_epi32(idx.vec[0], idx_mul_0), idx_mask); idx.vec[1] = _mm_and_si128(_mm_mullo_epi32(idx.vec[1], idx_mul_1), idx_mask); idx.vec[2] = _mm_and_si128(_mm_mullo_epi32(idx.vec[2], idx_mul_0), idx_mask); idx.vec[3] = _mm_and_si128(_mm_mullo_epi32(idx.vec[3], idx_mul_1), idx_mask); idx.vec[0] = _mm_or_si128(idx.vec[0], _mm_cvtepi16_epi32(idx_l_0)); idx.vec[1] = _mm_or_si128(idx.vec[1], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_0, 8))); idx.vec[2] = _mm_or_si128(idx.vec[2], _mm_cvtepi16_epi32(idx_l_1)); idx.vec[3] = _mm_or_si128(idx.vec[3], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_1, 8))); const __m128i q2_1_0 = _mm_set_epi32(iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]); const __m128i q2_1_1 = _mm_set_epi32(iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]]); const __m128i q2_2_0 = _mm_set_epi32(iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[9]], iq3s_grid[idx.index[8]]); const __m128i q2_2_1 = _mm_set_epi32(iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]]); __m128i aux128_0 = _mm_set1_epi32(signs[0] | (signs[1] << 16)); __m128i aux128_1 = aux128_0; aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); aux128_0 = _mm_set1_epi32(signs[2] | (signs[3] << 16)); aux128_1 = aux128_0; aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); const __m128i q8s_2_1 = 
_mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); signs += 4; const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; const uint16_t ls2 = x[i].scales[ib32/2] >> 4; const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); } accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); } *s = hsum_float_8(accumf); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __AVX2__ __m256 accum = _mm256_setzero_ps(); float accum1 = 0; for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint16_t * qh = x[i].qh; __m256i sumi = _mm256_setzero_si256(); int sumi1 = 0; for (int ib = 0; ib < QK_K/32; ib += 2) { #ifdef __BMI2__ const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib], 0x700070007000700ULL); const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib + 1], 0x700070007000700ULL); const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); #else const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); #endif qs += 8; const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1)); const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2)); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2)); sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? 
-1 : 1) * ls1 + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; } const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum); accum1 += d * sumi1; } *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; #elif defined __AVX__ __m256 accum = _mm256_setzero_ps(); float accum1 = 0; for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint16_t * qh = x[i].qh; __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); int sumi1 = 0; for (int ib = 0; ib < QK_K/32; ib += 2) { const __m128i q1b_1_0 = _mm_set_epi64x(iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); const __m128i q1b_1_1 = _mm_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)]); const __m128i q1b_2_0 = _mm_set_epi64x(iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); const __m128i q1b_2_1 = _mm_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)]); qs += 8; const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(ls1)); const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(ls1)); const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(ls2)); const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(ls2)); sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; } const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum); accum1 += d * sumi1; } *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_m * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; iq1m_scale_t scale; #if defined __AVX2__ const __m256i mask = _mm256_set1_epi16(0x7); const __m256i mone = _mm256_set1_epi16(1); const __m256i mone8 = _mm256_set1_epi8(1); const __m256i mtwo8 = _mm256_set1_epi8(2); // VPSHUFB cannot cross 128-bit lanes so odd shifts go to upper half. 
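// More concretely (derived from the code that follows): the 64-bit scales word (four uint16
// values, each packing four 3-bit sub-block scales in its low 12 bits) is broadcast to all
// four 64-bit lanes, and each lane is shifted right by a different amount (0, 6, 3, 9 from
// lane 0 to lane 3) before masking with 0x7 and applying the usual 2*s + 1 mapping. The
// even-numbered scale fields therefore land in the lower 128-bit half and the odd-numbered
// ones in the upper half, so the later scales_idx1/scales_idx2 byte shuffles never have to
// cross a 128-bit lane.
// (On the __BMI2__ path further down, _pdep_u64 deposits the low-order bits of its first
// operand into the set-bit positions of its mask, which is what merges the 8-bit qs and
// 3-bit qh index parts in a single step.)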
const __m256i scales_shift = _mm256_set_epi64x(9, 3, 6, 0); __m256 accum1 = _mm256_setzero_ps(); __m256 accum2 = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint16_t * sc = (const uint16_t *)x[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); // Extract 3-bit scales (16 values) __m256i scales = _mm256_set1_epi64x(*(const uint64_t*)sc); scales = _mm256_srlv_epi64(scales, scales_shift); scales = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scales, mask), 1), mone); // Indices to repeat each scale 8 times. __m256i scales_idx1 = _mm256_set1_epi16(0x0100); __m256i scales_idx2 = _mm256_add_epi8(scales_idx1, _mm256_set1_epi8(8)); __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib = 0; ib < QK_K/32; ib += 2) { #ifdef __BMI2__ const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(*(const uint16_t*)(qh) & 0x7777, 0xf000f000f000f00ULL); const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(*(const uint16_t*)(qh + 2) & 0x7777, 0xf000f000f000f00ULL); const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); // Convert signs to bytes 0x81 (negative) or 0x01 (positive) const uint64_t delta_sign = _pdep_u64(*(const uint32_t*)(qh) & 0x88888888, 0xf0f0f0f0f0f0f0f0ULL); const __m256i delta1 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign))); const __m256i delta2 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign >> 32))); #else const __m256i q1b_1 = _mm256_set_epi64x( iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)], iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)] ); const __m256i q1b_2 = _mm256_set_epi64x( iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)], iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)] ); const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); const __m256i delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[2] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); #endif const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1)); const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2)); __m256i scale1 = _mm256_shuffle_epi8(scales, scales_idx1); __m256i scale2 = _mm256_shuffle_epi8(scales, scales_idx2); scales_idx1 = _mm256_add_epi8(scales_idx1, mtwo8); scales_idx2 = _mm256_add_epi8(scales_idx2, mtwo8); const __m256i p1 = _mm256_madd_epi16(dot1, scale1); const __m256i p2 = _mm256_madd_epi16(dot2, scale2); const __m256i p3 = _mm256_madd_epi16(dot3, scale1); const __m256i p4 = _mm256_madd_epi16(dot4, scale2); sumi1 = _mm256_add_epi32(sumi1, _mm256_add_epi32(p1, p2)); sumi2 = _mm256_add_epi32(sumi2, _mm256_add_epi32(p3, p4)); qs += 8; qh += 4; } const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16)); accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1); accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2); } *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); #elif defined __AVX__ const __m128i mask = _mm_set1_epi16(0x7); const __m128i mone = _mm_set1_epi16(1); __m256 accum1 = _mm256_setzero_ps(); __m256 accum2 = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint16_t * sc = (const uint16_t *)x[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib = 0; ib < QK_K/32; ib += 2) { const __m128i q1b_1_0 = _mm_set_epi64x( iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]); const __m128i q1b_1_1 = _mm_set_epi64x( iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)]); const __m128i q1b_2_0 = _mm_set_epi64x( iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]); const __m128i q1b_2_1 = _mm_set_epi64x( iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)]); const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); const __m128i delta1_0 = _mm_set_epi64x(qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); const __m128i delta1_1 = _mm_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); const __m128i delta2_0 = _mm_set_epi64x(qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[2] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); const __m128i delta2_1 = _mm_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); const __m128i dot3_0 = mul_add_epi8_sse(delta1_0, q8b_1_0); const __m128i dot3_1 = mul_add_epi8_sse(delta1_1, q8b_1_1); const __m128i dot4_0 = mul_add_epi8_sse(delta2_0, q8b_2_0); const __m128i dot4_1 = mul_add_epi8_sse(delta2_1, q8b_2_1); __m128i scale1_0 = _mm_set1_epi16(sc[ib/2] >> 0); __m128i scale1_1 = _mm_set1_epi16(sc[ib/2] >> 3); __m128i scale2_0 = _mm_set1_epi16(sc[ib/2] >> 6); __m128i scale2_1 = _mm_set1_epi16(sc[ib/2] >> 9); scale1_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_0, mask), 1), mone); scale1_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_1, mask), 1), mone); scale2_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_0, mask), 1), mone); scale2_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_1, mask), 1), mone); const __m128i p1_0 = _mm_madd_epi16(dot1_0, scale1_0); const __m128i p1_1 = _mm_madd_epi16(dot1_1, scale1_1); const __m128i p2_0 = _mm_madd_epi16(dot2_0, scale2_0); const __m128i p2_1 = _mm_madd_epi16(dot2_1, scale2_1); const __m128i p3_0 = _mm_madd_epi16(dot3_0, scale1_0); const __m128i p3_1 = _mm_madd_epi16(dot3_1, scale1_1); const __m128i p4_0 = _mm_madd_epi16(dot4_0, scale2_0); const __m128i p4_1 = _mm_madd_epi16(dot4_1, scale2_1); sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); sumi2_0 = _mm_add_epi32(sumi2_0, _mm_add_epi32(p3_0, p4_0)); sumi2_1 = _mm_add_epi32(sumi2_1, _mm_add_epi32(p3_1, p4_1)); qs += 8; qh += 4; } const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16)); accum1 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum1); accum2 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi2_1, sumi2_0))), accum2); } *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); #else UNUSED(x); UNUSED(y); UNUSED(nb); UNUSED(scale); ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK4_NL == 0); static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); const block_iq4_nl * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK4_NL; int ib = 0; float sumf = 0; #if defined __AVX2__ const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); const __m128i m4b = _mm_set1_epi8(0x0f); const __m256i mone = _mm256_set1_epi16(1); __m256 accum1 = _mm256_setzero_ps(); __m256 accum2 = _mm256_setzero_ps(); for (; ib + 1 < nb; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); const __m256i 
p16_1 = mul_add_epi8(q4b_1, q8b_1); const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_CPU_FP16_TO_FP32(x[ib + 0].d)), _mm256_cvtepi32_ps(p_1), accum1); accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_CPU_FP16_TO_FP32(x[ib + 1].d)), _mm256_cvtepi32_ps(p_2), accum2); } sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); #elif defined __AVX__ const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); const __m128i m4b = _mm_set1_epi8(0x0f); __m256 accum = _mm256_setzero_ps(); for (; ib + 1 < nb; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); } sumf = hsum_float_8(accum); #endif for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_K == 0); const block_iq4_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; #if defined __AVX2__ const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); const __m128i m4b = _mm_set1_epi8(0x0f); __m256 accum = _mm256_setzero_ps(); for (int ibl = 0; ibl < nb; ++ibl) { const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; uint16_t sh = x[ibl].scales_h; __m256i sumi1 = _mm256_setzero_si256(); __m256i sumi2 = _mm256_setzero_si256(); for (int ib = 0; ib < QK_K/32; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16; const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16; const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), 
m4b)), _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; sh >>= 4; const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1)); const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2)); sumi1 = _mm256_add_epi32(p_1, sumi1); sumi2 = _mm256_add_epi32(p_2, sumi2); } accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum); } *s = hsum_float_8(accum); #elif defined __AVX__ const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); const __m128i m4b = _mm_set1_epi8(0x0f); __m256 accum = _mm256_setzero_ps(); for (int ibl = 0; ibl < nb; ++ibl) { const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; uint16_t sh = x[ibl].scales_h; __m128i sumi1_0 = _mm_setzero_si128(); __m128i sumi1_1 = _mm_setzero_si128(); __m128i sumi2_0 = _mm_setzero_si128(); __m128i sumi2_1 = _mm_setzero_si128(); for (int ib = 0; ib < QK_K/32; ib += 2) { const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)qs); qs += 16; const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)qs); qs += 16; const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; sh >>= 4; const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, _mm_set1_epi16(ls1)); const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, _mm_set1_epi16(ls1)); const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, _mm_set1_epi16(ls2)); const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, _mm_set1_epi16(ls2)); sumi1_0 = _mm_add_epi32(p_1_0, sumi1_0); sumi1_1 = _mm_add_epi32(p_1_1, sumi1_1); sumi2_0 = _mm_add_epi32(p_2_0, sumi2_0); sumi2_1 = _mm_add_epi32(p_2_1, sumi2_1); } __m128i sumi12_0 = _mm_add_epi32(sumi1_0, sumi2_0); __m128i sumi12_1 = _mm_add_epi32(sumi1_1, sumi2_1); accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi12_1, sumi12_0))), accum); } *s = hsum_float_8(accum); #else UNUSED(x); UNUSED(y); UNUSED(nb); ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); #endif } ggml-org-ggml-3678254/src/ggml-cpu/arch/x86/repack.cpp000066400000000000000000024071121512524704700222100ustar00rootroot00000000000000#define GGML_COMMON_IMPL_CPP #define GGML_COMMON_DECL_CPP #include "ggml-common.h" #include "ggml-backend-impl.h" #include "ggml-impl.h" 
#include "ggml-cpu.h" #include "ggml-cpu-impl.h" #include "simd-mappings.h" #include "traits.h" #include <cmath> #include <cstring> #include <cassert> #include <cstdlib> // for qsort #include <cstdio> // for GGML_ASSERT #define GGML_CPU_CLANG_WORKAROUND #include "../../repack.h" #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" #endif #define UNUSED GGML_UNUSED #if defined(__AVX__) #if defined(__F16C__) #if defined(__AVX512F__) #define GGML_F32Cx8x2_LOAD(x, y) _mm512_cvtph_ps(_mm256_set_m128i(_mm_loadu_si128((const __m128i *)(y)), _mm_loadu_si128((const __m128i *)(x)))) #define GGML_F32Cx16_REPEAT_LOAD(x) _mm512_cvtph_ps(_mm256_set_m128i(x, x)) #endif // the _mm256_cvt intrinsics require F16C #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x))) #define GGML_F32Cx8_REPEAT_LOAD(x, loadMask) _mm256_cvtph_ps(_mm_shuffle_epi32(_mm_maskload_epi32((int const*)(x), loadMask), 68)) #define GGML_F32Cx8_REARRANGE_LOAD(x, arrangeMask) _mm256_cvtph_ps(_mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask)) #else #if defined(__AVX512F__) static inline __m512 __avx512_f32cx8x2_load(ggml_fp16_t *x, ggml_fp16_t *y) { float tmp[16]; for (int i = 0; i < 8; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } for (int i = 0; i < 8; i++) { tmp[i + 8] = GGML_CPU_FP16_TO_FP32(y[i]); } return _mm512_loadu_ps(tmp); } static inline __m512 __avx512_repeat_f32cx16_load(__m128i x) { float tmp[16]; uint16_t tmphalf[8]; _mm_storeu_si128((__m128i*)tmphalf, x); for (int i = 0; i < 4; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); tmp[i + 4] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); tmp[i + 8] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); tmp[i + 12] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); } return _mm512_loadu_ps(tmp); } #endif static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { float tmp[8]; for (int i = 0; i < 8; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); } static inline __m256 __avx_repeat_f32cx8_load(ggml_fp16_t *x) { float tmp[8]; for (int i = 0; i < 4; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); tmp[i + 4] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); } static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrangeMask) { uint16_t tmphalf[8]; float tmp[8]; _mm_storeu_si128((__m128i*)tmphalf, _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask)); for (int i = 0; i < 8; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); } return _mm256_loadu_ps(tmp); } #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) #define GGML_F32Cx8_REPEAT_LOAD(x, loadMask) __avx_repeat_f32cx8_load(x) #define GGML_F32Cx8_REARRANGE_LOAD(x, arrangeMask) __avx_rearranged_f32cx8_load(x, arrangeMask) #if defined(__AVX512F__) #define GGML_F32Cx8x2_LOAD(x, y) __avx512_f32cx8x2_load(x, y) #define GGML_F32Cx16_REPEAT_LOAD(x) __avx512_repeat_f32cx16_load(x) #endif #endif #endif static inline int nearest_int(float fval) { assert(fabsf(fval) <= 4194303.f); float val = fval + 12582912.f; int i; memcpy(&i, &val, sizeof(int)); return (i & 0x007fffff) - 0x00400000; } #if defined(__AVX2__) || defined(__AVX512F__) #if defined(__AVX512F__) // add int16_t pairwise and return as 512 bit int vector, then add the accumulator static inline __m512i sum_i16_pairs_acc_int32x16(const __m512i acc, const __m512i x) { const __m512i ones = _mm512_set1_epi16(1); return _mm512_add_epi32(acc, _mm512_madd_epi16(ones, x)); } static inline __m512i mul_sum_us8_pairs_acc_int32x16(const __m512i acc, const __m512i ax, const __m512i sy) { #if defined(__AVX512VNNI__) return _mm512_dpbusd_epi32(acc,
ax, sy); #else // Perform multiplication and create 16-bit values const __m512i dot = _mm512_maddubs_epi16(ax, sy); return sum_i16_pairs_acc_int32x16(acc, dot); #endif } // multiply int8_t, add results pairwise twice and return as 512 bit int vector,then add the accumulator static inline __m512i mul_sum_i8_pairs_acc_int32x16(const __m512i acc, const __m512i x, const __m512i y) { const __m512i zero = _mm512_setzero_si512(); // Get absolute values of x vectors const __m512i ax = _mm512_abs_epi8(x); // Sign the values of the y vectors __mmask64 blt0 = _mm512_movepi8_mask(x); const __m512i sy = _mm512_mask_sub_epi8(y, blt0, zero, y); return mul_sum_us8_pairs_acc_int32x16(acc, ax, sy); } #endif // add int16_t pairwise and return as 256 bit int vector, then add the accumulator static inline __m256i sum_i16_pairs_acc_int32x8(const __m256i acc, const __m256i x) { const __m256i ones = _mm256_set1_epi16(1); return _mm256_add_epi32(acc, _mm256_madd_epi16(ones, x)); } static inline __m256i mul_sum_us8_pairs_acc_int32x8(const __m256i acc, const __m256i ax, const __m256i sy) { #if defined(__AVX512VNNI__) && defined(__AVX512VL__) return _mm256_dpbusd_epi32(acc, ax, sy); #elif defined(__AVXVNNI__) return _mm256_dpbusd_avx_epi32(acc, ax, sy); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); return sum_i16_pairs_acc_int32x8(acc, dot); #endif } // Integer variant of the function defined in ggml-quants.c // multiply int8_t, add results pairwise twice and return as 256 bit int vector, then add the accumulator static inline __m256i mul_sum_i8_pairs_acc_int32x8(const __m256i acc, const __m256i x, const __m256i y) { #if defined(__AVXVNNIINT8__) return _mm256_dpbssd_epi32(acc, x, y); #else // Get absolute values of x vectors const __m256i ax = _mm256_sign_epi8(x, x); // Sign the values of the y vectors const __m256i sy = _mm256_sign_epi8(y, x); return mul_sum_us8_pairs_acc_int32x8(acc, ax, sy); #endif } #endif void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; #if defined(__AVX2__) || defined(__AVX__) float id[4]; __m256 srcv[4][4]; __m256 idvec[4]; for (int i = 0; i < nb; i++) { for (int row_iter = 0; row_iter < 4; row_iter++) { // Load elements into 4 AVX vectors __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 32 ); __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 8 ); __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 16 ); __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 24 ); // Compute max(abs(e)) for the block const __m256 signBit = _mm256_set1_ps( -0.0f ); __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); const float maxScalar = _mm_cvtss_f32( max4 ); // Divided by 127.f to mirror results in quantize_row_q8_0 const float d = maxScalar / 127.f; id[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; //d ? 
1.0f / d : 0.0f; // Store the scale for the individual block y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); // Store the values in blocks of eight values - Aim is to use these later for block interleaving srcv[row_iter][0] = v0; srcv[row_iter][1] = v1; srcv[row_iter][2] = v2; srcv[row_iter][3] = v3; idvec[row_iter] = _mm256_set1_ps(id[row_iter]); } // The loop iterates four times - The aim is to get 4 corresponding chunks of eight bytes from the original weight blocks that are interleaved for (int j = 0; j < 4; j++) { // Apply the multiplier __m256 v0 = _mm256_mul_ps(srcv[0][j], idvec[0]); __m256 v1 = _mm256_mul_ps(srcv[1][j], idvec[1]); __m256 v2 = _mm256_mul_ps(srcv[2][j], idvec[2]); __m256 v3 = _mm256_mul_ps(srcv[3][j], idvec[3]); // Round to nearest integer v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); // Convert floats to integers __m256i i0 = _mm256_cvtps_epi32( v0 ); __m256i i1 = _mm256_cvtps_epi32( v1 ); __m256i i2 = _mm256_cvtps_epi32( v2 ); __m256i i3 = _mm256_cvtps_epi32( v3 ); #if defined(__AVX2__) // Convert int32 to int16 i0 = _mm256_packs_epi32( i0, i1 ); i2 = _mm256_packs_epi32( i2, i3 ); // Convert int16 to int8 i0 = _mm256_packs_epi16( i0, i2 ); // Permute and store the quantized weights in the required order after the pack instruction const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); i0 = _mm256_permutevar8x32_epi32( i0, perm ); _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0); #else // Since we don't have in AVX some necessary functions, // we split the registers in half and call AVX2 analogs from SSE __m128i ni0 = _mm256_castsi256_si128( i0 ); __m128i ni1 = _mm256_extractf128_si256( i0, 1); __m128i ni2 = _mm256_castsi256_si128( i1 ); __m128i ni3 = _mm256_extractf128_si256( i1, 1); __m128i ni4 = _mm256_castsi256_si128( i2 ); __m128i ni5 = _mm256_extractf128_si256( i2, 1); __m128i ni6 = _mm256_castsi256_si128( i3 ); __m128i ni7 = _mm256_extractf128_si256( i3, 1); // Convert int32 to int16 ni0 = _mm_packs_epi32( ni0, ni1 ); ni2 = _mm_packs_epi32( ni2, ni3 ); ni4 = _mm_packs_epi32( ni4, ni5 ); ni6 = _mm_packs_epi32( ni6, ni7 ); // Convert int16 to int8 ni0 = _mm_packs_epi16( ni0, ni2 ); ni4 = _mm_packs_epi16( ni4, ni6 ); _mm_storeu_si128((__m128i *)(y[i].qs + 32 * j), ni0); _mm_storeu_si128((__m128i *)(y[i].qs + 32 * j + 16), ni4); #endif } } #else UNUSED(nb); UNUSED(y); ggml_quantize_mat_q8_0_4x8_generic(x, vy, k); #endif } void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK_K == 256); assert(k % QK_K == 0); const int nb = k / QK_K; block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; #if defined(__AVX2__) float iscale[4]; __m256 srcv[4][32]; __m256 iscale_vec[4]; for (int i = 0; i < nb; i++) { for (int row_iter = 0; row_iter < 4; row_iter++) { // Load elements into 4 AVX vectors __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 ); __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 8 ); __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 16 ); __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 24 ); // Compute max(abs(e)) for the block const __m256 signBit = _mm256_set1_ps( -0.0f ); __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); __m256 maxAbs = _mm256_max_ps( abs0, abs1 ); maxAbs = 
_mm256_max_ps( maxAbs, abs2 ); maxAbs = _mm256_max_ps( maxAbs, abs3 ); __m256 mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); __m256 mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); __m256 mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); __m256 mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); __m256 maskAbs = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); srcv[row_iter][0] = v0; srcv[row_iter][1] = v1; srcv[row_iter][2] = v2; srcv[row_iter][3] = v3; for (int sb = 1; sb < 8; sb++) { // Temporarily stores absolute quant values __m256 tempAbs = maxAbs; // Load elements into 4 AVX vectors __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32); __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 8 ); __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 16 ); __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 24 ); // Compute max(abs(e)) for the block __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); maxAbs = _mm256_max_ps( maxAbs, abs0 ); maxAbs = _mm256_max_ps( maxAbs, abs1 ); maxAbs = _mm256_max_ps( maxAbs, abs2 ); maxAbs = _mm256_max_ps( maxAbs, abs3 ); __m256 mask_prev = _mm256_cmp_ps( tempAbs, maxAbs, _CMP_EQ_OQ ); maskAbs = _mm256_and_ps( maskAbs, mask_prev ); mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); __m256 mask_curr = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); maskAbs = _mm256_or_ps(maskAbs, mask_curr); srcv[row_iter][sb * 4] = v0; srcv[row_iter][sb * 4 + 1] = v1; srcv[row_iter][sb * 4 + 2] = v2; srcv[row_iter][sb * 4 + 3] = v3; } __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); const float maxScalar = _mm_cvtss_f32( max4 ); __m256 maxScalarVec = _mm256_set1_ps(maxScalar); __m256 mask_next = _mm256_cmp_ps( maxScalarVec, maxAbs, _CMP_EQ_OQ ); __m256 finalMask = _mm256_and_ps(maskAbs, mask_next); const int mask = _mm256_movemask_ps(finalMask); iscale[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; if(mask) { iscale[row_iter] = ( maxScalar != 0.0f ) ? -127.f / maxScalar: 0.0f; } y[i].d[row_iter] = maxScalar ? 
1/iscale[row_iter] : 0; iscale_vec[row_iter] = _mm256_set1_ps(iscale[row_iter]); } __m256i quants_interleaved[32]; for (int j = 0; j < 32; j++) { // Apply the multiplier __m256 v0 = _mm256_mul_ps(srcv[0][j], iscale_vec[0]); __m256 v1 = _mm256_mul_ps(srcv[1][j], iscale_vec[1]); __m256 v2 = _mm256_mul_ps(srcv[2][j], iscale_vec[2]); __m256 v3 = _mm256_mul_ps(srcv[3][j], iscale_vec[3]); // Round to nearest integer v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); // Convert floats to integers __m256i i0 = _mm256_cvtps_epi32( v0 ); __m256i i1 = _mm256_cvtps_epi32( v1 ); __m256i i2 = _mm256_cvtps_epi32( v2 ); __m256i i3 = _mm256_cvtps_epi32( v3 ); // Convert int32 to int16 i0 = _mm256_packs_epi32( i0, i1 ); i2 = _mm256_packs_epi32( i2, i3 ); // Convert int16 to int8 i0 = _mm256_packs_epi16( i0, i2 ); // Permute and store the quantized weights in the required order after the pack instruction const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); i0 = _mm256_permutevar8x32_epi32( i0, perm ); _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0); quants_interleaved[j] = i0; } // Masks to shuffle the quants of corresponding sub blocks for rearranging quants for vectorized bsums computation __m256i shuffle_mask_sb2 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 0, 1, 4, 5, 6, 7, 8, 9, 8, 9, 12, 13, 14, 15)); shuffle_mask_sb2 = _mm256_permute2f128_si256(shuffle_mask_sb2, shuffle_mask_sb2, 0); __m256i shuffle_mask_sb3 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 0, 1, 6, 7, 8, 9, 10, 11, 8, 9, 14, 15)); shuffle_mask_sb3 = _mm256_permute2f128_si256(shuffle_mask_sb3, shuffle_mask_sb3, 0); __m256i shuffle_mask_sb4 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 4, 5, 0, 1, 8, 9, 10, 11, 12, 13, 8, 9)); shuffle_mask_sb4 = _mm256_permute2f128_si256(shuffle_mask_sb4, shuffle_mask_sb4, 0); for (int k = 0; k < 4; k++) { // Quants from four different sub blocks are taken __m256i q0 = quants_interleaved[k * 8 + 0]; __m256i q1 = quants_interleaved[k * 8 + 1]; __m256i q2 = quants_interleaved[k * 8 + 2]; __m256i q3 = quants_interleaved[k * 8 + 3]; __m256i q4 = quants_interleaved[k * 8 + 4]; __m256i q5 = quants_interleaved[k * 8 + 5]; __m256i q6 = quants_interleaved[k * 8 + 6]; __m256i q7 = quants_interleaved[k * 8 + 7]; // The below code block has the first half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time __m256i sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2); __m256i sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34); __m256i sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3); sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68); __m256i sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4); sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136); __m256i one = _mm256_set1_epi8(1); __m256i bsums_r1 = _mm256_maddubs_epi16(one, sb_h1_interleaved); for (int l = 0; l < 3; l++) { // Quants value shifted to process next two values from each sub block q0 = _mm256_srli_epi64(q0, 16); q2 = _mm256_srli_epi64(q2, 16); q4 = _mm256_srli_epi64(q4, 16); q6 = _mm256_srli_epi64(q6, 16); sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2); sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34); sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3); sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved,
sb3_h1_shuffled, 68); sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4); sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136); bsums_r1 = _mm256_add_epi16(bsums_r1, _mm256_maddubs_epi16(one, sb_h1_interleaved)); } // The below code block has the second half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time __m256i sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2); __m256i sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34); __m256i sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3); sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68); __m256i sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4); sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136); __m256i bsums_r2 = _mm256_maddubs_epi16(one, sb_h2_interleaved); for (int l = 0; l < 3; l++) { // Quants value shifted to process next two values from each sub block q1 = _mm256_srli_epi64(q1, 16); q3 = _mm256_srli_epi64(q3, 16); q5 = _mm256_srli_epi64(q5, 16); q7 = _mm256_srli_epi64(q7, 16); sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2); sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34); sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3); sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68); sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4); sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136); bsums_r2 = _mm256_add_epi16(bsums_r2, _mm256_maddubs_epi16(one, sb_h2_interleaved)); } // Overall bsums in interleaved fashion computed by adding results of both halves __m256i bsums_r = _mm256_add_epi16(bsums_r1, bsums_r2); _mm256_storeu_si256((__m256i *)(y[i].bsums + 16 * k), bsums_r); } } #else UNUSED(nb); UNUSED(y); ggml_quantize_mat_q8_K_4x8_generic(x, vy, k); #endif } // // GEMV/GEMM templates // #if defined(__AVX2__) || defined(__AVX512F__) // GEMV for 8x blocks of 32 4-bit quants with a single scale factor per block template static void gemv_q4_b32_8x8_q8_0_lut_avx(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc, __m256i signextendlut) { static_assert( std::is_same_v || std::is_same_v, "Unsupported block type"); const int qk = QK8_0; const int nb = n / qk; UNUSED(bs); __m128i changemask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); // Permute mask used for easier vector processing at later stages const __m256i m4b = _mm256_set1_epi8(0x0F); int64_t b_nb = n / 32; const block_tx8 * b_ptr_start = (const block_tx8 *)vx; const block_q8_0 * a_ptr_start = (const block_q8_0 *)vy; // Process Q8_0 blocks one by one for (int64_t y = 0; y < nr; y++) { // Pointers to LHS blocks of block_q8_0 format const block_q8_0 * a_ptr = a_ptr_start + (y * nb); // Take group of eight blocks at each pass of the loop and perform dot product operation for (int64_t x = 0; x < nc / 8; x++) { // Pointers to RHS blocks const block_tx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulator __m256 acc_row = _mm256_setzero_ps(); for (int64_t b = 0; b < nb; b++) { // Load 8 blocks of 32 interleaved as 8 bytes (B0 - B7) const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 1); const __m256i rhs_raw_vec_0123_1 = 
_mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 2);
                const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 3);

                // 4-bit -> 8-bit - Sign is maintained
                const __m256i rhs_vec_0123_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_0123_0, m4b)); // B0(0-7) B1(0-7) B2(0-7) B3(0-7)
                const __m256i rhs_vec_4567_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_4567_0, m4b)); // B4(0-7) B5(0-7) B6(0-7) B7(0-7)
                const __m256i rhs_vec_0123_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_0123_1, m4b)); // B0(8-15) B1(8-15) B2(8-15) B3(8-15)
                const __m256i rhs_vec_4567_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_4567_1, m4b)); // B4(8-15) B5(8-15) B6(8-15) B7(8-15)
                const __m256i rhs_vec_0123_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b)); // B0(16-23) B1(16-23) B2(16-23) B3(16-23)
                const __m256i rhs_vec_4567_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b)); // B4(16-23) B5(16-23) B6(16-23) B7(16-23)
                const __m256i rhs_vec_0123_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b)); // B0(24-31) B1(24-31) B2(24-31) B3(24-31)
                const __m256i rhs_vec_4567_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b)); // B4(24-31) B5(24-31) B6(24-31) B7(24-31)

                // Load the scale values for the 8 blocks interleaved in block_tx8
                __m256 col_scale_f32;
                if constexpr ( std::is_same_v || std::is_same_v) {
                    col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, changemask);
                }

                // Load and convert to FP32 scale from block_q8_0
                const __m256 row_scale_f32 = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(a_ptr[b].d));

                // Load the block values in block_q8_0 in batches of 16 bytes and replicate the same across 256 bit vector
                __m256i lhs_vec_0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)a_ptr[b].qs));
                __m256i lhs_vec_1 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16)));

                lhs_vec_0 = _mm256_permute2f128_si256(lhs_vec_0, lhs_vec_0, 0); // A0(0-15) A0(0-15)
                lhs_vec_1 = _mm256_permute2f128_si256(lhs_vec_1, lhs_vec_1, 0); // A0(16-31) A0(16-31)

                __m256i iacc = _mm256_setzero_si256();

                // Dot product done within 32 bit lanes and accumulated in the same vector
                // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3)
                // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7)
                // ...........................................................................
                // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31)

                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0));
                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85));

                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170));
                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255));

                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_2 ,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0));
                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85));

                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170));
                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255));

                // Accumulated values multiplied with appropriate scales
                acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row);
            }

            // Accumulated output values permuted so as to be stored in appropriate order post accumulation
            acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask);
            _mm256_storeu_ps(s + (y * nr + x * 8), acc_row);
        }
    }
}

// GEMM for 8x blocks of 32 4-bit quants with a single scale factor per block
template <typename block_tx8>
static void gemm_q4_b32_8x8_q8_0_lut_avx(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc, __m256i signextendlut) {
    static_assert( std::is_same_v || std::is_same_v, "Unsupported block type");

    const int qk = QK8_0;
    const int nb = n / qk;

    const block_tx8 * b_ptr_start = (const block_tx8 *)vx;
    const block_q8_0x4 * a_ptr_start = (const block_q8_0x4 *)vy;
    int64_t b_nb = n / 32;
    int64_t y = 0;

    // Mask to mask out nibbles from packed bytes
    const __m256i m4b = _mm256_set1_epi8(0x0F);
    const __m128i loadMask = _mm_blend_epi32(_mm_setzero_si128(), _mm_set1_epi32(0xFFFFFFFF), 3);
    // Permute mask used for easier vector processing at later stages
    __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4);
    int64_t xstart = 0;
    int anr = nr - nr%16; // Used to align nr with boundary of 16

#if defined(__AVX512BW__) && defined(__AVX512DQ__)
    int anc = nc - nc%16; // Used to align nc with boundary of 16

    // Mask to mask out nibbles from packed bytes expanded to 512 bit length
    const __m512i m4bexpanded = _mm512_set1_epi8(0x0F);
    // Lookup table to convert signed nibbles to signed bytes expanded to 512 bit length
    __m512i signextendlutexpanded = _mm512_inserti32x8(_mm512_castsi256_si512(signextendlut), signextendlut, 1);

    // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation
    for (; y < anr / 4; y += 4) {

        const block_q8_0x4 * a_ptrs[4];

        a_ptrs[0] = a_ptr_start + (y * nb);
        for (int i = 0; i < 3; ++i) {
            a_ptrs[i + 1] = a_ptrs[i] + nb;
        }

        // Take group of two block_tx8 structures at each pass of the loop and perform dot product operation
        for (int64_t x =
0; x < anc / 8; x += 2) { const block_tx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); const block_tx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); // Master FP accumulators __m512 acc_rows[16]; for (int i = 0; i < 16; i++) { acc_rows[i] = _mm512_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { // Load the sixteen blocks of quantized values interleaved with each other in chunks of eight - B0,B1 ....BE,BF const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 32)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 64)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 96)); const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs)); const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 32)); const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 64)); const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 96)); // Save the values in the following vectors in the formats B0B1B4B5B8B9BCBD, B2B3B6B7BABBBEBF for further processing and storing of values const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); // 4-bit -> 8-bit - Sign is maintained const __m512i rhs_mat_014589CD_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) B8(0-7) B9(0-7) BC(0-7) BD(0-7) const __m512i rhs_mat_2367ABEF_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) BA(0-7) BB(0-7) BE(0-7) BF(0-7) const __m512i rhs_mat_014589CD_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) 
B8(8-15) B9(8-15) BC(8-15) BD(8-15) const __m512i rhs_mat_2367ABEF_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) BA(8-15) BB(8-15) BE(8-15) BF(8-15) const __m512i rhs_mat_014589CD_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) B8(16-23) B9(16-23) BC(16-23) BD(16-23) const __m512i rhs_mat_2367ABEF_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) BA(16-23) BB(16-23) BE(16-23) BF(16-23) const __m512i rhs_mat_014589CD_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) B8(24-31) B9(24-31) BC(24-31) BD(24-31) const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) // Shuffle pattern one - right side input const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) // Shuffle pattern two - right side input const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) const __m512i 
rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) // Scale values - Load the weight scale values of two block_tx8 __m512 col_scale_f32; if constexpr ( std::is_same_v || std::is_same_v) { col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); } // Process LHS in pairs of rows for (int rp = 0; rp < 4; rp++) { // Load the four blocks of quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector __m256i lhs_mat_ymm_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs))); __m256i lhs_mat_ymm_01_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 0); __m256i lhs_mat_ymm_23_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 17); __m256i lhs_mat_ymm_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 32))); __m256i lhs_mat_ymm_01_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 0); __m256i lhs_mat_ymm_23_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 17); __m256i lhs_mat_ymm_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 64))); __m256i lhs_mat_ymm_01_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 0); __m256i lhs_mat_ymm_23_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 17); __m256i lhs_mat_ymm_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 96))); __m256i lhs_mat_ymm_01_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 0); __m256i lhs_mat_ymm_23_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 17); __m512i lhs_mat_01_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_0), lhs_mat_ymm_01_0, 1); __m512i 
lhs_mat_23_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_0), lhs_mat_ymm_23_0, 1); __m512i lhs_mat_01_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_1), lhs_mat_ymm_01_1, 1); __m512i lhs_mat_23_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_1), lhs_mat_ymm_23_1, 1); __m512i lhs_mat_01_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_2), lhs_mat_ymm_01_2, 1); __m512i lhs_mat_23_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_2), lhs_mat_ymm_23_2, 1); __m512i lhs_mat_01_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_3), lhs_mat_ymm_01_3, 1); __m512i lhs_mat_23_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_3), lhs_mat_ymm_23_3, 1); // Shuffle pattern one - left side input const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) // Shuffle pattern two - left side input const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) 
A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version const __m512i zero = _mm512_setzero_epi32(); __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), 
lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); __m512i iacc_mat_01 = _mm512_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); __m512i iacc_mat_10 = _mm512_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); __m512i iacc_mat_11 = _mm512_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); // Straighten out to make 4 row vectors __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptrs[rp][b].d), loadMask), 68); const __m512 row_scale_f32 = GGML_F32Cx16_REPEAT_LOAD(row_scale_f16); // Multiply with appropiate scales and accumulate acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); } } // Store the accumulated values for (int i = 0; i < 16; i++) { _mm512_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); } } } // Take a block_q8_0x4 structures at each pass of the loop and perform dot product operation for (; y < nr / 4; y ++) { const block_q8_0x4 * a_ptr = a_ptr_start + (y * nb); // Take group of two block_tx8 structures at each pass of the loop and perform dot product operation for (int64_t x = 0; x < anc / 8; x += 2) { const block_tx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); const block_tx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); // Master FP accumulators __m512 acc_rows[4]; for (int i = 0; i < 4; i++) { acc_rows[i] = _mm512_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { // Load the sixteen blocks of quantized values interleaved with each other in chunks of eight - B0,B1 ....BE,BF const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 32)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 64)); const __m256i rhs_raw_mat_4567_1 = 
_mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 96)); const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs)); const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 32)); const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 64)); const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 96)); // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); // 4-bit -> 8-bit - Sign is maintained const __m512i rhs_mat_014589CD_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) B8(0-7) B9(0-7) BC(0-7) BD(0-7) const __m512i rhs_mat_2367ABEF_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) BA(0-7) BB(0-7) BE(0-7) BF(0-7) const __m512i rhs_mat_014589CD_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) B8(8-15) B9(8-15) BC(8-15) BD(8-15) const __m512i rhs_mat_2367ABEF_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) BA(8-15) BB(8-15) BE(8-15) BF(8-15) const __m512i rhs_mat_014589CD_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) B8(16-23) B9(16-23) BC(16-23) BD(16-23) const __m512i rhs_mat_2367ABEF_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) BA(16-23) BB(16-23) BE(16-23) BF(16-23) const __m512i 
rhs_mat_014589CD_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) B8(24-31) B9(24-31) BC(24-31) BD(24-31) const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) // Shuffle pattern one - right side input const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) // Shuffle pattern two - right side input const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) const 
__m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) // Scale values - Load the weight scale values of two block_tx8 __m512 col_scale_f32; if constexpr ( std::is_same_v || std::is_same_v) { col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); } // Load the four blocks of quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector __m256i lhs_mat_ymm_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs))); __m256i lhs_mat_ymm_01_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 0); __m256i lhs_mat_ymm_23_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 17); __m256i lhs_mat_ymm_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 32))); __m256i lhs_mat_ymm_01_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 0); __m256i lhs_mat_ymm_23_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 17); __m256i lhs_mat_ymm_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 64))); __m256i lhs_mat_ymm_01_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 0); __m256i lhs_mat_ymm_23_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 17); __m256i lhs_mat_ymm_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 96))); __m256i lhs_mat_ymm_01_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 0); __m256i lhs_mat_ymm_23_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 17); __m512i lhs_mat_01_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_0), lhs_mat_ymm_01_0, 1); __m512i lhs_mat_23_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_0), lhs_mat_ymm_23_0, 1); __m512i lhs_mat_01_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_1), lhs_mat_ymm_01_1, 1); __m512i lhs_mat_23_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_1), lhs_mat_ymm_23_1, 1); __m512i lhs_mat_01_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_2), lhs_mat_ymm_01_2, 1); __m512i lhs_mat_23_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_2), lhs_mat_ymm_23_2, 1); __m512i lhs_mat_01_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_3), lhs_mat_ymm_01_3, 1); __m512i lhs_mat_23_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_3), lhs_mat_ymm_23_3, 1); // Shuffle pattern one - left side input const __m512i lhs_mat_01_0_sp1 = 
_mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) // Shuffle pattern two - left side input const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) 
A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version const __m512i zero = _mm512_setzero_epi32(); __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); __m512i iacc_mat_01 = _mm512_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); __m512i iacc_mat_10 = _mm512_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); __m512i iacc_mat_11 = 
_mm512_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); // Straighten out to make 4 row vectors __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptr[b].d), loadMask), 68); const __m512 row_scale_f32 = GGML_F32Cx16_REPEAT_LOAD(row_scale_f16); // Multiply with appropiate scales and accumulate acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); } // Store the accumulated values for (int i = 0; i < 4; i++) { _mm512_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); } } } if (anc != nc) { xstart = anc/8; y = 0; } #endif // __AVX512BW__ && __AVX512DQ__ // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation for (; y < anr / 4; y += 4) { const block_q8_0x4 * a_ptrs[4]; a_ptrs[0] = a_ptr_start + (y * nb); for (int i = 0; i < 3; ++i) { a_ptrs[i + 1] = a_ptrs[i] + nb; } // Take group of eight block_tx8 structures at each pass of the loop and perform dot product operation for (int64_t x = xstart; x < nc / 8; x++) { const block_tx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_rows[16]; for (int i = 0; i < 16; i++) { acc_rows[i] = _mm256_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { // Load the eight blocks of quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96)); // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); // 4-bit -> 8-bit 
- Sign is maintained const __m256i rhs_mat_0145_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_0, m4b)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) const __m256i rhs_mat_2367_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_0, m4b)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) const __m256i rhs_mat_0145_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_1, m4b)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) const __m256i rhs_mat_2367_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_1, m4b)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) const __m256i rhs_mat_0145_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) const __m256i rhs_mat_2367_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) const __m256i rhs_mat_0145_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) const __m256i rhs_mat_2367_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) // Shuffle pattern one - right side input const __m256i rhs_mat_0145_0_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) const __m256i rhs_mat_2367_0_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) const __m256i rhs_mat_0145_1_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) const __m256i rhs_mat_2367_1_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) const __m256i rhs_mat_0145_2_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) const __m256i rhs_mat_2367_2_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) const __m256i rhs_mat_0145_3_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) const __m256i rhs_mat_2367_3_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) // Shuffle pattern two - right side input const __m256i rhs_mat_0145_0_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) const __m256i rhs_mat_2367_0_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) const __m256i rhs_mat_0145_1_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) const __m256i rhs_mat_2367_1_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) const __m256i rhs_mat_0145_2_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) const __m256i rhs_mat_2367_2_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) const 
__m256i rhs_mat_0145_3_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) const __m256i rhs_mat_2367_3_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) // Scale values - Load the wight scale values of block_tx8 __m256 col_scale_f32; if constexpr ( std::is_same_v || std::is_same_v) { col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); } // Process LHS in groups of four for (int rp = 0; rp < 4; rp++) { // Load the four blocks of quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs))); __m256i lhs_mat_01_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 0); __m256i lhs_mat_23_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 17); __m256i lhs_mat_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 32))); __m256i lhs_mat_01_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 0); __m256i lhs_mat_23_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 17); __m256i lhs_mat_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 64))); __m256i lhs_mat_01_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 0); __m256i lhs_mat_23_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 17); __m256i lhs_mat_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 96))); __m256i lhs_mat_01_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 0); __m256i lhs_mat_23_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 17); // Shuffle pattern one - left side input const __m256i lhs_mat_01_0_sp1 = _mm256_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) const __m256i lhs_mat_23_0_sp1 = _mm256_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) const __m256i lhs_mat_01_1_sp1 = _mm256_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) const __m256i lhs_mat_23_1_sp1 = _mm256_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) const __m256i lhs_mat_01_2_sp1 = _mm256_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) const __m256i lhs_mat_23_2_sp1 = _mm256_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) const __m256i lhs_mat_01_3_sp1 = _mm256_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) const __m256i lhs_mat_23_3_sp1 = _mm256_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) // Shuffle pattern two - left side input const __m256i lhs_mat_01_0_sp2 = _mm256_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) const __m256i lhs_mat_23_0_sp2 = _mm256_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) const __m256i lhs_mat_01_1_sp2 = _mm256_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) const __m256i lhs_mat_23_1_sp2 = 
_mm256_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) const __m256i lhs_mat_01_2_sp2 = _mm256_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) const __m256i lhs_mat_23_2_sp2 = _mm256_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) const __m256i lhs_mat_01_3_sp2 = _mm256_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) const __m256i lhs_mat_23_3_sp2 = _mm256_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version const __m256i zero = _mm256_setzero_si256(); __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 
32 values in block __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); __m256i iacc_mat_01 = _mm256_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); __m256i iacc_mat_10 = _mm256_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); __m256i iacc_mat_11 = _mm256_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); // Straighten out to make 4 row vectors __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204); __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes const __m256 row_scale_f32 = GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); // Multiply with appropiate scales and accumulate acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); } } // Store the accumulated values for (int i = 0; i < 16; i++) { _mm256_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); } } } // Take a block_q8_0x4 structures at each pass of the loop and perform dot product operation for (; y < nr / 4; y ++) { const block_q8_0x4 * a_ptr = a_ptr_start + (y * nb); // Load the eight blocks of quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 for (int64_t x = xstart; x < nc / 8; x++) { const block_tx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_rows[4]; for (int i = 0; i < 4; i++) { acc_rows[i] = _mm256_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { // Load the eight block_q8_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96)); // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); // 4-bit -> 8-bit - Sign is maintained const 
__m256i rhs_mat_0145_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_0, m4b)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) const __m256i rhs_mat_2367_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_0, m4b)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) const __m256i rhs_mat_0145_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_1, m4b)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) const __m256i rhs_mat_2367_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_1, m4b)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) const __m256i rhs_mat_0145_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) const __m256i rhs_mat_2367_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) const __m256i rhs_mat_0145_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) const __m256i rhs_mat_2367_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) // Shuffle pattern one - right side input const __m256i rhs_mat_0145_0_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) const __m256i rhs_mat_2367_0_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) const __m256i rhs_mat_0145_1_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) const __m256i rhs_mat_2367_1_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) const __m256i rhs_mat_0145_2_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) const __m256i rhs_mat_2367_2_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) const __m256i rhs_mat_0145_3_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) const __m256i rhs_mat_2367_3_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) // Shuffle pattern two - right side input const __m256i rhs_mat_0145_0_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) const __m256i rhs_mat_2367_0_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) const __m256i rhs_mat_0145_1_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) const __m256i rhs_mat_2367_1_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) const __m256i rhs_mat_0145_2_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) const __m256i rhs_mat_2367_2_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) const __m256i rhs_mat_0145_3_sp2 = 
_mm256_shuffle_epi32(rhs_mat_0145_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) const __m256i rhs_mat_2367_3_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) // Scale values - Load the wight scale values of block_tx8 __m256 col_scale_f32; if constexpr ( std::is_same_v || std::is_same_v) { col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); } // Load the four blocks of quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs))); __m256i lhs_mat_01_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 0); __m256i lhs_mat_23_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 17); __m256i lhs_mat_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 32))); __m256i lhs_mat_01_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 0); __m256i lhs_mat_23_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 17); __m256i lhs_mat_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 64))); __m256i lhs_mat_01_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 0); __m256i lhs_mat_23_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 17); __m256i lhs_mat_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 96))); __m256i lhs_mat_01_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 0); __m256i lhs_mat_23_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 17); // Shuffle pattern one - left side input const __m256i lhs_mat_01_0_sp1 = _mm256_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) const __m256i lhs_mat_23_0_sp1 = _mm256_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) const __m256i lhs_mat_01_1_sp1 = _mm256_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) const __m256i lhs_mat_23_1_sp1 = _mm256_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) const __m256i lhs_mat_01_2_sp1 = _mm256_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) const __m256i lhs_mat_23_2_sp1 = _mm256_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) const __m256i lhs_mat_01_3_sp1 = _mm256_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) const __m256i lhs_mat_23_3_sp1 = _mm256_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) // Shuffle pattern two - left side input const __m256i lhs_mat_01_0_sp2 = _mm256_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) const __m256i lhs_mat_23_0_sp2 = _mm256_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) const __m256i lhs_mat_01_1_sp2 = _mm256_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) const __m256i lhs_mat_23_1_sp2 = _mm256_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) const __m256i 
lhs_mat_01_2_sp2 = _mm256_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) const __m256i lhs_mat_23_2_sp2 = _mm256_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) const __m256i lhs_mat_01_3_sp2 = _mm256_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) const __m256i lhs_mat_23_3_sp2 = _mm256_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version const __m256i zero = _mm256_setzero_si256(); __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); __m256i iacc_mat_01 = 
_mm256_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); __m256i iacc_mat_10 = _mm256_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); __m256i iacc_mat_11 = _mm256_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); // Straighten out to make 4 row vectors __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204); __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes const __m256 row_scale_f32 = GGML_F32Cx8_REPEAT_LOAD(a_ptr[b].d, loadMask); // Multiply with appropiate scales and accumulate acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); } // Store the accumulated values for (int i = 0; i < 4; i++) { _mm256_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); } } } } #endif // defined(__AVX2__) || defined(__AVX512F__) void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { #if defined(__AVX2__) || defined(__AVX512F__) { // Lookup table to convert signed nibbles to signed bytes __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); gemv_q4_b32_8x8_q8_0_lut_avx(n, s, bs, vx, vy, nr, nc, signextendlut); return; } #endif ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__AVX2__) // Lookup table to convert signed nibbles to signed bytes __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); // Shuffle masks to rearrange delta and scale values to multiply with appropriate scales __m128i deltamask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); __m128i scalemask = _mm_set_epi8(7, 7, 3, 3, 6, 6, 2, 2, 5, 5, 1, 1, 4, 4, 0, 0); // Permute mask used for easier vector processing at later stages __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); // Mask to extract nibbles from bytes 
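// (0x0F keeps the low nibble of every packed byte; the high nibble is recovered later with
// _mm256_srli_epi16(x, 4) followed by the same mask, so each byte of the interleaved Q4_K data
// yields one quant for each of the two sub blocks handled per sb iteration)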
const __m256i m4b = _mm256_set1_epi8(0x0F); int64_t b_nb = n / QK_K; const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 *)vx; const block_q8_K * a_ptr_start = (const block_q8_K *)vy; // Process Q8_K blocks one by one for (int64_t y = 0; y < nr; y++) { // Pointers to LHS blocks of block_q8_K format const block_q8_K * a_ptr = a_ptr_start + (y * nb); // Take group of eight interleaved block_q4_K structures at each pass of the loop and perform dot product operation for (int64_t x = 0; x < nc / 8; x++) { // Pointers to RHS blocks const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_row = _mm256_setzero_ps(); __m256 acc_min_rows = _mm256_setzero_ps(); for (int64_t b = 0; b < nb; b++) { // Load and convert to FP32 scale from block_q8_K const __m256 row_scale_f32 = _mm256_set1_ps((a_ptr[b].d)); // Load the scale values for the 8 blocks interleaved in block_q4_Kx8 // col_scale_f32 rearranged so as to multiply with appropriate quants const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, deltamask); const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); __m256i iacc_b = _mm256_setzero_si256(); __m256i iacc_min_b = _mm256_setzero_si256(); const __m256i q8sums = _mm256_loadu_si256((const __m256i * )(a_ptr[b].bsums)); __m256i q8s = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(q8sums), _mm256_extracti128_si256(q8sums, 1))); q8s = _mm256_permute2f128_si256(q8s, q8s, 0); // Processes two sub blocks from each Q4_K in each iteration for (int sb = 0; sb < QK_K / 64; sb++) { // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); const __m256i rhs_raw_vec_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); const __m256i rhs_raw_vec_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); const __m256i rhs_raw_vec_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); const __m256i rhs_raw_vec_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); // 4-bit -> 8-bit // Values of the first sub block of eight block_q4_K structures for the sb loop const __m256i rhs_vec_0123_00 = _mm256_and_si256(rhs_raw_vec_0123_0, m4b); const __m256i rhs_vec_4567_00 = _mm256_and_si256(rhs_raw_vec_4567_0, m4b); const __m256i rhs_vec_0123_01 = _mm256_and_si256(rhs_raw_vec_0123_1, m4b); const __m256i rhs_vec_4567_01 = _mm256_and_si256(rhs_raw_vec_4567_1, m4b); const __m256i rhs_vec_0123_02 = _mm256_and_si256(rhs_raw_vec_0123_2, m4b); const __m256i rhs_vec_4567_02 = _mm256_and_si256(rhs_raw_vec_4567_2, m4b); const __m256i rhs_vec_0123_03 = _mm256_and_si256(rhs_raw_vec_0123_3, m4b); const __m256i rhs_vec_4567_03 = _mm256_and_si256(rhs_raw_vec_4567_3, m4b); // Values of the second sub block of eight block_q4_K structures when sb = 1 const __m256i rhs_vec_0123_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b); const __m256i rhs_vec_4567_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b); const __m256i rhs_vec_0123_11 = 
_mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b); const __m256i rhs_vec_4567_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b); const __m256i rhs_vec_0123_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 4), m4b); const __m256i rhs_vec_4567_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 4), m4b); const __m256i rhs_vec_0123_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 4), m4b); const __m256i rhs_vec_4567_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 4), m4b); uint32_t utmp_0[4], utmp_1[4]; // Scales and Mins of corresponding sub blocks from different Q8_K structures are stored together // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp_0[1] & kmask1; utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); utmp_0[2] = uaux_0; utmp_0[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); const uint32_t uaux_1 = utmp_1[1] & kmask1; utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); utmp_1[2] = uaux_1; utmp_1[0] &= kmask1; // Scales of first sub block in the sb loop const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); __m128i scales_rearrange_0 = _mm_shuffle_epi8(mins_and_scales_0, scalemask); __m256i scales_0 = _mm256_cvtepu8_epi16(scales_rearrange_0); // Scales of second sub block in the sb loop __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); __m128i scales_rearrange_1 = _mm_shuffle_epi8(mins_and_scales_1, scalemask); __m256i scales_1 = _mm256_cvtepu8_epi16(scales_rearrange_1); // Mins of first and second sub block of Q4_K block are arranged side by side __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); // Load the two sub block values corresponding to sb in block_q8_K in batches of 16 bytes and replicate the same across 256 bit vector __m256i lhs_vec_00 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + sb * 64))); __m256i lhs_vec_01 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16 + sb * 64))); __m256i lhs_vec_10 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 32 + sb * 64))); __m256i lhs_vec_11 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 48 + sb * 64))); lhs_vec_00 = _mm256_permute2f128_si256(lhs_vec_00, lhs_vec_00, 0); lhs_vec_01 = _mm256_permute2f128_si256(lhs_vec_01, lhs_vec_01, 0); lhs_vec_10 = _mm256_permute2f128_si256(lhs_vec_10, lhs_vec_10, 0); lhs_vec_11 = _mm256_permute2f128_si256(lhs_vec_11, lhs_vec_11, 0); // Dot product done within 32 bit lanes and accumulated in the same vector // First done for first sub block and thenn for second sub block in each sb // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3) // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7) // ........................................................................... 
// B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31) __m256i iacc_0 = _mm256_setzero_si256(); __m256i iacc_1 = _mm256_setzero_si256(); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_00 ,_mm256_shuffle_epi32(rhs_vec_4567_00, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 0))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_00, 177) ,rhs_vec_4567_00, 170), _mm256_shuffle_epi32(lhs_vec_00, 85))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_01 ,_mm256_shuffle_epi32(rhs_vec_4567_01, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 170))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_01, 177) ,rhs_vec_4567_01, 170), _mm256_shuffle_epi32(lhs_vec_00, 255))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_02 ,_mm256_shuffle_epi32(rhs_vec_4567_02, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 0))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_02, 177) ,rhs_vec_4567_02, 170), _mm256_shuffle_epi32(lhs_vec_01, 85))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_03 ,_mm256_shuffle_epi32(rhs_vec_4567_03, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 170))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_03, 177) ,rhs_vec_4567_03, 170), _mm256_shuffle_epi32(lhs_vec_01, 255))); iacc_0 = _mm256_madd_epi16(iacc_0, scales_0); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_10 ,_mm256_shuffle_epi32(rhs_vec_4567_10, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 0))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_10, 177) ,rhs_vec_4567_10, 170), _mm256_shuffle_epi32(lhs_vec_10, 85))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_11 ,_mm256_shuffle_epi32(rhs_vec_4567_11, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 170))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_11, 177) ,rhs_vec_4567_11, 170), _mm256_shuffle_epi32(lhs_vec_10, 255))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_12 ,_mm256_shuffle_epi32(rhs_vec_4567_12, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 0))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_12, 177) ,rhs_vec_4567_12, 170), _mm256_shuffle_epi32(lhs_vec_11, 85))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_13 ,_mm256_shuffle_epi32(rhs_vec_4567_13, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 170))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_13, 177) ,rhs_vec_4567_13, 170), _mm256_shuffle_epi32(lhs_vec_11, 255))); iacc_1 = _mm256_madd_epi16(iacc_1, scales_1); // Accumulate the iacc value for one sb __m256i iacc_sb = _mm256_add_epi32(iacc_0, iacc_1); // Broadcast the bsums of the two sub blocks of the iteration of Q8_K across the vector // Multiply-Add with corresponding mins of Q4_Kx8 with bsums __m256i q8s_sb = _mm256_shuffle_epi32(q8s, 0); __m256i iacc_min_sb = _mm256_madd_epi16(q8s_sb, mins_01); q8s = _mm256_bsrli_epi128(q8s, 4); 
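// q8s holds one 16-bit sum per 32-quant sub block of the Q8_K block (the bsums hadd-ed in
// adjacent pairs), duplicated across both 128-bit lanes, so the _mm256_shuffle_epi32(q8s, 0)
// above broadcasts exactly the two sums that belong to the current pair of sub blocks; the madd
// with the interleaved mins accumulates the per-column sum of min * bsum, which after scaling by
// dmin and the Q8_K scale is subtracted from acc_row when the results are stored, and the 4-byte
// shift above moves the next pair of sums into the low dword for the next sb iteration.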
// Accumulate for the complete block iacc_b = _mm256_add_epi32(iacc_b, iacc_sb); iacc_min_b = _mm256_add_epi32(iacc_min_b, iacc_min_sb); } // Multiply-Add with scale values for the complete super block acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_b), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); acc_min_rows = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_min_b), _mm256_mul_ps(col_dmin_f32, row_scale_f32), acc_min_rows); } // Accumulated output values permuted so as to be stored in appropriate order post accumulation acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask); _mm256_storeu_ps(s + (y * nr + x * 8), _mm256_sub_ps(acc_row, acc_min_rows)); } } #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); ggml_gemv_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); #endif } void ggml_gemv_iq4_nl_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { #if defined(__AVX2__) __m256i signextendlut = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i*)kvalues_iq4nl)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); gemv_q4_b32_8x8_q8_0_lut_avx(n, s, bs, vx, vy, nr, nc, signextendlut); return; #endif ggml_gemv_iq4_nl_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemv_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__AVX2__) // Lookup table to convert signed nibbles to signed bytes __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); // Shuffle masks to rearrange delta values to multiply with appropriate scales __m128i deltamask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); // Permute mask used for easier vector processing at later stages __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); const __m256i m3b = _mm256_set1_epi8(3); const __m128i m4b_sse = _mm_set1_epi8(0xF); //Mask to get appropriate scales __m128i scalemask1 = _mm_set_epi8(14,14,6,6,12,12,4,4,10,10,2,2,8,8,0,0); __m128i scalemask2 = _mm_set_epi8(15,15,7,7,13,13,5,5,11,11,3,3,9,9,1,1); int64_t b_nb = n / QK_K; const block_q2_Kx8 * b_ptr_start = (const block_q2_Kx8 *)vx; const block_q8_K * a_ptr_start = (const block_q8_K *)vy; // Process Q8_K blocks one by one for (int64_t y = 0; y < nr; y++) { // Pointers to LHS blocks of block_q8_K format const block_q8_K * a_ptr = a_ptr_start + (y * nb); // Take group of eight interleaved block_q2_K structures at each pass of the loop and perform dot product operation for(int64_t x = 0; x < nc / 8; x++) { // Pointers to RHS blocks const block_q2_Kx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_row = _mm256_setzero_ps(); __m256 acc_min_rows = _mm256_setzero_ps(); for (int64_t b = 0; b < nb; b++) { // Load and convert to FP32 delta from block_q8_K const __m256 row_scale_f32 = _mm256_set1_ps((a_ptr[b].d)); // Load the delta values for the 8 blocks interleaved in block_q2_Kx8 // col_scale_f32 rearranged so as to multiply with appropriate quants const __m256 col_scale_f32 = 
GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, deltamask); const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); __m256i iacc_b = _mm256_setzero_si256(); __m256i iacc_min_b = _mm256_setzero_si256(); // Processes eight sub blocks from each Q2_K in each iteration for(int sb = 0; sb < QK_K / 128; sb++) { // Load the eight block_q2_K for eight sub blocks quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); const __m256i rhs_raw_vec_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); const __m256i rhs_raw_vec_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); const __m256i rhs_raw_vec_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); const __m256i rhs_raw_vec_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); // 2-bit -> 8-bit // Values of the 0th,2nd,4th,6th sub blocks of eight block_q2_K structures for the sb loop const __m256i rhs_vec_0123_00 = _mm256_and_si256(rhs_raw_vec_0123_0, m3b); //B00(0-7) B01(0-7) B02(0-7) B03(0-7) const __m256i rhs_vec_0123_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 2), m3b); //B20(0-7) B21(0-7) B22(0-7) B23(0-7) const __m256i rhs_vec_0123_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m3b); //B40(0-7) B41(0-7) B42(0-7) B43(0-7) const __m256i rhs_vec_0123_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 6), m3b); //B60(0-7) B61(0-7) B62(0-7) B63(0-7) const __m256i rhs_vec_4567_00 = _mm256_and_si256(rhs_raw_vec_4567_0, m3b); //B04(0-7) B05(0-7) B06(0-7) B07(0-7) const __m256i rhs_vec_4567_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 2), m3b); //B24(0-7) B25(0-7) B26(0-7) B27(0-7) const __m256i rhs_vec_4567_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m3b); //B44(0-7) B45(0-7) B46(0-7) B47(0-7) const __m256i rhs_vec_4567_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 6), m3b); //B64(0-7) B65(0-7) B66(0-7) B67(0-7) const __m256i rhs_vec_0123_01 = _mm256_and_si256(rhs_raw_vec_0123_1, m3b); //B00(8-15) B01(8-15) B02(8-15) B03(8-15) const __m256i rhs_vec_0123_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 2), m3b); //B20(8-15) B21(8-15) B22(8-15) B23(8-15) const __m256i rhs_vec_0123_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m3b); //B40(8-15) B41(8-15) B42(8-15) B43(8-15) const __m256i rhs_vec_0123_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 6), m3b); //B60(8-15) B61(8-15) B62(8-15) B63(8-15) const __m256i rhs_vec_4567_01 = _mm256_and_si256(rhs_raw_vec_4567_1, m3b); //B04(8-15) B05(8-15) B06(8-15) B07(8-15) const __m256i rhs_vec_4567_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 2), m3b); //B24(8-15) B25(8-15) B26(8-15) B27(8-15) const __m256i rhs_vec_4567_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m3b); //B44(8-15) B45(8-15) B46(8-15) B47(8-15) const __m256i rhs_vec_4567_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 6), m3b); //B64(8-15) B65(8-15) B66(8-15) B67(8-15) // Values of the 1st,3rd,5th,7th sub blocks of eight block_q2_K structures 
for the sb loop const __m256i rhs_vec_0123_10 = _mm256_and_si256(rhs_raw_vec_0123_2, m3b); //B10(0-7) B11(0-7) B12(0-7) B13(0-7) const __m256i rhs_vec_0123_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 2), m3b); //B30(0-7) B31(0-7) B32(0-7) B33(0-7) const __m256i rhs_vec_0123_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 4), m3b); //B50(0-7) B51(0-7) B52(0-7) B53(0-7) const __m256i rhs_vec_0123_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 6), m3b); //B70(0-7) B71(0-7) B72(0-7) B73(0-7) const __m256i rhs_vec_4567_10 = _mm256_and_si256(rhs_raw_vec_4567_2, m3b); //B14(0-7) B15(0-7) B16(0-7) B17(0-7) const __m256i rhs_vec_4567_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 2), m3b); //B34(0-7) B35(0-7) B36(0-7) B37(0-7) const __m256i rhs_vec_4567_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 4), m3b); //B54(0-7) B55(0-7) B56(0-7) B57(0-7) const __m256i rhs_vec_4567_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 6), m3b); //B74(0-7) B75(0-7) B76(0-7) B77(0-7) const __m256i rhs_vec_0123_11 = _mm256_and_si256(rhs_raw_vec_0123_3, m3b); //B10(8-15) B11(8-15) B12(8-15) B13(8-15) const __m256i rhs_vec_0123_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 2), m3b); //B30(8-15) B31(8-15) B32(8-15) B33(8-15) const __m256i rhs_vec_0123_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 4), m3b); //B50(8-15) B51(8-15) B52(8-15) B53(8-15) const __m256i rhs_vec_0123_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 6), m3b); //B70(8-15) B71(8-15) B72(8-15) B73(8-15) const __m256i rhs_vec_4567_11 = _mm256_and_si256(rhs_raw_vec_4567_3, m3b); //B14(8-15) B15(8-15) B16(8-15) B17(8-15) const __m256i rhs_vec_4567_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 2), m3b); //B34(8-15) B35(8-15) B36(8-15) B37(8-15) const __m256i rhs_vec_4567_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 4), m3b); //B54(8-15) B55(8-15) B56(8-15) B57(8-15) const __m256i rhs_vec_4567_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 6), m3b); //B74(8-15) B75(8-15) B76(8-15) B77(8-15) //Scales and Mins of corresponding sub blocks from different Q2_K structures are stored together //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71 const __m128i mins_and_scales_01 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + sb * 64)); const __m128i mins_and_scales_23 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 16 + sb * 64)); const __m128i mins_and_scales_45 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 32 + sb * 64)); const __m128i mins_and_scales_67 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 48 + sb * 64)); // Extract scales which is lower half from mins_and_scales const __m128i scales_01 = _mm_and_si128(mins_and_scales_01, m4b_sse); const __m128i scales_23 = _mm_and_si128(mins_and_scales_23, m4b_sse); const __m128i scales_45 = _mm_and_si128(mins_and_scales_45, m4b_sse); const __m128i scales_67 = _mm_and_si128(mins_and_scales_67, m4b_sse); // Extract mins which is upper half from mins_and_scales const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_01, 4), m4b_sse)); const __m256i mins_23 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_23, 4), m4b_sse)); const __m256i mins_45 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_45, 4), m4b_sse)); const __m256i mins_67 = 
_mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_67, 4), m4b_sse)); // Scales of sub blocks in the sb loop // Scales of the 0th sub block from each super block __m128i scales_rearrange_0 = _mm_shuffle_epi8(scales_01, scalemask1); __m256i scales_0 = _mm256_cvtepu8_epi16(scales_rearrange_0); // Scales of the 1st sub block from each super block __m128i scales_rearrange_1 = _mm_shuffle_epi8(scales_01, scalemask2); __m256i scales_1 = _mm256_cvtepu8_epi16(scales_rearrange_1); // Scales of the 2nd sub block from each super block __m128i scales_rearrange_2 = _mm_shuffle_epi8(scales_23, scalemask1); __m256i scales_2 = _mm256_cvtepu8_epi16(scales_rearrange_2); // Scales of the 3rd sub block from each super block __m128i scales_rearrange_3 = _mm_shuffle_epi8(scales_23, scalemask2); __m256i scales_3 = _mm256_cvtepu8_epi16(scales_rearrange_3); // Scales of the 4th sub block from each super block __m128i scales_rearrange_4 = _mm_shuffle_epi8(scales_45, scalemask1); __m256i scales_4 = _mm256_cvtepu8_epi16(scales_rearrange_4); // Scales of the 5th sub block from each super block __m128i scales_rearrange_5 = _mm_shuffle_epi8(scales_45, scalemask2); __m256i scales_5 = _mm256_cvtepu8_epi16(scales_rearrange_5); // Scales of the 6th sub block from each super block __m128i scales_rearrange_6 = _mm_shuffle_epi8(scales_67, scalemask1); __m256i scales_6 = _mm256_cvtepu8_epi16(scales_rearrange_6); // Scales of the 7th sub block from each super block __m128i scales_rearrange_7 = _mm_shuffle_epi8(scales_67, scalemask2); __m256i scales_7 = _mm256_cvtepu8_epi16(scales_rearrange_7); // Load the sub block values corresponding to sb in block_q8_K in batches of 16 bytes and replicate the same across 256 bit vector __m256i lhs_vec_0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + sb * 128))); __m256i lhs_vec_1 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16 + sb * 128))); __m256i lhs_vec_2 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 32 + sb * 128))); __m256i lhs_vec_3 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 48 + sb * 128))); __m256i lhs_vec_4 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 64 + sb * 128))); __m256i lhs_vec_5 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 80 + sb * 128))); __m256i lhs_vec_6 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 96 + sb * 128))); __m256i lhs_vec_7 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 112 + sb * 128))); lhs_vec_0 = _mm256_permute2f128_si256(lhs_vec_0, lhs_vec_0, 0); lhs_vec_1 = _mm256_permute2f128_si256(lhs_vec_1, lhs_vec_1, 0); lhs_vec_2 = _mm256_permute2f128_si256(lhs_vec_2, lhs_vec_2, 0); lhs_vec_3 = _mm256_permute2f128_si256(lhs_vec_3, lhs_vec_3, 0); lhs_vec_4 = _mm256_permute2f128_si256(lhs_vec_4, lhs_vec_4, 0); lhs_vec_5 = _mm256_permute2f128_si256(lhs_vec_5, lhs_vec_5, 0); lhs_vec_6 = _mm256_permute2f128_si256(lhs_vec_6, lhs_vec_6, 0); lhs_vec_7 = _mm256_permute2f128_si256(lhs_vec_7, lhs_vec_7, 0); __m256i iacc_0 = _mm256_setzero_si256(); __m256i iacc_1 = _mm256_setzero_si256(); __m256i iacc_2 = _mm256_setzero_si256(); __m256i iacc_3 = _mm256_setzero_si256(); __m256i iacc_4 = _mm256_setzero_si256(); __m256i iacc_5 = _mm256_setzero_si256(); __m256i iacc_6 = _mm256_setzero_si256(); __m256i iacc_7 = _mm256_setzero_si256(); // Dot product done within 32 bit lanes and accumulated in the same vector // First done for 0th sub block and then for 
seven (1st - 7th) other sub blocks processed for each sb (sb < QK_K/128 loop) // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3) // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7) // B0(8-11) B4(8-11) B1(8-11) B5(8-11) B2(8-11) B6(8-11) B3(8-11) B7(8-11) with A0(8-11) // B0(12-15) B4(12-15) B1(12-15) B5(12-15) B2(12-15) B6(12-15) B3(12-15) B7(12-15) with A0(12-15) iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_00 ,_mm256_shuffle_epi32(rhs_vec_4567_00, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_00, 177) ,rhs_vec_4567_00, 170), _mm256_shuffle_epi32(lhs_vec_0, 85))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_01 ,_mm256_shuffle_epi32(rhs_vec_4567_01, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170))); iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_01, 177) ,rhs_vec_4567_01, 170), _mm256_shuffle_epi32(lhs_vec_0, 255))); iacc_0 = _mm256_madd_epi16(iacc_0, scales_0); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_10 ,_mm256_shuffle_epi32(rhs_vec_4567_10, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_10, 177) ,rhs_vec_4567_10, 170), _mm256_shuffle_epi32(lhs_vec_1, 85))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_11 ,_mm256_shuffle_epi32(rhs_vec_4567_11, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170))); iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_11, 177) ,rhs_vec_4567_11, 170), _mm256_shuffle_epi32(lhs_vec_1, 255))); iacc_1 = _mm256_madd_epi16(iacc_1, scales_1); iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_20 ,_mm256_shuffle_epi32(rhs_vec_4567_20, 177), 170), _mm256_shuffle_epi32(lhs_vec_2, 0))); iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_20, 177) ,rhs_vec_4567_20, 170), _mm256_shuffle_epi32(lhs_vec_2, 85))); iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_21 ,_mm256_shuffle_epi32(rhs_vec_4567_21, 177), 170), _mm256_shuffle_epi32(lhs_vec_2, 170))); iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_21, 177) ,rhs_vec_4567_21, 170), _mm256_shuffle_epi32(lhs_vec_2, 255))); iacc_2 = _mm256_madd_epi16(iacc_2, scales_2); iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_30 ,_mm256_shuffle_epi32(rhs_vec_4567_30, 177), 170), _mm256_shuffle_epi32(lhs_vec_3, 0))); iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_30, 177) ,rhs_vec_4567_30, 170), _mm256_shuffle_epi32(lhs_vec_3, 85))); iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_31 ,_mm256_shuffle_epi32(rhs_vec_4567_31, 177), 170), _mm256_shuffle_epi32(lhs_vec_3, 170))); iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_31, 177) ,rhs_vec_4567_31, 170), _mm256_shuffle_epi32(lhs_vec_3, 255))); iacc_3 = _mm256_madd_epi16(iacc_3, scales_3); iacc_4 = _mm256_add_epi16(iacc_4, 
_mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_40 ,_mm256_shuffle_epi32(rhs_vec_4567_40, 177), 170), _mm256_shuffle_epi32(lhs_vec_4, 0))); iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_40, 177) ,rhs_vec_4567_40, 170), _mm256_shuffle_epi32(lhs_vec_4, 85))); iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_41 ,_mm256_shuffle_epi32(rhs_vec_4567_41, 177), 170), _mm256_shuffle_epi32(lhs_vec_4, 170))); iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_41, 177) ,rhs_vec_4567_41, 170), _mm256_shuffle_epi32(lhs_vec_4, 255))); iacc_4 = _mm256_madd_epi16(iacc_4, scales_4); iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_50 ,_mm256_shuffle_epi32(rhs_vec_4567_50, 177), 170), _mm256_shuffle_epi32(lhs_vec_5, 0))); iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_50, 177) ,rhs_vec_4567_50, 170), _mm256_shuffle_epi32(lhs_vec_5, 85))); iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_51 ,_mm256_shuffle_epi32(rhs_vec_4567_51, 177), 170), _mm256_shuffle_epi32(lhs_vec_5, 170))); iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_51, 177) ,rhs_vec_4567_51, 170), _mm256_shuffle_epi32(lhs_vec_5, 255))); iacc_5 = _mm256_madd_epi16(iacc_5, scales_5); iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_60 ,_mm256_shuffle_epi32(rhs_vec_4567_60, 177), 170), _mm256_shuffle_epi32(lhs_vec_6, 0))); iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_60, 177) ,rhs_vec_4567_60, 170), _mm256_shuffle_epi32(lhs_vec_6, 85))); iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_61 ,_mm256_shuffle_epi32(rhs_vec_4567_61, 177), 170), _mm256_shuffle_epi32(lhs_vec_6, 170))); iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_61, 177) ,rhs_vec_4567_61, 170), _mm256_shuffle_epi32(lhs_vec_6, 255))); iacc_6 = _mm256_madd_epi16(iacc_6, scales_6); iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_70 ,_mm256_shuffle_epi32(rhs_vec_4567_70, 177), 170), _mm256_shuffle_epi32(lhs_vec_7, 0))); iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_70, 177) ,rhs_vec_4567_70, 170), _mm256_shuffle_epi32(lhs_vec_7, 85))); iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_71 ,_mm256_shuffle_epi32(rhs_vec_4567_71, 177), 170), _mm256_shuffle_epi32(lhs_vec_7, 170))); iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_71, 177) ,rhs_vec_4567_71, 170), _mm256_shuffle_epi32(lhs_vec_7, 255))); iacc_7 = _mm256_madd_epi16(iacc_7, scales_7); // Accumulate the iacc value for one sb __m256i iacc_sb = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_0, iacc_1), _mm256_add_epi32(iacc_2, iacc_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_4, iacc_5), _mm256_add_epi32(iacc_6, iacc_7))); __m128i q8sums = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + sb * 8)); __m256i q8s = _mm256_castsi128_si256(q8sums); q8s= _mm256_permute2f128_si256(q8s, q8s, 0); // Broadcast the bsums of the two corresponding subblocks of q8_k // Multiply-Add with corresponding 
mins of Q2_Kx8 with bsums __m256i iacc_min_sb_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 0), mins_01); __m256i iacc_min_sb_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 85), mins_23); __m256i iacc_min_sb_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 170), mins_45); __m256i iacc_min_sb_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 255), mins_67); __m256i iacc_min_sb = _mm256_add_epi32(_mm256_add_epi32(iacc_min_sb_01, iacc_min_sb_23), _mm256_add_epi32(iacc_min_sb_45,iacc_min_sb_67)); // Accumulate for the complete block iacc_b = _mm256_add_epi32(iacc_b, iacc_sb); iacc_min_b = _mm256_add_epi32(iacc_min_b, iacc_min_sb); } //Multiply-Add with scale values for complete super block acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_b), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); acc_min_rows = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_min_b), _mm256_mul_ps(col_dmin_f32, row_scale_f32), acc_min_rows); } // Accumulated output values permuted so as to be stored in appropriate order post accumulation acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask); _mm256_storeu_ps(s + (y * nr + x * 8), _mm256_sub_ps(acc_row, acc_min_rows)); } } #else ggml_gemv_q2_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); #endif } void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { #if defined(__AVX2__) || defined(__AVX512F__) { // Lookup table to convert signed nibbles to signed bytes __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); gemm_q4_b32_8x8_q8_0_lut_avx(n, s, bs, vx, vy, nr, nc, signextendlut); return; } #endif // defined(__AVX2__) || defined(__AVX512F__) ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__AVX2__) || defined(__AVX512F__) const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 * ) vx; const block_q8_Kx4 * a_ptr_start = (const block_q8_Kx4 * ) vy; int64_t b_nb = n / QK_K; int64_t y = 0; // Mask to mask out nibbles from packed bytes const __m256i m4b = _mm256_set1_epi8(0x0F); // Permute mask used for easier vector processing at later stages __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); int64_t xstart = 0; int anr = nr - nr % 16;; // Used to align nr with boundary of 16 #if defined(__AVX512BW__) && defined(__AVX512DQ__) int anc = nc - nc % 16; // Used to align nc with boundary of 16 // Mask to mask out nibbles from packed bytes expanded to 512 bit length const __m512i m4bexpanded = _mm512_set1_epi8(0x0F); //Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation for (; y < anr / 4; y += 4) { const block_q8_Kx4 * a_ptrs[4]; a_ptrs[0] = a_ptr_start + (y * nb); for (int i = 0; i < 3; ++i) { a_ptrs[i + 1] = a_ptrs[i] + nb; } // Take group of eight block_q4_kx8 
structures at each pass of the loop and perform dot product operation for (int64_t x = 0; x < anc / 8; x += 2) { const block_q4_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); const block_q4_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); // Master FP accumulators __m512 acc_rows[16]; for (int i = 0; i < 16; i++) { acc_rows[i] = _mm512_setzero_ps(); } __m512 acc_min_rows[16]; for (int i = 0; i < 16; i++) { acc_min_rows[i] = _mm512_setzero_ps(); } // For super block for (int64_t b = 0; b < nb; b++) { // Scale values - Load the sixteen scale values from two block_q4_kx8 structures const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); // dmin values - Load the sixteen dmin values from two block_q4_kx8 structures const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 64; sb++) { const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i 
rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); //4-bit -> 8-bit const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) const __m512i rhs_mat_014589CD_02 = _mm512_and_si512(rhs_raw_mat_014589CD_2, m4bexpanded); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) B08(16-23) B09(16-23) B0C(16-23) B0D(16-23) const __m512i rhs_mat_2367ABEF_02 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2, m4bexpanded); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) B0A(16-23) B0B(16-23) B0E(16-23) B0F(16-23) const __m512i rhs_mat_014589CD_03 = 
_mm512_and_si512(rhs_raw_mat_014589CD_3, m4bexpanded); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) B08(24-31) B09(24-31) B0C(24-31) B0D(24-31) const __m512i rhs_mat_2367ABEF_03 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3, m4bexpanded); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) B0A(24-31) B0B(24-31) B0E(24-31) B0F(24-31) const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) const __m512i rhs_mat_014589CD_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m4bexpanded); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) B18(16-23) B19(16-23) B1C(16-23) B1D(16-23) const __m512i rhs_mat_2367ABEF_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m4bexpanded); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) B1A(16-23) B1B(16-23) B1E(16-23) B1F(16-23) const __m512i rhs_mat_014589CD_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m4bexpanded); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) B18(24-31) B19(24-31) B1C(24-31) B1D(24-31) const __m512i rhs_mat_2367ABEF_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m4bexpanded); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) B1A(24-31) B1B(24-31) B1E(24-31) B1F(24-31) // Shuffle pattern one - right side input const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) const __m512i rhs_mat_014589CD_02_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) B08(16-19) B09(16-19) B08(16-19) B09(16-19) B0C(16-19) B0D(16-19) B0C(16-19) B0D(16-19) const __m512i rhs_mat_2367ABEF_02_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) B0A(16-19) B0B(16-19) B0A(16-19) B0B(16-19) B0E(16-19) B0F(16-19) B0E(16-19) B0F(16-19) const __m512i 
rhs_mat_014589CD_03_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) B08(24-27) B09(24-27) B08(24-27) B09(24-27) B0C(24-27) B0D(24-27) B0C(24-27) B0D(24-27) const __m512i rhs_mat_2367ABEF_03_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) B0A(24-27) B0B(24-27) B0A(24-27) B0B(24-27) B0E(24-27) B0F(24-27) B0E(24-27) B0F(24-27) const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) const __m512i rhs_mat_014589CD_12_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) B18(16-19) B19(16-19) B18(16-19) B19(16-19) B1C(16-19) B1D(16-19) B1C(16-19) B1D(16-19) const __m512i rhs_mat_2367ABEF_12_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) B1A(16-19) B1B(16-19) B1A(16-19) B1B(16-19) B1E(16-19) B1F(16-19) B1E(16-19) B1F(16-19) const __m512i rhs_mat_014589CD_13_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) B18(24-27) B19(24-27) B18(24-27) B19(24-27) B1C(24-27) B1D(24-27) B1C(24-27) B1D(24-27) const __m512i rhs_mat_2367ABEF_13_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) B1A(24-27) B1B(24-27) B1A(24-27) B1B(24-27) B1E(24-27) B1F(24-27) B1E(24-27) B1F(24-27) // Shuffle pattern two - right side input const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) const __m512i 
rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) const __m512i rhs_mat_014589CD_02_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) B08(20-23) B09(20-23) B08(20-23) B09(20-23) B0C(20-23) B0D(20-23) B0C(20-23) B0D(20-23) const __m512i rhs_mat_2367ABEF_02_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) B0A(20-23) B0B(20-23) B0A(20-23) B0B(20-23) B0E(20-23) B0F(20-23) B0E(20-23) B0F(20-23) const __m512i rhs_mat_014589CD_03_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) B08(28-31) B09(28-31) B08(28-31) B09(28-31) B0C(28-31) B0D(28-31) B0C(28-31) 0BD(28-31) const __m512i rhs_mat_2367ABEF_03_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) B0A(28-31) B0B(28-31) B0A(28-31) B0B(28-31) B0E(28-31) B0F(28-31) B0E(28-31) B0F(28-31) const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7) const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) const __m512i rhs_mat_014589CD_12_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) B18(20-23) B19(20-23) B18(20-23) B19(20-23) B1C(20-23) B1D(20-23) B1C(20-23) B1D(20-23) const __m512i rhs_mat_2367ABEF_12_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) B1A(20-23) B1B(20-23) B1A(20-23) B1B(20-23) B1E(20-23) B1F(20-23) B1E(20-23) B1F(20-23) const __m512i rhs_mat_014589CD_13_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) B18(28-31) B19(28-31) B18(28-31) B19(28-31) B1C(28-31) B1D(28-31) B1C(28-31) B1D(28-31) const __m512i rhs_mat_2367ABEF_13_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) B1A(28-31) B1B(28-31) B1A(28-31) B1B(28-31) B1E(28-31) B1F(28-31) 
B1E(28-31) B1F(28-31) uint32_t utmp_00[4], utmp_01[4], utmp_10[4], utmp_11[4]; // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_00, b_ptr_0[b].scales + 24 * sb, 12); utmp_00[3] = ((utmp_00[2] >> 4) & kmask2) | (((utmp_00[1] >> 6) & kmask3) << 4); const uint32_t uaux_00 = utmp_00[1] & kmask1; utmp_00[1] = (utmp_00[2] & kmask2) | (((utmp_00[0] >> 6) & kmask3) << 4); utmp_00[2] = uaux_00; utmp_00[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_01, b_ptr_0[b].scales + 12 + sb * 24, 12); utmp_01[3] = ((utmp_01[2] >> 4) & kmask2) | (((utmp_01[1] >> 6) & kmask3) << 4); const uint32_t uaux_01 = utmp_01[1] & kmask1; utmp_01[1] = (utmp_01[2] & kmask2) | (((utmp_01[0] >> 6) & kmask3) << 4); utmp_01[2] = uaux_01; utmp_01[0] &= kmask1; memcpy(utmp_10, b_ptr_1[b].scales + sb * 24, 12); utmp_10[3] = ((utmp_10[2] >> 4) & kmask2) | (((utmp_10[1] >> 6) & kmask3) << 4); const uint32_t uaux_10 = utmp_10[1] & kmask1; utmp_10[1] = (utmp_10[2] & kmask2) | (((utmp_10[0] >> 6) & kmask3) << 4); utmp_10[2] = uaux_10; utmp_10[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_11, b_ptr_1[b].scales + 12 + sb * 24, 12); utmp_11[3] = ((utmp_11[2] >> 4) & kmask2) | (((utmp_11[1] >> 6) & kmask3) << 4); const uint32_t uaux_11 = utmp_11[1] & kmask1; utmp_11[1] = (utmp_11[2] & kmask2) | (((utmp_11[0] >> 6) & kmask3) << 4); utmp_11[2] = uaux_11; utmp_11[0] &= kmask1; // Scales of first sub block in the sb loop const __m256i mins_and_scales_0 = _mm256_set_epi32(utmp_10[3], utmp_10[2], utmp_10[1], utmp_10[0], utmp_00[3], utmp_00[2], utmp_00[1], utmp_00[0]); const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); // Scales of second sub block in the sb loop const __m256i mins_and_scales_1 = _mm256_set_epi32(utmp_11[3], utmp_11[2], utmp_11[1], utmp_11[0], utmp_01[3], utmp_01[2], utmp_01[1], utmp_01[0]); const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); // Mins of first and second sub block of Q4_K block are arranged side by side const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(_mm256_shuffle_epi32(mins_and_scales_0, 78), _mm256_shuffle_epi32(mins_and_scales_1, 78))); const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238); for (int rp = 0; rp < 4; rp++) { // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb))); __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * 
)((a_ptrs[rp][b].qs + 32 + 256 * sb))); __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); __m256i lhs_mat_ymm_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb))); __m256i lhs_mat_ymm_01_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 0); __m256i lhs_mat_ymm_23_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 17); __m256i lhs_mat_ymm_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb))); __m256i lhs_mat_ymm_01_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 0); __m256i lhs_mat_ymm_23_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 17); __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb))); __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb))); __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); __m256i lhs_mat_ymm_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb))); __m256i lhs_mat_ymm_01_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 0); __m256i lhs_mat_ymm_23_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 17); __m256i lhs_mat_ymm_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb))); __m256i lhs_mat_ymm_01_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 0); __m256i lhs_mat_ymm_23_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 17); __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); __m512i lhs_mat_01_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_02), lhs_mat_ymm_01_02, 1); __m512i lhs_mat_23_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_02), lhs_mat_ymm_23_02, 1); __m512i lhs_mat_01_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_03), lhs_mat_ymm_01_03, 1); __m512i lhs_mat_23_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_03), lhs_mat_ymm_23_03, 1); __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); __m512i lhs_mat_01_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_12), lhs_mat_ymm_01_12, 1); __m512i lhs_mat_23_12 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_12), lhs_mat_ymm_23_12, 1); __m512i lhs_mat_01_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_13), lhs_mat_ymm_01_13, 1); __m512i lhs_mat_23_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_13), lhs_mat_ymm_23_13, 1); // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb))); __m256i lhs_bsums_hsum_ymm_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); lhs_bsums_hsum_ymm_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_ymm_0123_01, lhs_bsums_hsum_ymm_0123_01, 0); __m512i lhs_bsums_hsum_0123_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_hsum_ymm_0123_01), lhs_bsums_hsum_ymm_0123_01, 1); // Shuffle pattern one - left side input const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) const __m512i lhs_mat_01_02_sp1 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) const __m512i lhs_mat_23_02_sp1 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)160); //A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) const __m512i lhs_mat_01_03_sp1 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) const __m512i lhs_mat_23_03_sp1 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)160); //A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) const __m512i lhs_mat_01_11_sp1 = 
_mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) const __m512i lhs_mat_01_12_sp1 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) const __m512i lhs_mat_23_12_sp1 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)160); //A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) const __m512i lhs_mat_01_13_sp1 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) const __m512i lhs_mat_23_13_sp1 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)160); //A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) const __m512i lhs_mat_01_02_sp2 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) const __m512i lhs_mat_23_02_sp2 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)245); //A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) const __m512i lhs_mat_01_03_sp2 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) const __m512i lhs_mat_23_03_sp2 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)245); //A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) 
A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) const __m512i lhs_mat_01_12_sp2 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) const __m512i lhs_mat_23_12_sp2 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)245); //A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) const __m512i lhs_mat_01_13_sp2 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) const __m512i lhs_mat_23_13_sp2 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)245); //A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1)); __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1)); __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1)); __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_23_03_sp1), 
_mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1)); __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1)); __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1)); __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1)); __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1)); __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2)); __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2)); __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2)); __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2)); __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2)); __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_01_12_sp2)), 
_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2)); __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2)); __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2)); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0); iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0); iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0); iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0); iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1); iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1); iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1); iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1); // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) __m512i iacc_row_0_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_0, _mm512_shuffle_epi32(iacc_mat_01_0, (_MM_PERM_ENUM)78)); __m512i iacc_row_1_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_0, (_MM_PERM_ENUM)78), iacc_mat_01_0); __m512i iacc_row_2_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_0, _mm512_shuffle_epi32(iacc_mat_11_0, (_MM_PERM_ENUM)78)); __m512i iacc_row_3_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_0, (_MM_PERM_ENUM)78), iacc_mat_11_0); __m512i iacc_row_0_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_1, _mm512_shuffle_epi32(iacc_mat_01_1, (_MM_PERM_ENUM)78)); __m512i iacc_row_1_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_1, (_MM_PERM_ENUM)78), iacc_mat_01_1); __m512i iacc_row_2_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_1, _mm512_shuffle_epi32(iacc_mat_11_1, (_MM_PERM_ENUM)78)); __m512i iacc_row_3_1 = _mm512_mask_blend_epi32(0xCCCC,_mm512_shuffle_epi32(iacc_mat_10_1, (_MM_PERM_ENUM)78), iacc_mat_11_1); __m512i iacc_row_0 = _mm512_add_epi32(iacc_row_0_0, iacc_row_0_1); __m512i iacc_row_1 = _mm512_add_epi32(iacc_row_1_0, iacc_row_1_1); __m512i iacc_row_2 = _mm512_add_epi32(iacc_row_2_0, iacc_row_2_1); __m512i iacc_row_3 = _mm512_add_epi32(iacc_row_3_0, iacc_row_3_1); // Load the scale(d) values for all the 4 
Q8_K blocks and repeat it across lanes const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d); const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1); // Multiply with appropriate scales and accumulate (for both d and dmin) below acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); __m512i iacc_row_min_0 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)0), mins_01); __m512i iacc_row_min_1 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)85), mins_01); __m512i iacc_row_min_2 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)170), mins_01); __m512i iacc_row_min_3 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)255), mins_01); acc_min_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]); acc_min_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]); acc_min_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]); acc_min_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]); } } } // Store the accumulated values for (int i = 0; i < 16; i++) { _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i])); } } } for (; y < nr / 4; y++) { const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb); // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation for (int64_t x = 0; x < anc / 8; x += 2) { const block_q4_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); const block_q4_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); // Master FP accumulators __m512 acc_rows[4]; for (int i = 0; i < 4; i++) { acc_rows[i] = _mm512_setzero_ps(); } __m512 acc_min_rows[4]; for (int i = 0; i < 4; i++) { acc_min_rows[i] = _mm512_setzero_ps(); } // For super block for (int64_t b = 0; b < nb; b++) { // Scale values - Load the sixteen scale values from two block_q4_kx8 structures const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); // dmin values - Load the sixteen dmin values from two block_q4_kx8 structures const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 64;
sb++) { const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, 
requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); //4-bit -> 8-bit const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) const __m512i rhs_mat_014589CD_02 = _mm512_and_si512(rhs_raw_mat_014589CD_2, m4bexpanded); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) B08(16-23) B09(16-23) B0C(16-23) B0D(16-23) const __m512i rhs_mat_2367ABEF_02 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2, m4bexpanded); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) B0A(16-23) B0B(16-23) B0E(16-23) B0F(16-23) const __m512i rhs_mat_014589CD_03 = _mm512_and_si512(rhs_raw_mat_014589CD_3, m4bexpanded); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) B08(24-31) B09(24-31) B0C(24-31) B0D(24-31) const __m512i rhs_mat_2367ABEF_03 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3, m4bexpanded); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) B0A(24-31) B0B(24-31) B0E(24-31) B0F(24-31) const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), 
m4bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) const __m512i rhs_mat_014589CD_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m4bexpanded); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) B18(16-23) B19(16-23) B1C(16-23) B1D(16-23) const __m512i rhs_mat_2367ABEF_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m4bexpanded); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) B1A(16-23) B1B(16-23) B1E(16-23) B1F(16-23) const __m512i rhs_mat_014589CD_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m4bexpanded); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) B18(24-31) B19(24-31) B1C(24-31) B1D(24-31) const __m512i rhs_mat_2367ABEF_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m4bexpanded); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) B1A(24-31) B1B(24-31) B1E(24-31) B1F(24-31) // Shuffle pattern one - right side input const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) const __m512i rhs_mat_014589CD_02_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) B08(16-19) B09(16-19) B08(16-19) B09(16-19) B0C(16-19) B0D(16-19) B0C(16-19) B0D(16-19) const __m512i rhs_mat_2367ABEF_02_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) B0A(16-19) B0B(16-19) B0A(16-19) B0B(16-19) B0E(16-19) B0F(16-19) B0E(16-19) B0F(16-19) const __m512i rhs_mat_014589CD_03_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) B08(24-27) B09(24-27) B08(24-27) B09(24-27) B0C(24-27) B0D(24-27) B0C(24-27) B0D(24-27) const __m512i rhs_mat_2367ABEF_03_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) B0A(24-27) B0B(24-27) B0A(24-27) B0B(24-27) B0E(24-27) B0F(24-27) B0E(24-27) B0F(24-27) const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) 
B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) const __m512i rhs_mat_014589CD_12_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) B18(16-19) B19(16-19) B18(16-19) B19(16-19) B1C(16-19) B1D(16-19) B1C(16-19) B1D(16-19) const __m512i rhs_mat_2367ABEF_12_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) B1A(16-19) B1B(16-19) B1A(16-19) B1B(16-19) B1E(16-19) B1F(16-19) B1E(16-19) B1F(16-19) const __m512i rhs_mat_014589CD_13_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) B18(24-27) B19(24-27) B18(24-27) B19(24-27) B1C(24-27) B1D(24-27) B1C(24-27) B1D(24-27) const __m512i rhs_mat_2367ABEF_13_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) B1A(24-27) B1B(24-27) B1A(24-27) B1B(24-27) B1E(24-27) B1F(24-27) B1E(24-27) B1F(24-27) // Shuffle pattern two - right side input const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) const __m512i rhs_mat_014589CD_02_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) B08(20-23) B09(20-23) B08(20-23) B09(20-23) B0C(20-23) B0D(20-23) B0C(20-23) B0D(20-23) const __m512i rhs_mat_2367ABEF_02_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) B0A(20-23) B0B(20-23) B0A(20-23) B0B(20-23) B0E(20-23) B0F(20-23) B0E(20-23) B0F(20-23) const __m512i rhs_mat_014589CD_03_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) 
B05(28-31) B08(28-31) B09(28-31) B08(28-31) B09(28-31) B0C(28-31) B0D(28-31) B0C(28-31) 0BD(28-31) const __m512i rhs_mat_2367ABEF_03_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) B0A(28-31) B0B(28-31) B0A(28-31) B0B(28-31) B0E(28-31) B0F(28-31) B0E(28-31) B0F(28-31) const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7) const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) const __m512i rhs_mat_014589CD_12_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) B18(20-23) B19(20-23) B18(20-23) B19(20-23) B1C(20-23) B1D(20-23) B1C(20-23) B1D(20-23) const __m512i rhs_mat_2367ABEF_12_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) B1A(20-23) B1B(20-23) B1A(20-23) B1B(20-23) B1E(20-23) B1F(20-23) B1E(20-23) B1F(20-23) const __m512i rhs_mat_014589CD_13_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) B18(28-31) B19(28-31) B18(28-31) B19(28-31) B1C(28-31) B1D(28-31) B1C(28-31) B1D(28-31) const __m512i rhs_mat_2367ABEF_13_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) B1A(28-31) B1B(28-31) B1A(28-31) B1B(28-31) B1E(28-31) B1F(28-31) B1E(28-31) B1F(28-31) uint32_t utmp_00[4], utmp_01[4], utmp_10[4], utmp_11[4]; // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_00, b_ptr_0[b].scales + 24 * sb, 12); utmp_00[3] = ((utmp_00[2] >> 4) & kmask2) | (((utmp_00[1] >> 6) & kmask3) << 4); const uint32_t uaux_00 = utmp_00[1] & kmask1; utmp_00[1] = (utmp_00[2] & kmask2) | (((utmp_00[0] >> 6) & kmask3) << 4); utmp_00[2] = uaux_00; utmp_00[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_01, b_ptr_0[b].scales + 12 + sb * 24, 12); utmp_01[3] = ((utmp_01[2] >> 4) & kmask2) | (((utmp_01[1] >> 6) & kmask3) << 4); const uint32_t uaux_01 = utmp_01[1] & kmask1; utmp_01[1] = (utmp_01[2] & kmask2) | (((utmp_01[0] >> 6) & kmask3) << 4); utmp_01[2] = uaux_01; utmp_01[0] &= 
kmask1; // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_10, b_ptr_1[b].scales + sb * 24, 12); utmp_10[3] = ((utmp_10[2] >> 4) & kmask2) | (((utmp_10[1] >> 6) & kmask3) << 4); const uint32_t uaux_10 = utmp_10[1] & kmask1; utmp_10[1] = (utmp_10[2] & kmask2) | (((utmp_10[0] >> 6) & kmask3) << 4); utmp_10[2] = uaux_10; utmp_10[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_11, b_ptr_1[b].scales + 12 + sb * 24, 12); utmp_11[3] = ((utmp_11[2] >> 4) & kmask2) | (((utmp_11[1] >> 6) & kmask3) << 4); const uint32_t uaux_11 = utmp_11[1] & kmask1; utmp_11[1] = (utmp_11[2] & kmask2) | (((utmp_11[0] >> 6) & kmask3) << 4); utmp_11[2] = uaux_11; utmp_11[0] &= kmask1; // Scales of first sub block in the sb loop const __m256i mins_and_scales_0 = _mm256_set_epi32(utmp_10[3], utmp_10[2], utmp_10[1], utmp_10[0], utmp_00[3], utmp_00[2], utmp_00[1], utmp_00[0]); const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); // Scales of second sub block in the sb loop const __m256i mins_and_scales_1 = _mm256_set_epi32(utmp_11[3], utmp_11[2], utmp_11[1], utmp_11[0], utmp_01[3], utmp_01[2], utmp_01[1], utmp_01[0]); const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); // Mins of first and second sub block of Q4_K block are arranged side by side const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(_mm256_shuffle_epi32(mins_and_scales_0, 78), _mm256_shuffle_epi32(mins_and_scales_1, 78))); const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238); // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb))); __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * sb))); __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); __m256i lhs_mat_ymm_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb))); __m256i lhs_mat_ymm_01_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 0); __m256i lhs_mat_ymm_23_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 17); __m256i lhs_mat_ymm_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb))); __m256i lhs_mat_ymm_01_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 0); __m256i lhs_mat_ymm_23_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 17); __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb))); __m256i lhs_mat_ymm_01_10 = 
_mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb))); __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); __m256i lhs_mat_ymm_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb))); __m256i lhs_mat_ymm_01_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 0); __m256i lhs_mat_ymm_23_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 17); __m256i lhs_mat_ymm_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb))); __m256i lhs_mat_ymm_01_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 0); __m256i lhs_mat_ymm_23_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 17); //Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into a 512 bit vector __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); __m512i lhs_mat_01_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_02), lhs_mat_ymm_01_02, 1); __m512i lhs_mat_23_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_02), lhs_mat_ymm_23_02, 1); __m512i lhs_mat_01_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_03), lhs_mat_ymm_01_03, 1); __m512i lhs_mat_23_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_03), lhs_mat_ymm_23_03, 1); __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); __m512i lhs_mat_01_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_12), lhs_mat_ymm_01_12, 1); __m512i lhs_mat_23_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_12), lhs_mat_ymm_23_12, 1); __m512i lhs_mat_01_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_13), lhs_mat_ymm_01_13, 1); __m512i lhs_mat_23_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_13), lhs_mat_ymm_23_13, 1); // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb))); __m256i lhs_bsums_hsum_ymm_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); lhs_bsums_hsum_ymm_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_ymm_0123_01, lhs_bsums_hsum_ymm_0123_01, 0); __m512i lhs_bsums_hsum_0123_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_hsum_ymm_0123_01), 
lhs_bsums_hsum_ymm_0123_01, 1); // Shuffle pattern one - left side input const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) const __m512i lhs_mat_01_02_sp1 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) const __m512i lhs_mat_23_02_sp1 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)160); //A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) const __m512i lhs_mat_01_03_sp1 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) const __m512i lhs_mat_23_03_sp1 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)160); //A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) const __m512i lhs_mat_01_12_sp1 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) const __m512i lhs_mat_23_12_sp1 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)160); //A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) 
A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) const __m512i lhs_mat_01_13_sp1 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) const __m512i lhs_mat_23_13_sp1 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)160); //A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) const __m512i lhs_mat_01_02_sp2 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) const __m512i lhs_mat_23_02_sp2 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)245); //A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) const __m512i lhs_mat_01_03_sp2 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) const __m512i lhs_mat_23_03_sp2 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)245); //A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, 
(_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) const __m512i lhs_mat_01_12_sp2 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) const __m512i lhs_mat_23_12_sp2 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)245); //A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) const __m512i lhs_mat_01_13_sp2 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) const __m512i lhs_mat_23_13_sp2 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)245); //A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1)); __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1)); __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1)); __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1)); __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1)); __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1)); __m512i 
iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1)); __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1)); __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2)); __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2)); __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2)); __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2)); __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2)); __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2)); __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2)); __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2)); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in 
block __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0); iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0); iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0); iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0); iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1); iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1); iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1); iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1); // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) __m512i iacc_row_0_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_0, _mm512_shuffle_epi32(iacc_mat_01_0, (_MM_PERM_ENUM)78)); __m512i iacc_row_1_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_0, (_MM_PERM_ENUM)78), iacc_mat_01_0); __m512i iacc_row_2_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_0, _mm512_shuffle_epi32(iacc_mat_11_0, (_MM_PERM_ENUM)78)); __m512i iacc_row_3_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_0, (_MM_PERM_ENUM)78), iacc_mat_11_0); __m512i iacc_row_0_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_1, _mm512_shuffle_epi32(iacc_mat_01_1, (_MM_PERM_ENUM)78)); __m512i iacc_row_1_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_1, (_MM_PERM_ENUM)78), iacc_mat_01_1); __m512i iacc_row_2_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_1, _mm512_shuffle_epi32(iacc_mat_11_1, (_MM_PERM_ENUM)78)); __m512i iacc_row_3_1 = _mm512_mask_blend_epi32(0xCCCC,_mm512_shuffle_epi32(iacc_mat_10_1, (_MM_PERM_ENUM)78), iacc_mat_11_1); __m512i iacc_row_0 = _mm512_add_epi32(iacc_row_0_0, iacc_row_0_1); __m512i iacc_row_1 = _mm512_add_epi32(iacc_row_1_0, iacc_row_1_1); __m512i iacc_row_2 = _mm512_add_epi32(iacc_row_2_0, iacc_row_2_1); __m512i iacc_row_3 = _mm512_add_epi32(iacc_row_3_0, iacc_row_3_1); // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1); // Multiply with appropiate scales and accumulate (for both d and dmin) below acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), 
_mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); __m512i iacc_row_min_0 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)0), mins_01); __m512i iacc_row_min_1 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)85), mins_01); __m512i iacc_row_min_2 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)170), mins_01); __m512i iacc_row_min_3 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)255), mins_01); acc_min_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]); acc_min_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); acc_min_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); acc_min_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]); } } // Store accumlated values for (int i = 0; i < 4; i++) { _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i])); } } } if (anc != nc) { xstart = anc/8; y = 0; } #endif // __AVX512BW__ && __AVX512DQ__ // Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation for (; y < anr / 4; y += 4) { const block_q8_Kx4 * a_ptrs[4]; a_ptrs[0] = a_ptr_start + (y * nb); for (int i = 0; i < 3; ++i) { a_ptrs[i + 1] = a_ptrs[i] + nb; } // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation for (int64_t x = xstart; x < nc / 8; x++) { const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_rows[16]; for (int i = 0; i < 16; i++) { acc_rows[i] = _mm256_setzero_ps(); } __m256 acc_min_rows[16]; for (int i = 0; i < 16; i++) { acc_min_rows[i] = _mm256_setzero_ps(); } // For super block for (int64_t b = 0; b < nb; b++) { // Scale values - Load the eight scale values of block_q4_kx8 const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); // dmin values - Load the eight dmin values of block_q4_kx8 const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 64; sb++) { // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = 
_mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); // 4-bit -> 8-bit // First sub block of the two sub blocks processed in the iteration const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) // Second sub block of the two sub blocks processed in the iteration const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) 
B17(24-31) // Shuffle pattern one - right side input const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) const __m256i rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) // Shuffle pattern two - right side input const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) const __m256i 
rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) uint32_t utmp_0[4], utmp_1[4]; // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp_0[1] & kmask1; utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); utmp_0[2] = uaux_0; utmp_0[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); const uint32_t uaux_1 = utmp_1[1] & kmask1; utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); utmp_1[2] = uaux_1; utmp_1[0] &= kmask1; // Scales of first sub block in the sb loop const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); // Scales of second sub block in the sb loop const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); // Mins of first and second sub block of Q4_K block are arranged side by side const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); const __m256i 
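/* Note on the scale/min unpacking just above (a reading aid, not part of the original kernel):
   a Q4_K super block stores its 8 six-bit sub-block scales and 8 six-bit sub-block mins packed
   into a 12-byte scales field.  The memcpy plus the kmask1/kmask2/kmask3 shift dance expands one
   such 12-byte group into four dwords with utmp_*[0..1] holding the 8 scales and utmp_*[2..3]
   holding the 8 mins, one byte each.  mins_and_scales_* therefore carries scales in its low 8
   bytes and mins in its high 8 bytes, which is why the mins_01 vector above is built with
   _mm_shuffle_epi32(..., 78) (78 = 0x4E, i.e. swap the two 64-bit halves of the lane) before the
   bytes are widened to 16 bit. */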
scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); for (int rp = 0; rp < 4; rp++) { // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb))); __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 256 * sb))); __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb))); __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0); __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17); __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb))); __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0); __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17); __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb))); __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb))); __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb))); __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0); __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17); __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb))); __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0); __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17); // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb))); __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0); // Shuffle pattern one - left side input const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) 
A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) // Shuffle pattern two- left side input const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); 
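/* Note on the two shuffle patterns used throughout this kernel (the AVX-512 path above uses the
   same immediates through the (_MM_PERM_ENUM) casts).  Decoded per 128-bit lane, with d0..d3 the
   four dwords of the source:
       136 = 0x88 -> d0 d2 d0 d2   (RHS pattern one : bytes 0-3 of a column pair)
       221 = 0xDD -> d1 d3 d1 d3   (RHS pattern two : bytes 4-7 of a column pair)
       160 = 0xA0 -> d0 d0 d2 d2   (LHS pattern one : bytes 0-3 of each row, duplicated)
       245 = 0xF5 -> d1 d1 d3 d3   (LHS pattern two : bytes 4-7 of each row, duplicated)
   After this rearrangement every 32-bit lane of an RHS/LHS pair carries four matching bytes, so
   _mm256_maddubs_epi16(rhs, lhs) (unsigned 4-bit B values times signed Q8 A values) sums byte
   pairs into 16-bit lanes; adding the four byte-group terms and then the sp1 and sp2 halves
   covers all 32 bytes of a sub block, and the later _mm256_madd_epi16 with the scale vectors
   both applies the 6-bit scale and folds the 16-bit pairs into 32-bit sums. */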
//A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1)); __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1)); __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1)); __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1)); __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1)); __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1)); __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), 
_mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1)); __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1)); __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2)); __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2)); __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2)); __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2)); __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2)); __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2)); __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2)); __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2)); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m256i iacc_mat_11_0 = 
_mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204); __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204); __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204); __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204); __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204); __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204); __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204); __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204); __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1); __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1); __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1); __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1); // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d); const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);//GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); // Multiply with appropiate scales and accumulate (for both d and dmin) below acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01); __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01); __m256i iacc_row_min_2 = 
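/* Note on the min correction accumulated around this point: a Q4_K weight decodes as
       w = d * sc_j * q  -  dmin * m_j
   so for one sub block j the dot product with a Q8_K row splits into
       sum_i(w_i * a_i) = d*d8 * sc_j * (q . a)  -  dmin*d8 * m_j * bsum_j
   where d8 is the Q8_K row scale (the a_ptr*.d values loaded below) and bsum_j is that sub
   block's sum of Q8 quants, obtained by the hadd of the loaded bsums above.  acc_rows
   accumulates the first term; the iacc_row_min_* values madd the broadcast bsums with mins_01
   to build the second term, which goes into acc_min_rows and is subtracted from acc_rows at the
   final store.  The shuffle immediates 0/85/170/255 used for both the bsums and row_scale_f32
   broadcast element 0..3 of each 128-bit group, i.e. select row 0..3 of the four interleaved
   Q8_K rows. */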
_mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01); __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01); acc_min_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]); acc_min_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]); acc_min_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]); acc_min_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]); } } } // Store the accumulated values for (int i = 0; i < 16; i++) { _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); } } } for (; y < nr / 4; y++) { const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb); for (int64_t x = xstart; x < nc / 8; x++) { const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_rows[4]; for (int i = 0; i < 4; i++) { acc_rows[i] = _mm256_setzero_ps(); } __m256 acc_min_rows[4]; for (int i = 0; i < 4; i++) { acc_min_rows[i] = _mm256_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { // Scale values - Load the eight scale values of block_q4_Kx8 const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); // dmin values - Load the eight dmin values of block_q4_Kx8 const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 64; sb++) { // Load the eight block_q4_k for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, 
requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); // 4-bit -> 8-bit // First sub block of the two sub blocks processed in the iteration const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) // Second sub block of the two sub blocks processed in the iteration const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) // Shuffle pattern one - right side input const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) const __m256i rhs_mat_0145_02_sp1 = 
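// Each byte of the interleaved Q4_K data packs two 4-bit quants: the low
// nibble belongs to the first sub block handled in this sb iteration and the
// high nibble to the second. Masking with m4b extracts the low nibbles;
// _mm256_srli_epi16 by 4 followed by the same mask extracts the high ones
// (the shift works on 16-bit lanes, hence the extra mask). Per byte (sketch
// only; b is shorthand for one packed byte):
//
//   uint8_t lo =  b       & 0x0F;   // quant of the first sub block
//   uint8_t hi = (b >> 4) & 0x0F;   // quant of the second sub block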
_mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) // Shuffle pattern two - right side input const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 
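// The dword shuffles with immediate 136 (binary 10'00'10'00, dwords 0 and 2)
// and immediate 221 (binary 11'01'11'01, dwords 1 and 3) split every 8-byte
// chunk of the RHS into its first and second 4 bytes. The LHS uses the
// complementary immediates 160 (0,0,2,2) and 245 (1,1,3,3) further below, so
// pattern one multiplies bytes 0-3 of every row against bytes 0-3 of every
// column, pattern two does bytes 4-7, and adding the two covers the whole
// 8-byte chunk while producing all four row x column combinations of the
// 2x2 tile.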
221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) uint32_t utmp_0[4], utmp_1[4]; // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp_0[1] & kmask1; utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); utmp_0[2] = uaux_0; utmp_0[0] &= kmask1; // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures when sb = 1 memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); const uint32_t uaux_1 = utmp_1[1] & kmask1; utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); utmp_1[2] = uaux_1; utmp_1[0] &= kmask1; // Scales of first sub block in the sb loop const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); // Scales of second sub block in the sb loop const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); // Mins of first and second sub block of Q4_K block are arranged side by side const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb))); __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * 
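// The 12-byte groups copied into utmp_0/utmp_1 above use the usual Q4_K
// packing of eight 6-bit scales and eight 6-bit mins; the kmask1/2/3
// mask-and-shift sequence rearranges them so that, in effect,
//
//   utmp_x[0..1]  hold the eight scales, one byte per interleaved column
//   utmp_x[2..3]  hold the eight mins,   one byte per interleaved column
//
// for the sub block that utmp_x corresponds to. The _mm_unpacklo_epi8 of the
// vector with itself then duplicates every scale byte so that, once widened
// to 16 bit, one scale lines up with each 16-bit partial dot product it has
// to multiply.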
sb))); __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb))); __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0); __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17); __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb))); __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0); __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17); __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb))); __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb))); __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb))); __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0); __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17); __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb))); __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0); __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17); // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb))); __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0); // Shuffle pattern one - left side input const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) 
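// Q8_K stores bsums as sums over groups of 16 quants, while a Q4_K sub block
// spans 32 quants. The _mm_hadd_epi16 above therefore adds adjacent bsums
// pairs to get one per-sub-block sum per LHS row, and these sums are what the
// dmin * min correction is multiplied with later, so the mins never have to
// touch the individual quants.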
A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) // Shuffle pattern two- left side input const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 
245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1)); __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1)); __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1)); __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1)); __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1)); __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1)); __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1)); __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1)); __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), 
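// The integer dot product is built in two steps: _mm256_maddubs_epi16
// multiplies the unsigned 4-bit RHS bytes with the signed 8-bit LHS bytes and
// adds adjacent pairs into 16-bit sums (saturation cannot trigger here, since
// 2 * 15 * 128 = 3840 is far below 32767), and the later _mm256_madd_epi16
// with the scale vectors both applies the per-sub-block scale and widens
// pairs of 16-bit sums into the 32-bit accumulators. Sketch for two adjacent
// byte pairs (u*, s*, sc are shorthand, not kernel variables):
//
//   int16_t p0 = u0 * s0 + u1 * s1;        // maddubs
//   int16_t p1 = u2 * s2 + u3 * s3;
//   int32_t r  = p0 * sc + p1 * sc;        // madd with the sub-block scale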
_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2)); __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2)); __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2)); __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2)); __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2)); __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2)); __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2)); __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2)); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); 
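// Naming of the accumulators: iacc_mat_RC_S pairs LHS row group R (0 = rows
// 0/1, 1 = rows 2/3) with RHS column group C (0 = columns 0,1,4,5 and
// 1 = columns 2,3,6,7) for sub block S of this iteration. The sp1 and sp2
// halves are summed, multiplied by the matching sub-block scales with
// _mm256_madd_epi16 (which also widens to 32 bit), and only then rearranged
// into per-row vectors.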
iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204); __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204); __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204); __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204); __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204); __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204); __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204); __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204); __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1); __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1); __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1); __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1); // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); //GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); // Multiply with appropiate scales and accumulate (for both d and dmin) below acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01); __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01); __m256i iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01); __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01); acc_min_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]); acc_min_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); acc_min_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); acc_min_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]); } } // Store the accumulated values for (int i = 0; i < 4; i++) { 
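// The output tile is written row-major with a stride of bs floats: output row
// (y * 4 + i) receives eight consecutive floats starting at column x * 8, and
// the stored value is the scale accumulator minus the min-correction
// accumulator, matching the Q4_K dequantization w = d * sc * q - dmin * m.
// Conceptually (sketch only; store8 is shorthand for the 8-wide store):
//
//   for (int i = 0; i < 4; i++)
//       store8(s + (y * 4 + i) * bs + x * 8, acc_rows[i] - acc_min_rows[i]);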
_mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); } } } #else UNUSED(kmask1); UNUSED(kmask2); UNUSED(kmask3); ggml_gemm_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); #endif } void ggml_gemm_iq4_nl_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { #if defined(__AVX2__) || defined(__AVX512F__) { __m256i signextendlut = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i*)kvalues_iq4nl)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); gemm_q4_b32_8x8_q8_0_lut_avx(n, s, bs, vx, vy, nr, nc, signextendlut); return; } #endif // defined(__AVX2__) || defined(__AVX512F__) ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } void ggml_gemm_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); #if defined(__AVX2__) || defined(__AVX512F__) const block_q2_Kx8 * b_ptr_start = (const block_q2_Kx8 * ) vx; const block_q8_Kx4 * a_ptr_start = (const block_q8_Kx4 * ) vy; int64_t b_nb = n / QK_K; int64_t y = 0; // Permute mask used for easier vector processing at later stages __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); int64_t xstart = 0; int anr = nr - nr % 16; // Used to align nr with boundary of 16 // Mask to convert 2 bit and 4 bit values into a bytes const __m256i m3b = _mm256_set1_epi8(3); const __m128i m4b_sse = _mm_set1_epi8(0xF); //Mask to get appropriate scales __m128i scalesmask1_sse = _mm_set_epi8(14,14,12,12,10,10,8,8,6,6,4,4,2,2,0,0); __m128i scalesmask2_sse = _mm_set_epi8(15,15,13,13,11,11,9,9,7,7,5,5,3,3,1,1); __m256i scalesmask1 = _mm256_castsi128_si256(scalesmask1_sse); scalesmask1 = _mm256_permute2f128_si256(scalesmask1, scalesmask1, 0); __m256i scalesmask2 = _mm256_castsi128_si256(scalesmask2_sse); scalesmask2 = _mm256_permute2f128_si256(scalesmask2, scalesmask2, 0); #if defined(__AVX512BW__) && defined(__AVX512DQ__) int anc = nc - nc % 16; // Used to align nc with boundary of 16 // Mask to mask out nibbles from packed bytes const __m256i m4b = _mm256_set1_epi8(0x0F); // Mask to mask out nibbles from packed bytes expanded to 512 bit length const __m512i m3bexpanded = _mm512_set1_epi8(3); //Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation for (; y < anr / 4; y += 4) { const block_q8_Kx4 * a_ptrs[4]; a_ptrs[0] = a_ptr_start + (y * nb); for (int i = 0; i < 3; ++i) { a_ptrs[i + 1] = a_ptrs[i] + nb; } // Take group of eight block_q2_kx8 structures at each pass of the loop and perform dot product operation for (int64_t x = 0; x < anc / 8; x += 2) { const block_q2_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); const block_q2_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); // Master FP accumulators __m512 acc_rows[16]; for (int i = 0; i < 16; i++) { acc_rows[i] = _mm512_setzero_ps(); } __m512 acc_min_rows[16]; for (int i = 0; i < 16; i++) { acc_min_rows[i] = _mm512_setzero_ps(); } // For super block for (int64_t b = 0; b < nb; b++) { // Delta values - Load the sixteen scale values from two block_q2_kx8 structures const __m512 col_scale_f32 = 
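// The AVX512 path of the Q2_K GEMM takes two block_q2_Kx8 column groups
// (b_ptr_0 / b_ptr_1, i.e. 16 output columns) per x iteration and four
// block_q8_Kx4 row groups per y iteration, producing a 16 x 16 output tile
// per pass. GGML_F32Cx8x2_LOAD converts the 8 + 8 half-precision d (and dmin)
// values of the two column groups into a single __m512 of floats, the 512-bit
// counterpart of the GGML_F32Cx8_LOAD used by the 256-bit paths above.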
GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); // dmin values - Load the sixteen dmin values from two block_q2_kx8 structures const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 128; sb++) { // Load the eight block_q2_k for eight sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_0 = 
_mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); //2-bit -> 8-bit const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0,m3bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0,m3bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1,m3bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1,m3bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(rhs_raw_mat_014589CD_2,m3bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2,m3bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(rhs_raw_mat_014589CD_3,m3bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3,m3bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) const __m512i rhs_mat_014589CD_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 2), m3bexpanded); //B20(0-7) B21(0-7) B24(0-7) B25(0-7) B28(0-7) B29(0-7) B2C(0-7) B2D(0-7) const __m512i 
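// The rearranged 256-bit halves of the two column groups are then merged with
// _mm512_inserti32x8: the lower 256 bits of every rhs_raw_mat_014589CD_* /
// rhs_raw_mat_2367ABEF_* register hold columns 0-7 (from b_ptr_0) and the
// upper 256 bits hold columns 8-15 (from b_ptr_1), so a single 512-bit
// operation processes both column groups at once from here on.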
rhs_mat_2367ABEF_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 2), m3bexpanded); //B22(0-7) B23(0-7) B26(0-7) B27(0-7) B2A(0-7) B2B(0-7) B2E(0-7) B2F(0-7) const __m512i rhs_mat_014589CD_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 2), m3bexpanded); //B20(8-15) B21(8-15) B24(8-15) B25(8-15) B28(8-15) B29(8-15) B2C(8-15) B2D(8-15) const __m512i rhs_mat_2367ABEF_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 2), m3bexpanded); //B22(8-15) B23(8-15) B26(8-15) B27(8-15) B2A(8-15) B2B(8-15) B2E(8-15) B2F(8-15) const __m512i rhs_mat_014589CD_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 2), m3bexpanded); //B30(0-7) B31(0-7) B34(0-7) B35(0-7) B38(0-7) B39(0-7) B3C(0-7) B3D(0-7) const __m512i rhs_mat_2367ABEF_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 2), m3bexpanded); //B32(0-7) B33(0-7) B36(0-7) B37(0-7) B3A(0-7) B3B(0-7) B3E(0-7) B3F(0-7) const __m512i rhs_mat_014589CD_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 2), m3bexpanded); //B30(8-15) B31(8-15) B34(8-15) B35(8-15) B38(8-15) B39(8-15) B3C(8-15) B3D(8-15) const __m512i rhs_mat_2367ABEF_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 2), m3bexpanded); //B32(8-15) B33(8-15) B36(8-15) B37(8-15) B3A(8-15) B3B(8-15) B3E(8-15) B3F(8-15) const __m512i rhs_mat_014589CD_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m3bexpanded); //B40(0-7) B41(0-7) B44(0-7) B45(0-7) B48(0-7) B49(0-7) B4C(0-7) B4D(0-7) const __m512i rhs_mat_2367ABEF_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m3bexpanded); //B42(0-7) B43(0-7) B46(0-7) B47(0-7) B4A(0-7) B4B(0-7) B4E(0-7) B4F(0-7) const __m512i rhs_mat_014589CD_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m3bexpanded); //B40(8-15) B41(8-15) B44(8-15) B45(8-15) B48(8-15) B49(8-15) B4C(8-15) B4D(8-15) const __m512i rhs_mat_2367ABEF_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m3bexpanded); //B42(8-15) B43(8-15) B46(8-15) B47(8-15) B4A(8-15) B4B(8-15) B4E(8-15) B4F(8-15) const __m512i rhs_mat_014589CD_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m3bexpanded); //B50(0-7) B51(0-7) B54(0-7) B55(0-7) B58(0-7) B59(0-7) B5C(0-7) B5D(0-7) const __m512i rhs_mat_2367ABEF_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m3bexpanded); //B52(0-7) B53(0-7) B56(0-7) B57(0-7) B5A(0-7) B5B(0-7) B5E(0-7) B5F(0-7) const __m512i rhs_mat_014589CD_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m3bexpanded); //B50(8-15) B51(8-15) B54(8-15) B55(8-15) B58(8-15) B59(8-15) B5C(8-15) B5D(8-15) const __m512i rhs_mat_2367ABEF_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m3bexpanded); //B52(8-15) B53(8-15) B56(8-15) B57(8-15) B5A(8-15) B5B(8-15) B5E(8-15) B5F(8-15) const __m512i rhs_mat_014589CD_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 6), m3bexpanded); //B60(0-7) B61(0-7) B64(0-7) B65(0-7) B68(0-7) B69(0-7) B6C(0-7) B6D(0-7) const __m512i rhs_mat_2367ABEF_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 6), m3bexpanded); //B62(0-7) B63(0-7) B66(0-7) B67(0-7) B6A(0-7) B6B(0-7) B6E(0-7) B6F(0-7) const __m512i rhs_mat_014589CD_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 6), m3bexpanded); //B60(8-15) B61(8-15) B64(8-15) B65(8-15) B68(8-15) B69(8-15) B6C(8-15) B6D(8-15) const __m512i rhs_mat_2367ABEF_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 6), m3bexpanded); //B62(8-15) 
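// Q2_K packs four 2-bit quants in every byte, each belonging to a different
// 16-value sub block. Shifting the raw bytes right by 0, 2, 4 and 6 bits and
// masking with m3bexpanded (0x03) peels the four positions apart, so the 256
// bytes loaded above per column group decode into all eight sub blocks of
// this sb iteration for the eight columns. Per byte (sketch only):
//
//   q_a =  b       & 3;   q_b = (b >> 2) & 3;
//   q_c = (b >> 4) & 3;   q_d = (b >> 6) & 3;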
B63(8-15) B66(8-15) B67(8-15) B6A(8-15) B6B(8-15) B6E(8-15) B6F(8-15) const __m512i rhs_mat_014589CD_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 6), m3bexpanded); //B70(0-7) B71(0-7) B74(0-7) B75(0-7) B78(0-7) B79(0-7) B7C(0-7) B7D(0-7) const __m512i rhs_mat_2367ABEF_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 6), m3bexpanded); //B72(0-7) B73(0-7) B76(0-7) B77(0-7) B7A(0-7) B7B(0-7) B7E(0-7) B7F(0-7) const __m512i rhs_mat_014589CD_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 6), m3bexpanded); //B70(8-15) B71(8-15) B74(8-15) B75(8-15) B78(8-15) B79(8-15) B7C(8-15) B7D(8-15) const __m512i rhs_mat_2367ABEF_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 6), m3bexpanded); //B72(8-15) B73(8-15) B76(8-15) B77(8-15) B7A(8-15) B7B(8-15) B7E(8-15) B7F(8-15) const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) const __m512i rhs_mat_014589CD_20_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3) B28(0-3) B29(0-3) B28(0-3) B29(0-3) B2C(0-3) B2D(0-3) B2C(0-3) B2D(0-3) const __m512i rhs_mat_2367ABEF_20_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3) B2A(0-3) B2B(0-3) B2A(0-3) B2B(0-3) B2E(0-3) B2F(0-3) B2E(0-3) B2F(0-3) const __m512i rhs_mat_014589CD_21_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) 
B24(8-11) B25(8-11) B28(8-11) B29(8-11) B28(8-11) B29(8-11) B2C(8-11) B2D(8-11) B2C(8-11) B2D(8-11) const __m512i rhs_mat_2367ABEF_21_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11) B2A(8-11) B2B(8-11) B2A(8-11) B2B(8-11) B2E(8-11) B2F(8-11) B2E(8-11) B2F(8-11) const __m512i rhs_mat_014589CD_30_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)136); ///B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3) B38(0-3) B39(0-3) B38(0-3) B39(0-3) B3C(0-3) B3D(0-3) B3C(0-3) B3D(0-3) const __m512i rhs_mat_2367ABEF_30_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3) B3A(0-3) B3B(0-3) B3A(0-3) B3B(0-3) B3E(0-3) B3F(0-3) B3E(0-3) B3F(0-3) const __m512i rhs_mat_014589CD_31_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11) B38(8-11) B39(8-11) B38(8-11) B39(8-11) B3C(8-11) B3D(8-11) B3C(8-11) B3D(8-11) const __m512i rhs_mat_2367ABEF_31_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11) B3A(8-11) B3B(8-11) B3A(8-11) B3B(8-11) B3E(8-11) B3F(8-11) B3E(8-11) B3F(8-11) const __m512i rhs_mat_014589CD_40_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3) B48(0-3) B49(0-3) B48(0-3) B49(0-3) B4C(0-3) B4D(0-3) B4C(0-3) B4D(0-3) const __m512i rhs_mat_2367ABEF_40_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3) B4A(0-3) B4B(0-3) B4A(0-3) B4B(0-3) B4E(0-3) B4F(0-3) B4E(0-3) B4F(0-3) const __m512i rhs_mat_014589CD_41_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11) B48(8-11) B49(8-11) B48(8-11) B49(8-11) B4C(8-11) B4D(8-11) B4C(8-11) B4D(8-11) const __m512i rhs_mat_2367ABEF_41_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11) B4A(8-11) B4B(8-11) B4A(8-11) B4B(8-11) B4E(8-11) B4F(8-11) B4E(8-11) B4F(8-11) const __m512i rhs_mat_014589CD_50_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3) B58(0-3) B59(0-3) B58(0-3) B59(0-3) B5C(0-3) B5D(0-3) B5C(0-3) B5D(0-3) const __m512i rhs_mat_2367ABEF_50_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3) B5A(0-3) B5B(0-3) B5A(0-3) B5B(0-3) B5E(0-3) B5F(0-3) B5E(0-3) B5F(0-3) const __m512i rhs_mat_014589CD_51_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11) B58(8-11) B59(8-11) B58(8-11) B59(8-11) B5C(8-11) B5D(8-11) B5C(8-11) B5D(8-11) const __m512i rhs_mat_2367ABEF_51_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11) B5A(8-11) B5B(8-11) B5A(8-11) B5B(8-11) B5E(8-11) B5F(8-11) B5E(8-11) B5F(8-11) const __m512i rhs_mat_014589CD_60_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)136); 
//B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3) B68(0-3) B69(0-3) B68(0-3) B69(0-3) B6C(0-3) B6D(0-3) B6C(0-3) B6D(0-3) const __m512i rhs_mat_2367ABEF_60_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3) B6A(0-3) B6B(0-3) B6A(0-3) B6B(0-3) B6E(0-3) B6F(0-3) B6E(0-3) B6F(0-3) const __m512i rhs_mat_014589CD_61_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11) B68(8-11) B69(8-11) B68(8-11) B69(8-11) B6C(8-11) B6D(8-11) B6C(8-11) B6D(8-11) const __m512i rhs_mat_2367ABEF_61_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11) B6A(8-11) B6B(8-11) B6A(8-11) B6B(8-11) B6E(8-11) B6F(8-11) B6E(8-11) B6F(8-11) const __m512i rhs_mat_014589CD_70_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3) B78(0-3) B79(0-3) B78(0-3) B79(0-3) B7C(0-3) B7D(0-3) B7C(0-3) B7D(0-3) const __m512i rhs_mat_2367ABEF_70_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3) B7A(0-3) B7B(0-3) B7A(0-3) B7B(0-3) B7E(0-3) B7F(0-3) B7E(0-3) B7F(0-3) const __m512i rhs_mat_014589CD_71_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) const __m512i rhs_mat_2367ABEF_71_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11) B7A(8-11) B7B(8-11) B7A(8-11) B7B(8-11) B7E(8-11) B7F(8-11) B7E(8-11) B7F(8-11) const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7) const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) const __m512i rhs_mat_014589CD_11_sp2 = 
_mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) const __m512i rhs_mat_014589CD_20_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7) B28(4-7) B29(4-7) B28(4-7) B29(4-7) B2C(4-7) B2D(4-7) B2C(4-7) B2D(4-7) const __m512i rhs_mat_2367ABEF_20_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7) B2A(4-7) B2B(4-7) B2A(4-7) B2B(4-7) B2E(4-7) B2F(4-7) B2E(4-7) B2F(4-7) const __m512i rhs_mat_014589CD_21_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15) B28(12-15) B29(12-15) B28(12-15) B29(12-15) B2C(12-15) B2D(12-15) B2C(12-15) B2D(12-15) const __m512i rhs_mat_2367ABEF_21_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15) B2A(12-15) B2B(12-15) B2A(12-15) B2B(12-15) B2E(12-15) B2F(12-15) B2E(12-15) B2F(12-15) const __m512i rhs_mat_014589CD_30_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7) B38(4-7) B39(4-7) B38(4-7) B39(4-7) B3C(4-7) B3D(4-7) B3C(4-7) B3D(4-7) const __m512i rhs_mat_2367ABEF_30_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7) B3A(4-7) B3B(4-7) B3A(4-7) B3B(4-7) B3E(4-7) B3F(4-7) B3E(4-7) B3F(4-7) const __m512i rhs_mat_014589CD_31_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15) B38(12-15) B39(12-15) B38(12-15) B39(12-15) B3C(12-15) B3D(12-15) B3C(12-15) B3D(12-15) const __m512i rhs_mat_2367ABEF_31_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15) B3A(12-15) B3B(12-15) B3A(12-15) B3B(12-15) B3E(12-15) B3F(12-15) B3E(12-15) B3F(12-15) const __m512i rhs_mat_014589CD_40_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7) B48(4-7) B49(4-7) B48(4-7) B49(4-7) B4C(4-7) B4D(4-7) B4C(4-7) B4D(4-7) const __m512i rhs_mat_2367ABEF_40_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7) B4A(4-7) B4B(4-7) B4A(4-7) B4B(4-7) B4E(4-7) B4F(4-7) B4E(4-7) B4F(4-7) const __m512i rhs_mat_014589CD_41_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15) B48(12-15) B49(12-15) B48(12-15) B49(12-15) B4C(12-15) B4D(12-15) B4C(12-15) B4D(12-15) const __m512i rhs_mat_2367ABEF_41_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)221); //B42(12-15) B43(12-15) 
B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15) B4A(12-15) B4B(12-15) B4A(12-15) B4B(12-15) B4E(12-15) B4F(12-15) B4E(12-15) B4F(12-15) const __m512i rhs_mat_014589CD_50_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7) B58(4-7) B59(4-7) B58(4-7) B59(4-7) B5C(4-7) B5D(4-7) B5C(4-7) B5D(4-7) const __m512i rhs_mat_2367ABEF_50_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7) B5A(4-7) B5B(4-7) B5A(4-7) B5B(4-7) B5E(4-7) B5F(4-7) B5E(4-7) B5F(4-7) const __m512i rhs_mat_014589CD_51_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15) B58(12-15) B59(12-15) B58(12-15) B59(12-15) B5C(12-15) B5D(12-15) B5C(12-15) B5D(12-15) const __m512i rhs_mat_2367ABEF_51_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15) B5A(12-15) B5B(12-15) B5A(12-15) B5B(12-15) B5E(12-15) B5F(12-15) B5E(12-15) B5F(12-15) const __m512i rhs_mat_014589CD_60_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7) B68(4-7) B69(4-7) B68(4-7) B69(4-7) B6C(4-7) B6D(4-7) B6C(4-7) B6D(4-7) const __m512i rhs_mat_2367ABEF_60_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7) B6A(4-7) B6B(4-7) B6A(4-7) B6B(4-7) B6E(4-7) B6F(4-7) B6E(4-7) B6F(4-7) const __m512i rhs_mat_014589CD_61_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15) B68(12-15) B69(12-15) B68(12-15) B69(12-15) B6C(12-15) B6D(12-15) B6C(12-15) B6D(12-15) const __m512i rhs_mat_2367ABEF_61_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15) B6A(12-15) B6B(12-15) B6A(12-15) B6B(12-15) B6E(12-15) B6F(12-15) B6E(12-15) B6F(12-15) const __m512i rhs_mat_014589CD_70_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7) B78(4-7) B79(4-7) B78(4-7) B79(4-7) B7C(4-7) B7D(4-7) B7C(4-7) B7D(4-7) const __m512i rhs_mat_2367ABEF_70_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7) B7A(4-7) B7B(4-7) B7A(4-7) B7B(4-7) B7E(4-7) B7F(4-7) B7E(4-7) B7F(4-7) const __m512i rhs_mat_014589CD_71_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15) B78(12-15) B79(12-15) B78(12-15) B79(12-15) B7C(12-15) B7D(12-15) B7C(12-15) B7D(12-15) const __m512i rhs_mat_2367ABEF_71_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15) B7A(12-15) B7B(12-15) B7A(12-15) B7B(12-15) B7E(12-15) B7F(12-15) B7E(12-15) B7F(12-15) //notation:superblock subblock //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71 const __m128i mins_and_scales_01_0 = _mm_loadu_si128((const 
__m128i *)(b_ptr_0[b].scales + sb * 64)); const __m128i mins_and_scales_23_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 16 + sb * 64)); const __m128i mins_and_scales_45_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 32 + sb * 64)); const __m128i mins_and_scales_67_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 48 + sb * 64)); const __m128i mins_and_scales_01_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + sb * 64)); const __m128i mins_and_scales_23_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 16 + sb * 64)); const __m128i mins_and_scales_45_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 32 + sb * 64)); const __m128i mins_and_scales_67_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 48 + sb * 64)); // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop const __m256i mins_and_scales_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_01_0), mins_and_scales_01_1, 1); const __m256i mins_and_scales_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_23_0), mins_and_scales_23_1, 1); const __m256i mins_and_scales_45 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_45_0), mins_and_scales_45_1, 1); const __m256i mins_and_scales_67 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_67_0), mins_and_scales_67_1, 1); // Extract scales which is lower half from mins_and_scales const __m256i scales_01 = _mm256_and_si256(mins_and_scales_01, m4b); const __m256i scales_23 = _mm256_and_si256(mins_and_scales_23, m4b); const __m256i scales_45 = _mm256_and_si256(mins_and_scales_45, m4b); const __m256i scales_67 = _mm256_and_si256(mins_and_scales_67, m4b); // Extract mins which is upper half from mins_and_scales const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_01, 4), m4b)); const __m512i mins_23 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_23, 4), m4b)); const __m512i mins_45 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_45, 4), m4b)); const __m512i mins_67 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_67, 4), m4b)); const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01,scalesmask1)); const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01,scalesmask2)); const __m512i scales_2 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23,scalesmask1)); const __m512i scales_3 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23,scalesmask2)); const __m512i scales_4 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45,scalesmask1)); const __m512i scales_5 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45,scalesmask2)); const __m512i scales_6 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67,scalesmask1)); const __m512i scales_7 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67,scalesmask2)); const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)68); const 
__m512i scale_2367ABEF_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)238); for (int rp = 0; rp < 4; rp++) { // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 512 * sb))); __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 512 * sb))); __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 512 * sb))); __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 512 * sb))); __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); __m256i lhs_mat_ymm_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 512 * sb))); __m256i lhs_mat_ymm_01_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 0); __m256i lhs_mat_ymm_23_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 17); __m256i lhs_mat_ymm_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 512 * sb))); __m256i lhs_mat_ymm_01_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 0); __m256i lhs_mat_ymm_23_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 17); __m256i lhs_mat_ymm_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 512 * sb))); __m256i lhs_mat_ymm_01_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 0); __m256i lhs_mat_ymm_23_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 17); __m256i lhs_mat_ymm_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 512 * sb))); __m256i lhs_mat_ymm_01_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 0); __m256i lhs_mat_ymm_23_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 17); __m256i lhs_mat_ymm_0123_40 = _mm256_loadu_si256((const __m256i * 
)((a_ptrs[rp][b].qs + 256 + 512 * sb))); __m256i lhs_mat_ymm_01_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 0); __m256i lhs_mat_ymm_23_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 17); __m256i lhs_mat_ymm_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 288 + 512 * sb))); __m256i lhs_mat_ymm_01_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 0); __m256i lhs_mat_ymm_23_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 17); __m256i lhs_mat_ymm_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 320 + 512 * sb))); __m256i lhs_mat_ymm_01_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 0); __m256i lhs_mat_ymm_23_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 17); __m256i lhs_mat_ymm_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 352 + 512 * sb))); __m256i lhs_mat_ymm_01_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 0); __m256i lhs_mat_ymm_23_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 17); __m256i lhs_mat_ymm_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 384 + 512 * sb))); __m256i lhs_mat_ymm_01_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 0); __m256i lhs_mat_ymm_23_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 17); __m256i lhs_mat_ymm_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 416 + 512 * sb))); __m256i lhs_mat_ymm_01_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 0); __m256i lhs_mat_ymm_23_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 17); __m256i lhs_mat_ymm_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 448 + 512 * sb))); __m256i lhs_mat_ymm_01_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 0); __m256i lhs_mat_ymm_23_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 17); __m256i lhs_mat_ymm_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 480 + 512 * sb))); __m256i lhs_mat_ymm_01_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 0); __m256i lhs_mat_ymm_23_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 17); __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); __m512i lhs_mat_01_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_20), lhs_mat_ymm_01_20, 1); __m512i lhs_mat_23_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_20), lhs_mat_ymm_23_20, 1); __m512i lhs_mat_01_21 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_21), lhs_mat_ymm_01_21, 1); __m512i lhs_mat_23_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_21), lhs_mat_ymm_23_21, 1); __m512i lhs_mat_01_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_30), lhs_mat_ymm_01_30, 1); __m512i lhs_mat_23_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_30), lhs_mat_ymm_23_30, 1); __m512i lhs_mat_01_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_31), lhs_mat_ymm_01_31, 1); __m512i lhs_mat_23_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_31), lhs_mat_ymm_23_31, 1); __m512i lhs_mat_01_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_40), lhs_mat_ymm_01_40, 1); __m512i lhs_mat_23_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_40), lhs_mat_ymm_23_40, 1); __m512i lhs_mat_01_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_41), lhs_mat_ymm_01_41, 1); __m512i lhs_mat_23_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_41), lhs_mat_ymm_23_41, 1); __m512i lhs_mat_01_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_50), lhs_mat_ymm_01_50, 1); __m512i lhs_mat_23_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_50), lhs_mat_ymm_23_50, 1); __m512i lhs_mat_01_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_51), lhs_mat_ymm_01_51, 1); __m512i lhs_mat_23_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_51), lhs_mat_ymm_23_51, 1); __m512i lhs_mat_01_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_60), lhs_mat_ymm_01_60, 1); __m512i lhs_mat_23_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_60), lhs_mat_ymm_23_60, 1); __m512i lhs_mat_01_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_61), lhs_mat_ymm_01_61, 1); __m512i lhs_mat_23_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_61), lhs_mat_ymm_23_61, 1); __m512i lhs_mat_01_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_70), lhs_mat_ymm_01_70, 1); __m512i lhs_mat_23_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_70), lhs_mat_ymm_23_70, 1); __m512i lhs_mat_01_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_71), lhs_mat_ymm_01_71, 1); __m512i lhs_mat_23_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_71), lhs_mat_ymm_23_71, 1); // Bsums are loaded for the different Q8_K blocks __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 32 * sb))); __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 8 + 32 * sb)); __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 16 + 32 * sb))); __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 24 + 32 * sb)); __m256i lhs_bsums_ymm_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1); __m512i lhs_bsums_01_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_0123), lhs_bsums_ymm_01_0123, 1); __m256i lhs_bsums_ymm_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1); __m512i lhs_bsums_23_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_0123), lhs_bsums_ymm_23_0123, 1); __m256i lhs_bsums_ymm_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1); __m512i lhs_bsums_01_4567 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_4567), lhs_bsums_ymm_01_4567, 1); __m256i lhs_bsums_ymm_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1); __m512i lhs_bsums_23_4567 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_4567), lhs_bsums_ymm_23_4567, 1); // Shuffle pattern one - left side input const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) const __m512i lhs_mat_01_20_sp1 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) const __m512i lhs_mat_23_20_sp1 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)160); //A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) const __m512i lhs_mat_01_21_sp1 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) const __m512i lhs_mat_23_21_sp1 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)160); //A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) const __m512i lhs_mat_01_30_sp1 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) 
A31(0-3) const __m512i lhs_mat_23_30_sp1 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)160); //A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) const __m512i lhs_mat_01_31_sp1 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) const __m512i lhs_mat_23_31_sp1 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)160); //A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) const __m512i lhs_mat_01_40_sp1 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) const __m512i lhs_mat_23_40_sp1 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)160); //A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) const __m512i lhs_mat_01_41_sp1 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) const __m512i lhs_mat_23_41_sp1 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)160); //A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) const __m512i lhs_mat_01_50_sp1 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) const __m512i lhs_mat_23_50_sp1 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)160); //A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) const __m512i lhs_mat_01_51_sp1 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) const __m512i lhs_mat_23_51_sp1 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)160); //A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) const __m512i lhs_mat_01_60_sp1 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) const __m512i lhs_mat_23_60_sp1 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)160); //A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) const __m512i lhs_mat_01_61_sp1 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) const __m512i lhs_mat_23_61_sp1 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)160); //A62(8-11) A62(8-11) 
A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) const __m512i lhs_mat_01_70_sp1 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) const __m512i lhs_mat_23_70_sp1 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)160); //A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) const __m512i lhs_mat_01_71_sp1 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) const __m512i lhs_mat_23_71_sp1 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)160); //A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) const __m512i lhs_mat_01_20_sp2 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) const __m512i lhs_mat_23_20_sp2 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)245); //A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) 
A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) const __m512i lhs_mat_01_21_sp2 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) const __m512i lhs_mat_23_21_sp2 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)245); //A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) const __m512i lhs_mat_01_30_sp2 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) const __m512i lhs_mat_23_30_sp2 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)245); //A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) const __m512i lhs_mat_01_31_sp2 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) const __m512i lhs_mat_23_31_sp2 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)245); //A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) const __m512i lhs_mat_01_40_sp2 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) const __m512i lhs_mat_23_40_sp2 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)245); //A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) const __m512i lhs_mat_01_41_sp2 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) const __m512i lhs_mat_23_41_sp2 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)245); //A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) const __m512i lhs_mat_01_50_sp2 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) const __m512i lhs_mat_23_50_sp2 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)245); //A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) const __m512i lhs_mat_01_51_sp2 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) const __m512i lhs_mat_23_51_sp2 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)245); //A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) 
A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) const __m512i lhs_mat_01_60_sp2 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) const __m512i lhs_mat_23_60_sp2 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)245); //A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) const __m512i lhs_mat_01_61_sp2 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) const __m512i lhs_mat_23_61_sp2 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)245); //A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) const __m512i lhs_mat_01_70_sp2 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) const __m512i lhs_mat_23_70_sp2 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)245); //A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) const __m512i lhs_mat_01_71_sp2 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) const __m512i lhs_mat_23_71_sp2 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)245); //A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)); __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)); __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)); __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)); __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)); __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)); __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)); __m512i 
iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)); __m512i iacc_mat_00_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_01_21_sp1)); __m512i iacc_mat_01_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_01_21_sp1)); __m512i iacc_mat_10_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_23_21_sp1)); __m512i iacc_mat_11_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_23_21_sp1)); __m512i iacc_mat_00_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_01_31_sp1)); __m512i iacc_mat_01_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_01_31_sp1)); __m512i iacc_mat_10_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_23_31_sp1)); __m512i iacc_mat_11_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_23_31_sp1)); __m512i iacc_mat_00_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_01_41_sp1)); __m512i iacc_mat_01_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_01_41_sp1)); __m512i iacc_mat_10_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_23_41_sp1)); __m512i iacc_mat_11_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_23_41_sp1)); __m512i iacc_mat_00_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_01_51_sp1)); __m512i iacc_mat_01_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_01_51_sp1)); __m512i iacc_mat_10_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_23_51_sp1)); __m512i iacc_mat_11_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_23_51_sp1)); __m512i iacc_mat_00_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_01_61_sp1)); __m512i iacc_mat_01_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_01_61_sp1)); __m512i iacc_mat_10_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_23_61_sp1)); __m512i 
iacc_mat_11_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_23_61_sp1)); __m512i iacc_mat_00_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_01_71_sp1)); __m512i iacc_mat_01_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_01_71_sp1)); __m512i iacc_mat_10_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_23_71_sp1)); __m512i iacc_mat_11_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_23_71_sp1)); __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)); __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)); __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)); __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)); __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)); __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)); __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)); __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)); __m512i iacc_mat_00_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_01_21_sp2)); __m512i iacc_mat_01_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_01_21_sp2)); __m512i iacc_mat_10_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_23_21_sp2)); __m512i iacc_mat_11_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_23_21_sp2)); __m512i iacc_mat_00_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_01_31_sp2)); __m512i iacc_mat_01_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_01_31_sp2)); __m512i iacc_mat_10_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_23_31_sp2)); __m512i 
iacc_mat_11_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_23_31_sp2)); __m512i iacc_mat_00_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_01_41_sp2)); __m512i iacc_mat_01_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_01_41_sp2)); __m512i iacc_mat_10_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_23_41_sp2)); __m512i iacc_mat_11_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_23_41_sp2)); __m512i iacc_mat_00_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_01_51_sp2)); __m512i iacc_mat_01_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_01_51_sp2)); __m512i iacc_mat_10_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_23_51_sp2)); __m512i iacc_mat_11_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_23_51_sp2)); __m512i iacc_mat_00_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_01_61_sp2)); __m512i iacc_mat_01_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_01_61_sp2)); __m512i iacc_mat_10_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_23_61_sp2)); __m512i iacc_mat_11_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_23_61_sp2)); __m512i iacc_mat_00_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_01_71_sp2)); __m512i iacc_mat_01_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_01_71_sp2)); __m512i iacc_mat_10_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_23_71_sp2)); __m512i iacc_mat_11_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_23_71_sp2)); // Combine results from both shuffle patterns for each output block __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); 
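// Why there are two shuffle patterns: _mm512_shuffle_epi32 imm8 136/160 (dword picks 0,2,0,2 and 0,0,2,2)
// builds the *_sp1 views from byte groups 0-3 of each RHS column / LHS row, while imm8 221/245 (dword
// picks 1,3,1,3 and 1,1,3,3) builds the *_sp2 views from byte groups 4-7, as the B../A.. range annotations
// above indicate. Each _mm512_maddubs_epi16 therefore covers only half of the byte positions, so the
// surrounding _mm512_add_epi16 statements merge the sp1/sp2 halves and every quantized value of the
// sub-block is counted exactly once in the 16-bit accumulators.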
__m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); __m512i iacc_mat_00_2 = _mm512_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2); __m512i iacc_mat_01_2 = _mm512_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2); __m512i iacc_mat_10_2 = _mm512_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2); __m512i iacc_mat_11_2 = _mm512_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2); __m512i iacc_mat_00_3 = _mm512_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2); __m512i iacc_mat_01_3 = _mm512_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2); __m512i iacc_mat_10_3 = _mm512_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2); __m512i iacc_mat_11_3 = _mm512_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2); __m512i iacc_mat_00_4 = _mm512_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2); __m512i iacc_mat_01_4 = _mm512_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2); __m512i iacc_mat_10_4 = _mm512_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2); __m512i iacc_mat_11_4 = _mm512_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2); __m512i iacc_mat_00_5 = _mm512_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2); __m512i iacc_mat_01_5 = _mm512_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2); __m512i iacc_mat_10_5 = _mm512_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2); __m512i iacc_mat_11_5 = _mm512_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2); __m512i iacc_mat_00_6 = _mm512_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2); __m512i iacc_mat_01_6 = _mm512_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2); __m512i iacc_mat_10_6 = _mm512_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2); __m512i iacc_mat_11_6 = _mm512_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2); __m512i iacc_mat_00_7 = _mm512_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2); __m512i iacc_mat_01_7 = _mm512_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2); __m512i iacc_mat_10_7 = _mm512_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2); __m512i iacc_mat_11_7 = _mm512_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0); iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0); iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0); iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0); iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1); iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1); iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1); iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1); iacc_mat_00_2 = _mm512_madd_epi16(iacc_mat_00_2, scale_014589CD_2); iacc_mat_01_2 = _mm512_madd_epi16(iacc_mat_01_2, scale_2367ABEF_2); iacc_mat_10_2 = _mm512_madd_epi16(iacc_mat_10_2, scale_014589CD_2); iacc_mat_11_2 = _mm512_madd_epi16(iacc_mat_11_2, scale_2367ABEF_2); iacc_mat_00_3 = _mm512_madd_epi16(iacc_mat_00_3, scale_014589CD_3); iacc_mat_01_3 = _mm512_madd_epi16(iacc_mat_01_3, scale_2367ABEF_3); iacc_mat_10_3 = _mm512_madd_epi16(iacc_mat_10_3, scale_014589CD_3); iacc_mat_11_3 = _mm512_madd_epi16(iacc_mat_11_3, scale_2367ABEF_3); iacc_mat_00_4 = _mm512_madd_epi16(iacc_mat_00_4, scale_014589CD_4); iacc_mat_01_4 = _mm512_madd_epi16(iacc_mat_01_4, scale_2367ABEF_4); iacc_mat_10_4 = _mm512_madd_epi16(iacc_mat_10_4, scale_014589CD_4); iacc_mat_11_4 = _mm512_madd_epi16(iacc_mat_11_4, scale_2367ABEF_4); 
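// The _mm512_madd_epi16 statements in this run fold in the per-sub-block Q2_K scales: scale_014589CD_k /
// scale_2367ABEF_k were replicated earlier with shuffle imm8 68 (dword order 0,1,0,1) and 238 (2,3,2,3),
// and madd multiplies each 16-bit partial sum by its scale while pairwise-adding adjacent 16-bit lanes
// into 32-bit accumulators. The scaled dwords of the 00/01 (and 10/11) tiles end up interleaved in a
// checkerboard-like layout, which the _mm512_mask_blend_epi32(0xCCCC, ...) + shuffle imm8 78 (dword order
// 2,3,0,1) "straighten out" step just below rearranges into four contiguous row vectors.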
iacc_mat_00_5 = _mm512_madd_epi16(iacc_mat_00_5, scale_014589CD_5);
iacc_mat_01_5 = _mm512_madd_epi16(iacc_mat_01_5, scale_2367ABEF_5);
iacc_mat_10_5 = _mm512_madd_epi16(iacc_mat_10_5, scale_014589CD_5);
iacc_mat_11_5 = _mm512_madd_epi16(iacc_mat_11_5, scale_2367ABEF_5);
iacc_mat_00_6 = _mm512_madd_epi16(iacc_mat_00_6, scale_014589CD_6);
iacc_mat_01_6 = _mm512_madd_epi16(iacc_mat_01_6, scale_2367ABEF_6);
iacc_mat_10_6 = _mm512_madd_epi16(iacc_mat_10_6, scale_014589CD_6);
iacc_mat_11_6 = _mm512_madd_epi16(iacc_mat_11_6, scale_2367ABEF_6);
iacc_mat_00_7 = _mm512_madd_epi16(iacc_mat_00_7, scale_014589CD_7);
iacc_mat_01_7 = _mm512_madd_epi16(iacc_mat_01_7, scale_2367ABEF_7);
iacc_mat_10_7 = _mm512_madd_epi16(iacc_mat_10_7, scale_014589CD_7);
iacc_mat_11_7 = _mm512_madd_epi16(iacc_mat_11_7, scale_2367ABEF_7);
__m512i iacc_mat_00 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm512_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm512_add_epi32(iacc_mat_00_6, iacc_mat_00_7)));
__m512i iacc_mat_01 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm512_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm512_add_epi32(iacc_mat_01_6, iacc_mat_01_7)));
__m512i iacc_mat_10 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm512_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm512_add_epi32(iacc_mat_10_6, iacc_mat_10_7)));
__m512i iacc_mat_11 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm512_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm512_add_epi32(iacc_mat_11_6, iacc_mat_11_7)));
// Straighten out to make 4 row vectors
__m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78));
__m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01);
__m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78));
__m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11);
// Load the scale(d) values for all the 4 Q8_K blocks and repeat it across lanes
const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d);
const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1);
// Multiply with appropriate scales and accumulate (for both d and dmin) below
acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]);
acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]);
acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]);
acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]);
// Take two bsums
from two Q8_Ks at a time and multiply with corresponding mins values from each Q2_K __m512i iacc_row_min_0_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)0), mins_01); __m512i iacc_row_min_1_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)170), mins_01); __m512i iacc_row_min_2_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)0), mins_01); __m512i iacc_row_min_3_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)170), mins_01); __m512i iacc_row_min_0_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)85), mins_23); __m512i iacc_row_min_1_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)255), mins_23); __m512i iacc_row_min_2_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)85), mins_23); __m512i iacc_row_min_3_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)255), mins_23); __m512i iacc_row_min_0_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)0), mins_45); __m512i iacc_row_min_1_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)170), mins_45); __m512i iacc_row_min_2_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)0), mins_45); __m512i iacc_row_min_3_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)170), mins_45); __m512i iacc_row_min_0_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)85), mins_67); __m512i iacc_row_min_1_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)255), mins_67); __m512i iacc_row_min_2_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)85), mins_67); __m512i iacc_row_min_3_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)255), mins_67); __m512i iacc_row_min_0 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm512_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67)); __m512i iacc_row_min_1 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm512_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67)); __m512i iacc_row_min_2 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm512_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67)); __m512i iacc_row_min_3 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm512_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67)); acc_min_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]); acc_min_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]); acc_min_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]); acc_min_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]); } } } // Store the accumulated values for (int i = 0; i < 16; i++) { _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i])); } } } for (; y < nr / 4; y ++) { const block_q8_Kx4 * a_ptr = a_ptr_start 
+ (y * nb); // Take group of eight block_q2_kx8 structures at each pass of the loop and perform dot product operation for (int64_t x = 0; x < anc / 8; x += 2) { const block_q2_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); const block_q2_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); // Master FP accumulators __m512 acc_rows[4]; for (int i = 0; i < 4; i++) { acc_rows[i] = _mm512_setzero_ps(); } __m512 acc_min_rows[4]; for (int i = 0; i < 4; i++) { acc_min_rows[i] = _mm512_setzero_ps(); } // For super block for (int64_t b = 0; b < nb; b++) { // Delta values - Load the sixteen scale values from two block_q2_kx8 structures const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); // dmin values - Load the sixteen dmin values from two block_q2_kx8 structures const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 128; sb++) { // Load the eight block_q2_k for eight sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, 
requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); //2-bit -> 8-bit const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0,m3bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0,m3bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1,m3bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1,m3bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(rhs_raw_mat_014589CD_2,m3bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) const __m512i rhs_mat_2367ABEF_10 = 
_mm512_and_si512(rhs_raw_mat_2367ABEF_2,m3bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(rhs_raw_mat_014589CD_3,m3bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3,m3bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) const __m512i rhs_mat_014589CD_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 2), m3bexpanded); //B20(0-7) B21(0-7) B24(0-7) B25(0-7) B28(0-7) B29(0-7) B2C(0-7) B2D(0-7) const __m512i rhs_mat_2367ABEF_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 2), m3bexpanded); //B22(0-7) B23(0-7) B26(0-7) B27(0-7) B2A(0-7) B2B(0-7) B2E(0-7) B2F(0-7) const __m512i rhs_mat_014589CD_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 2), m3bexpanded); //B20(8-15) B21(8-15) B24(8-15) B25(8-15) B28(8-15) B29(8-15) B2C(8-15) B2D(8-15) const __m512i rhs_mat_2367ABEF_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 2), m3bexpanded); //B22(8-15) B23(8-15) B26(8-15) B27(8-15) B2A(8-15) B2B(8-15) B2E(8-15) B2F(8-15) const __m512i rhs_mat_014589CD_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 2), m3bexpanded); //B30(0-7) B31(0-7) B34(0-7) B35(0-7) B38(0-7) B39(0-7) B3C(0-7) B3D(0-7) const __m512i rhs_mat_2367ABEF_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 2), m3bexpanded); //B32(0-7) B33(0-7) B36(0-7) B37(0-7) B3A(0-7) B3B(0-7) B3E(0-7) B3F(0-7) const __m512i rhs_mat_014589CD_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 2), m3bexpanded); //B30(8-15) B31(8-15) B34(8-15) B35(8-15) B38(8-15) B39(8-15) B3C(8-15) B3D(8-15) const __m512i rhs_mat_2367ABEF_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 2), m3bexpanded); //B32(8-15) B33(8-15) B36(8-15) B37(8-15) B3A(8-15) B3B(8-15) B3E(8-15) B3F(8-15) const __m512i rhs_mat_014589CD_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m3bexpanded); //B40(0-7) B41(0-7) B44(0-7) B45(0-7) B48(0-7) B49(0-7) B4C(0-7) B4D(0-7) const __m512i rhs_mat_2367ABEF_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m3bexpanded); //B42(0-7) B43(0-7) B46(0-7) B47(0-7) B4A(0-7) B4B(0-7) B4E(0-7) B4F(0-7) const __m512i rhs_mat_014589CD_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m3bexpanded); //B40(8-15) B41(8-15) B44(8-15) B45(8-15) B48(8-15) B49(8-15) B4C(8-15) B4D(8-15) const __m512i rhs_mat_2367ABEF_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m3bexpanded); //B42(8-15) B43(8-15) B46(8-15) B47(8-15) B4A(8-15) B4B(8-15) B4E(8-15) B4F(8-15) const __m512i rhs_mat_014589CD_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m3bexpanded); //B50(0-7) B51(0-7) B54(0-7) B55(0-7) B58(0-7) B59(0-7) B5C(0-7) B5D(0-7) const __m512i rhs_mat_2367ABEF_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m3bexpanded); //B52(0-7) B53(0-7) B56(0-7) B57(0-7) B5A(0-7) B5B(0-7) B5E(0-7) B5F(0-7) const __m512i rhs_mat_014589CD_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m3bexpanded); //B50(8-15) B51(8-15) B54(8-15) B55(8-15) B58(8-15) B59(8-15) B5C(8-15) B5D(8-15) const __m512i rhs_mat_2367ABEF_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m3bexpanded); //B52(8-15) B53(8-15) B56(8-15) B57(8-15) B5A(8-15) B5B(8-15) B5E(8-15) B5F(8-15) const __m512i 
rhs_mat_014589CD_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 6), m3bexpanded); //B60(0-7) B61(0-7) B64(0-7) B65(0-7) B68(0-7) B69(0-7) B6C(0-7) B6D(0-7) const __m512i rhs_mat_2367ABEF_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 6), m3bexpanded); //B62(0-7) B63(0-7) B66(0-7) B67(0-7) B6A(0-7) B6B(0-7) B6E(0-7) B6F(0-7) const __m512i rhs_mat_014589CD_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 6), m3bexpanded); //B60(8-15) B61(8-15) B64(8-15) B65(8-15) B68(8-15) B69(8-15) B6C(8-15) B6D(8-15) const __m512i rhs_mat_2367ABEF_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 6), m3bexpanded); //B62(8-15) B63(8-15) B66(8-15) B67(8-15) B6A(8-15) B6B(8-15) B6E(8-15) B6F(8-15) const __m512i rhs_mat_014589CD_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 6), m3bexpanded); //B70(0-7) B71(0-7) B74(0-7) B75(0-7) B78(0-7) B79(0-7) B7C(0-7) B7D(0-7) const __m512i rhs_mat_2367ABEF_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 6), m3bexpanded); //B72(0-7) B73(0-7) B76(0-7) B77(0-7) B7A(0-7) B7B(0-7) B7E(0-7) B7F(0-7) const __m512i rhs_mat_014589CD_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 6), m3bexpanded); //B70(8-15) B71(8-15) B74(8-15) B75(8-15) B78(8-15) B79(8-15) B7C(8-15) B7D(8-15) const __m512i rhs_mat_2367ABEF_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 6), m3bexpanded); //B72(8-15) B73(8-15) B76(8-15) B77(8-15) B7A(8-15) B7B(8-15) B7E(8-15) B7F(8-15) const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) 
B1E(8-11) B1F(8-11) const __m512i rhs_mat_014589CD_20_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3) B28(0-3) B29(0-3) B28(0-3) B29(0-3) B2C(0-3) B2D(0-3) B2C(0-3) B2D(0-3) const __m512i rhs_mat_2367ABEF_20_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3) B2A(0-3) B2B(0-3) B2A(0-3) B2B(0-3) B2E(0-3) B2F(0-3) B2E(0-3) B2F(0-3) const __m512i rhs_mat_014589CD_21_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11) B28(8-11) B29(8-11) B28(8-11) B29(8-11) B2C(8-11) B2D(8-11) B2C(8-11) B2D(8-11) const __m512i rhs_mat_2367ABEF_21_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11) B2A(8-11) B2B(8-11) B2A(8-11) B2B(8-11) B2E(8-11) B2F(8-11) B2E(8-11) B2F(8-11) const __m512i rhs_mat_014589CD_30_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)136); ///B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3) B38(0-3) B39(0-3) B38(0-3) B39(0-3) B3C(0-3) B3D(0-3) B3C(0-3) B3D(0-3) const __m512i rhs_mat_2367ABEF_30_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3) B3A(0-3) B3B(0-3) B3A(0-3) B3B(0-3) B3E(0-3) B3F(0-3) B3E(0-3) B3F(0-3) const __m512i rhs_mat_014589CD_31_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11) B38(8-11) B39(8-11) B38(8-11) B39(8-11) B3C(8-11) B3D(8-11) B3C(8-11) B3D(8-11) const __m512i rhs_mat_2367ABEF_31_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11) B3A(8-11) B3B(8-11) B3A(8-11) B3B(8-11) B3E(8-11) B3F(8-11) B3E(8-11) B3F(8-11) const __m512i rhs_mat_014589CD_40_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3) B48(0-3) B49(0-3) B48(0-3) B49(0-3) B4C(0-3) B4D(0-3) B4C(0-3) B4D(0-3) const __m512i rhs_mat_2367ABEF_40_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3) B4A(0-3) B4B(0-3) B4A(0-3) B4B(0-3) B4E(0-3) B4F(0-3) B4E(0-3) B4F(0-3) const __m512i rhs_mat_014589CD_41_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11) B48(8-11) B49(8-11) B48(8-11) B49(8-11) B4C(8-11) B4D(8-11) B4C(8-11) B4D(8-11) const __m512i rhs_mat_2367ABEF_41_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11) B4A(8-11) B4B(8-11) B4A(8-11) B4B(8-11) B4E(8-11) B4F(8-11) B4E(8-11) B4F(8-11) const __m512i rhs_mat_014589CD_50_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3) B58(0-3) B59(0-3) B58(0-3) B59(0-3) B5C(0-3) B5D(0-3) B5C(0-3) B5D(0-3) const __m512i rhs_mat_2367ABEF_50_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3) B5A(0-3) B5B(0-3) 
B5A(0-3) B5B(0-3) B5E(0-3) B5F(0-3) B5E(0-3) B5F(0-3) const __m512i rhs_mat_014589CD_51_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11) B58(8-11) B59(8-11) B58(8-11) B59(8-11) B5C(8-11) B5D(8-11) B5C(8-11) B5D(8-11) const __m512i rhs_mat_2367ABEF_51_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11) B5A(8-11) B5B(8-11) B5A(8-11) B5B(8-11) B5E(8-11) B5F(8-11) B5E(8-11) B5F(8-11) const __m512i rhs_mat_014589CD_60_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3) B68(0-3) B69(0-3) B68(0-3) B69(0-3) B6C(0-3) B6D(0-3) B6C(0-3) B6D(0-3) const __m512i rhs_mat_2367ABEF_60_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3) B6A(0-3) B6B(0-3) B6A(0-3) B6B(0-3) B6E(0-3) B6F(0-3) B6E(0-3) B6F(0-3) const __m512i rhs_mat_014589CD_61_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11) B68(8-11) B69(8-11) B68(8-11) B69(8-11) B6C(8-11) B6D(8-11) B6C(8-11) B6D(8-11) const __m512i rhs_mat_2367ABEF_61_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11) B6A(8-11) B6B(8-11) B6A(8-11) B6B(8-11) B6E(8-11) B6F(8-11) B6E(8-11) B6F(8-11) const __m512i rhs_mat_014589CD_70_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3) B78(0-3) B79(0-3) B78(0-3) B79(0-3) B7C(0-3) B7D(0-3) B7C(0-3) B7D(0-3) const __m512i rhs_mat_2367ABEF_70_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3) B7A(0-3) B7B(0-3) B7A(0-3) B7B(0-3) B7E(0-3) B7F(0-3) B7E(0-3) B7F(0-3) const __m512i rhs_mat_014589CD_71_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11) B78(8-11) B79(8-11) B78(8-11) B79(8-11) B7C(8-11) B7D(8-11) B7C(8-11) B7D(8-11) const __m512i rhs_mat_2367ABEF_71_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11) B7A(8-11) B7B(8-11) B7A(8-11) B7B(8-11) B7E(8-11) B7F(8-11) B7E(8-11) B7F(8-11) const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) 
B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7) const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) const __m512i rhs_mat_014589CD_20_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7) B28(4-7) B29(4-7) B28(4-7) B29(4-7) B2C(4-7) B2D(4-7) B2C(4-7) B2D(4-7) const __m512i rhs_mat_2367ABEF_20_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7) B2A(4-7) B2B(4-7) B2A(4-7) B2B(4-7) B2E(4-7) B2F(4-7) B2E(4-7) B2F(4-7) const __m512i rhs_mat_014589CD_21_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15) B28(12-15) B29(12-15) B28(12-15) B29(12-15) B2C(12-15) B2D(12-15) B2C(12-15) B2D(12-15) const __m512i rhs_mat_2367ABEF_21_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15) B2A(12-15) B2B(12-15) B2A(12-15) B2B(12-15) B2E(12-15) B2F(12-15) B2E(12-15) B2F(12-15) const __m512i rhs_mat_014589CD_30_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7) B38(4-7) B39(4-7) B38(4-7) B39(4-7) B3C(4-7) B3D(4-7) B3C(4-7) B3D(4-7) const __m512i rhs_mat_2367ABEF_30_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7) B3A(4-7) B3B(4-7) B3A(4-7) B3B(4-7) B3E(4-7) B3F(4-7) B3E(4-7) B3F(4-7) const __m512i rhs_mat_014589CD_31_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15) B38(12-15) B39(12-15) B38(12-15) B39(12-15) B3C(12-15) B3D(12-15) B3C(12-15) B3D(12-15) const __m512i rhs_mat_2367ABEF_31_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15) B3A(12-15) B3B(12-15) B3A(12-15) B3B(12-15) B3E(12-15) B3F(12-15) B3E(12-15) B3F(12-15) const __m512i rhs_mat_014589CD_40_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7) B48(4-7) B49(4-7) B48(4-7) B49(4-7) 
B4C(4-7) B4D(4-7) B4C(4-7) B4D(4-7) const __m512i rhs_mat_2367ABEF_40_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7) B4A(4-7) B4B(4-7) B4A(4-7) B4B(4-7) B4E(4-7) B4F(4-7) B4E(4-7) B4F(4-7) const __m512i rhs_mat_014589CD_41_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15) B48(12-15) B49(12-15) B48(12-15) B49(12-15) B4C(12-15) B4D(12-15) B4C(12-15) B4D(12-15) const __m512i rhs_mat_2367ABEF_41_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15) B4A(12-15) B4B(12-15) B4A(12-15) B4B(12-15) B4E(12-15) B4F(12-15) B4E(12-15) B4F(12-15) const __m512i rhs_mat_014589CD_50_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7) B58(4-7) B59(4-7) B58(4-7) B59(4-7) B5C(4-7) B5D(4-7) B5C(4-7) B5D(4-7) const __m512i rhs_mat_2367ABEF_50_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7) B5A(4-7) B5B(4-7) B5A(4-7) B5B(4-7) B5E(4-7) B5F(4-7) B5E(4-7) B5F(4-7) const __m512i rhs_mat_014589CD_51_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15) B58(12-15) B59(12-15) B58(12-15) B59(12-15) B5C(12-15) B5D(12-15) B5C(12-15) B5D(12-15) const __m512i rhs_mat_2367ABEF_51_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15) B5A(12-15) B5B(12-15) B5A(12-15) B5B(12-15) B5E(12-15) B5F(12-15) B5E(12-15) B5F(12-15) const __m512i rhs_mat_014589CD_60_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7) B68(4-7) B69(4-7) B68(4-7) B69(4-7) B6C(4-7) B6D(4-7) B6C(4-7) B6D(4-7) const __m512i rhs_mat_2367ABEF_60_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7) B6A(4-7) B6B(4-7) B6A(4-7) B6B(4-7) B6E(4-7) B6F(4-7) B6E(4-7) B6F(4-7) const __m512i rhs_mat_014589CD_61_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15) B68(12-15) B69(12-15) B68(12-15) B69(12-15) B6C(12-15) B6D(12-15) B6C(12-15) B6D(12-15) const __m512i rhs_mat_2367ABEF_61_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15) B6A(12-15) B6B(12-15) B6A(12-15) B6B(12-15) B6E(12-15) B6F(12-15) B6E(12-15) B6F(12-15) const __m512i rhs_mat_014589CD_70_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7) B78(4-7) B79(4-7) B78(4-7) B79(4-7) B7C(4-7) B7D(4-7) B7C(4-7) B7D(4-7) const __m512i rhs_mat_2367ABEF_70_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7) B7A(4-7) B7B(4-7) B7A(4-7) B7B(4-7) B7E(4-7) B7F(4-7) B7E(4-7) B7F(4-7) const __m512i rhs_mat_014589CD_71_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, 
(_MM_PERM_ENUM)221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15) B78(12-15) B79(12-15) B78(12-15) B79(12-15) B7C(12-15) B7D(12-15) B7C(12-15) B7D(12-15) const __m512i rhs_mat_2367ABEF_71_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15) B7A(12-15) B7B(12-15) B7A(12-15) B7B(12-15) B7E(12-15) B7F(12-15) B7E(12-15) B7F(12-15) //notation:superblock subblock //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71 const __m128i mins_and_scales_01_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + sb * 64)); const __m128i mins_and_scales_23_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 16 + sb * 64)); const __m128i mins_and_scales_45_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 32 + sb * 64)); const __m128i mins_and_scales_67_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 48 + sb * 64)); const __m128i mins_and_scales_01_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + sb * 64)); const __m128i mins_and_scales_23_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 16 + sb * 64)); const __m128i mins_and_scales_45_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 32 + sb * 64)); const __m128i mins_and_scales_67_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 48 + sb * 64)); // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop const __m256i mins_and_scales_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_01_0), mins_and_scales_01_1, 1); const __m256i mins_and_scales_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_23_0), mins_and_scales_23_1, 1); const __m256i mins_and_scales_45 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_45_0), mins_and_scales_45_1, 1); const __m256i mins_and_scales_67 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_67_0), mins_and_scales_67_1, 1); // Extract scales which is lower half from mins_and_scales const __m256i scales_01 = _mm256_and_si256(mins_and_scales_01, m4b); const __m256i scales_23 = _mm256_and_si256(mins_and_scales_23, m4b); const __m256i scales_45 = _mm256_and_si256(mins_and_scales_45, m4b); const __m256i scales_67 = _mm256_and_si256(mins_and_scales_67, m4b); // Extract mins which is upper half from mins_and_scales const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_01, 4), m4b)); const __m512i mins_23 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_23, 4), m4b)); const __m512i mins_45 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_45, 4), m4b)); const __m512i mins_67 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_67, 4), m4b)); const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01, scalesmask1)); const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01, scalesmask2)); const __m512i scales_2 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23, scalesmask1)); const __m512i scales_3 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23, scalesmask2)); const __m512i scales_4 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45, scalesmask1)); const __m512i scales_5 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45, scalesmask2)); const __m512i scales_6 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67, 
scalesmask1)); const __m512i scales_7 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67, scalesmask2)); const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)238); const __m512i scale_014589CD_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)68); const __m512i scale_2367ABEF_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)238); // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 512 * sb))); __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 512 * sb))); __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 512 * sb))); __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 512 * sb))); __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); __m256i lhs_mat_ymm_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 512 * sb))); __m256i lhs_mat_ymm_01_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 0); __m256i lhs_mat_ymm_23_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 17); __m256i lhs_mat_ymm_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 512 * sb))); __m256i lhs_mat_ymm_01_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 0); __m256i lhs_mat_ymm_23_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 17); __m256i lhs_mat_ymm_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 512 * sb))); 
__m256i lhs_mat_ymm_01_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 0); __m256i lhs_mat_ymm_23_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 17); __m256i lhs_mat_ymm_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 512 * sb))); __m256i lhs_mat_ymm_01_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 0); __m256i lhs_mat_ymm_23_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 17); __m256i lhs_mat_ymm_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 + 512 * sb))); __m256i lhs_mat_ymm_01_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 0); __m256i lhs_mat_ymm_23_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 17); __m256i lhs_mat_ymm_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 288 + 512 * sb))); __m256i lhs_mat_ymm_01_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 0); __m256i lhs_mat_ymm_23_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 17); __m256i lhs_mat_ymm_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 320 + 512 * sb))); __m256i lhs_mat_ymm_01_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 0); __m256i lhs_mat_ymm_23_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 17); __m256i lhs_mat_ymm_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 352 + 512 * sb))); __m256i lhs_mat_ymm_01_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 0); __m256i lhs_mat_ymm_23_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 17); __m256i lhs_mat_ymm_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 384 + 512 * sb))); __m256i lhs_mat_ymm_01_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 0); __m256i lhs_mat_ymm_23_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 17); __m256i lhs_mat_ymm_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 416 + 512 * sb))); __m256i lhs_mat_ymm_01_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 0); __m256i lhs_mat_ymm_23_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 17); __m256i lhs_mat_ymm_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 448 + 512 * sb))); __m256i lhs_mat_ymm_01_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 0); __m256i lhs_mat_ymm_23_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 17); __m256i lhs_mat_ymm_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 480 + 512 * sb))); __m256i lhs_mat_ymm_01_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 0); __m256i lhs_mat_ymm_23_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 17); __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); __m512i lhs_mat_23_10 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); __m512i lhs_mat_01_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_20), lhs_mat_ymm_01_20, 1); __m512i lhs_mat_23_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_20), lhs_mat_ymm_23_20, 1); __m512i lhs_mat_01_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_21), lhs_mat_ymm_01_21, 1); __m512i lhs_mat_23_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_21), lhs_mat_ymm_23_21, 1); __m512i lhs_mat_01_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_30), lhs_mat_ymm_01_30, 1); __m512i lhs_mat_23_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_30), lhs_mat_ymm_23_30, 1); __m512i lhs_mat_01_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_31), lhs_mat_ymm_01_31, 1); __m512i lhs_mat_23_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_31), lhs_mat_ymm_23_31, 1); __m512i lhs_mat_01_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_40), lhs_mat_ymm_01_40, 1); __m512i lhs_mat_23_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_40), lhs_mat_ymm_23_40, 1); __m512i lhs_mat_01_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_41), lhs_mat_ymm_01_41, 1); __m512i lhs_mat_23_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_41), lhs_mat_ymm_23_41, 1); __m512i lhs_mat_01_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_50), lhs_mat_ymm_01_50, 1); __m512i lhs_mat_23_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_50), lhs_mat_ymm_23_50, 1); __m512i lhs_mat_01_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_51), lhs_mat_ymm_01_51, 1); __m512i lhs_mat_23_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_51), lhs_mat_ymm_23_51, 1); __m512i lhs_mat_01_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_60), lhs_mat_ymm_01_60, 1); __m512i lhs_mat_23_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_60), lhs_mat_ymm_23_60, 1); __m512i lhs_mat_01_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_61), lhs_mat_ymm_01_61, 1); __m512i lhs_mat_23_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_61), lhs_mat_ymm_23_61, 1); __m512i lhs_mat_01_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_70), lhs_mat_ymm_01_70, 1); __m512i lhs_mat_23_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_70), lhs_mat_ymm_23_70, 1); __m512i lhs_mat_01_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_71), lhs_mat_ymm_01_71, 1); __m512i lhs_mat_23_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_71), lhs_mat_ymm_23_71, 1); // Bsums are loaded for the different Q8_K blocks __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 32 * sb))); __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 8 + 32 * sb)); __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 16 + 32 * sb))); __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 24 + 32 * sb)); __m256i lhs_bsums_ymm_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1); __m512i lhs_bsums_01_0123 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_0123), lhs_bsums_ymm_01_0123, 1); __m256i lhs_bsums_ymm_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1); __m512i lhs_bsums_23_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_0123), lhs_bsums_ymm_23_0123, 1); __m256i lhs_bsums_ymm_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1); __m512i lhs_bsums_01_4567 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_4567), lhs_bsums_ymm_01_4567, 1); __m256i lhs_bsums_ymm_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1); __m512i lhs_bsums_23_4567 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_4567), lhs_bsums_ymm_23_4567, 1); // Shuffle pattern one - left side input const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) const __m512i lhs_mat_01_20_sp1 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) const __m512i lhs_mat_23_20_sp1 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)160); //A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) const __m512i lhs_mat_01_21_sp1 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) 
A21(8-11) A21(8-11) const __m512i lhs_mat_23_21_sp1 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)160); //A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) const __m512i lhs_mat_01_30_sp1 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) const __m512i lhs_mat_23_30_sp1 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)160); //A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) const __m512i lhs_mat_01_31_sp1 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) const __m512i lhs_mat_23_31_sp1 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)160); //A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) const __m512i lhs_mat_01_40_sp1 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) const __m512i lhs_mat_23_40_sp1 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)160); //A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) const __m512i lhs_mat_01_41_sp1 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) const __m512i lhs_mat_23_41_sp1 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)160); //A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) const __m512i lhs_mat_01_50_sp1 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) const __m512i lhs_mat_23_50_sp1 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)160); //A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) const __m512i lhs_mat_01_51_sp1 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) const __m512i lhs_mat_23_51_sp1 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)160); //A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) const __m512i lhs_mat_01_60_sp1 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) const __m512i lhs_mat_23_60_sp1 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)160); //A62(0-3) 
A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) const __m512i lhs_mat_01_61_sp1 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) const __m512i lhs_mat_23_61_sp1 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)160); //A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) const __m512i lhs_mat_01_70_sp1 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) const __m512i lhs_mat_23_70_sp1 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)160); //A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) const __m512i lhs_mat_01_71_sp1 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) const __m512i lhs_mat_23_71_sp1 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)160); //A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) 
A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) const __m512i lhs_mat_01_20_sp2 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) const __m512i lhs_mat_23_20_sp2 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)245); //A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) const __m512i lhs_mat_01_21_sp2 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) const __m512i lhs_mat_23_21_sp2 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)245); //A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) const __m512i lhs_mat_01_30_sp2 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) const __m512i lhs_mat_23_30_sp2 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)245); //A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) const __m512i lhs_mat_01_31_sp2 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) const __m512i lhs_mat_23_31_sp2 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)245); //A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) const __m512i lhs_mat_01_40_sp2 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) const __m512i lhs_mat_23_40_sp2 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)245); //A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) const __m512i lhs_mat_01_41_sp2 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) const __m512i lhs_mat_23_41_sp2 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)245); //A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) const __m512i lhs_mat_01_50_sp2 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) const __m512i lhs_mat_23_50_sp2 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)245); //A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) 
A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) const __m512i lhs_mat_01_51_sp2 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) const __m512i lhs_mat_23_51_sp2 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)245); //A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) const __m512i lhs_mat_01_60_sp2 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) const __m512i lhs_mat_23_60_sp2 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)245); //A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) const __m512i lhs_mat_01_61_sp2 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) const __m512i lhs_mat_23_61_sp2 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)245); //A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) const __m512i lhs_mat_01_70_sp2 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) const __m512i lhs_mat_23_70_sp2 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)245); //A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) const __m512i lhs_mat_01_71_sp2 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) const __m512i lhs_mat_23_71_sp2 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)245); //A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)); __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)); __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)); __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)); __m512i iacc_mat_00_1_sp1 = 
_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)); __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)); __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)); __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)); __m512i iacc_mat_00_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_01_21_sp1)); __m512i iacc_mat_01_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_01_21_sp1)); __m512i iacc_mat_10_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_23_21_sp1)); __m512i iacc_mat_11_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_23_21_sp1)); __m512i iacc_mat_00_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_01_31_sp1)); __m512i iacc_mat_01_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_01_31_sp1)); __m512i iacc_mat_10_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_23_31_sp1)); __m512i iacc_mat_11_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_23_31_sp1)); __m512i iacc_mat_00_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_01_41_sp1)); __m512i iacc_mat_01_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_01_41_sp1)); __m512i iacc_mat_10_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_23_41_sp1)); __m512i iacc_mat_11_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_23_41_sp1)); __m512i iacc_mat_00_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_01_51_sp1)); __m512i iacc_mat_01_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_01_51_sp1)); __m512i iacc_mat_10_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_23_51_sp1)); __m512i iacc_mat_11_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_23_51_sp1)); __m512i iacc_mat_00_6_sp1 = 
_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_01_61_sp1)); __m512i iacc_mat_01_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_01_61_sp1)); __m512i iacc_mat_10_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_23_61_sp1)); __m512i iacc_mat_11_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_23_61_sp1)); __m512i iacc_mat_00_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_01_71_sp1)); __m512i iacc_mat_01_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_01_71_sp1)); __m512i iacc_mat_10_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_23_71_sp1)); __m512i iacc_mat_11_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_23_71_sp1)); __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)); __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)); __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)); __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)); __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)); __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)); __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)); __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)); __m512i iacc_mat_00_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_01_21_sp2)); __m512i iacc_mat_01_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_01_21_sp2)); __m512i iacc_mat_10_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_23_21_sp2)); __m512i iacc_mat_11_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_23_21_sp2)); __m512i iacc_mat_00_3_sp2 = 
_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_01_31_sp2)); __m512i iacc_mat_01_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_01_31_sp2)); __m512i iacc_mat_10_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_23_31_sp2)); __m512i iacc_mat_11_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_23_31_sp2)); __m512i iacc_mat_00_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_01_41_sp2)); __m512i iacc_mat_01_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_01_41_sp2)); __m512i iacc_mat_10_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_23_41_sp2)); __m512i iacc_mat_11_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_23_41_sp2)); __m512i iacc_mat_00_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_01_51_sp2)); __m512i iacc_mat_01_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_01_51_sp2)); __m512i iacc_mat_10_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_23_51_sp2)); __m512i iacc_mat_11_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_23_51_sp2)); __m512i iacc_mat_00_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_01_61_sp2)); __m512i iacc_mat_01_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_01_61_sp2)); __m512i iacc_mat_10_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_23_61_sp2)); __m512i iacc_mat_11_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_23_61_sp2)); __m512i iacc_mat_00_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_01_71_sp2)); __m512i iacc_mat_01_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_01_71_sp2)); __m512i iacc_mat_10_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_23_71_sp2)); __m512i iacc_mat_11_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_23_71_sp2)); // Combine results from both shuffle 
// patterns for each output block
__m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
__m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
__m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
__m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
__m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
__m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
__m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
__m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
__m512i iacc_mat_00_2 = _mm512_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2);
__m512i iacc_mat_01_2 = _mm512_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2);
__m512i iacc_mat_10_2 = _mm512_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2);
__m512i iacc_mat_11_2 = _mm512_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2);
__m512i iacc_mat_00_3 = _mm512_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2);
__m512i iacc_mat_01_3 = _mm512_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2);
__m512i iacc_mat_10_3 = _mm512_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2);
__m512i iacc_mat_11_3 = _mm512_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2);
__m512i iacc_mat_00_4 = _mm512_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2);
__m512i iacc_mat_01_4 = _mm512_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2);
__m512i iacc_mat_10_4 = _mm512_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2);
__m512i iacc_mat_11_4 = _mm512_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2);
__m512i iacc_mat_00_5 = _mm512_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2);
__m512i iacc_mat_01_5 = _mm512_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2);
__m512i iacc_mat_10_5 = _mm512_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2);
__m512i iacc_mat_11_5 = _mm512_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2);
__m512i iacc_mat_00_6 = _mm512_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2);
__m512i iacc_mat_01_6 = _mm512_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2);
__m512i iacc_mat_10_6 = _mm512_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2);
__m512i iacc_mat_11_6 = _mm512_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2);
__m512i iacc_mat_00_7 = _mm512_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2);
__m512i iacc_mat_01_7 = _mm512_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2);
__m512i iacc_mat_10_7 = _mm512_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2);
__m512i iacc_mat_11_7 = _mm512_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2);
// Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block
iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0);
iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0);
iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0);
iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0);
iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1);
iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1);
iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1);
iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1);
iacc_mat_00_2 = _mm512_madd_epi16(iacc_mat_00_2, scale_014589CD_2);
iacc_mat_01_2 = _mm512_madd_epi16(iacc_mat_01_2, scale_2367ABEF_2);
iacc_mat_10_2 = _mm512_madd_epi16(iacc_mat_10_2, scale_014589CD_2);
iacc_mat_11_2 = _mm512_madd_epi16(iacc_mat_11_2, scale_2367ABEF_2);
iacc_mat_00_3 =
_mm512_madd_epi16(iacc_mat_00_3, scale_014589CD_3); iacc_mat_01_3 = _mm512_madd_epi16(iacc_mat_01_3, scale_2367ABEF_3); iacc_mat_10_3 = _mm512_madd_epi16(iacc_mat_10_3, scale_014589CD_3); iacc_mat_11_3 = _mm512_madd_epi16(iacc_mat_11_3, scale_2367ABEF_3); iacc_mat_00_4 = _mm512_madd_epi16(iacc_mat_00_4, scale_014589CD_4); iacc_mat_01_4 = _mm512_madd_epi16(iacc_mat_01_4, scale_2367ABEF_4); iacc_mat_10_4 = _mm512_madd_epi16(iacc_mat_10_4, scale_014589CD_4); iacc_mat_11_4 = _mm512_madd_epi16(iacc_mat_11_4, scale_2367ABEF_4); iacc_mat_00_5 = _mm512_madd_epi16(iacc_mat_00_5, scale_014589CD_5); iacc_mat_01_5 = _mm512_madd_epi16(iacc_mat_01_5, scale_2367ABEF_5); iacc_mat_10_5 = _mm512_madd_epi16(iacc_mat_10_5, scale_014589CD_5); iacc_mat_11_5 = _mm512_madd_epi16(iacc_mat_11_5, scale_2367ABEF_5); iacc_mat_00_6 = _mm512_madd_epi16(iacc_mat_00_6, scale_014589CD_6); iacc_mat_01_6 = _mm512_madd_epi16(iacc_mat_01_6, scale_2367ABEF_6); iacc_mat_10_6 = _mm512_madd_epi16(iacc_mat_10_6, scale_014589CD_6); iacc_mat_11_6 = _mm512_madd_epi16(iacc_mat_11_6, scale_2367ABEF_6); iacc_mat_00_7 = _mm512_madd_epi16(iacc_mat_00_7, scale_014589CD_7); iacc_mat_01_7 = _mm512_madd_epi16(iacc_mat_01_7, scale_2367ABEF_7); iacc_mat_10_7 = _mm512_madd_epi16(iacc_mat_10_7, scale_014589CD_7); iacc_mat_11_7 = _mm512_madd_epi16(iacc_mat_11_7, scale_2367ABEF_7); __m512i iacc_mat_00 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm512_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm512_add_epi32(iacc_mat_00_6, iacc_mat_00_7))); __m512i iacc_mat_01 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm512_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm512_add_epi32(iacc_mat_01_6, iacc_mat_01_7))); __m512i iacc_mat_10 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm512_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm512_add_epi32(iacc_mat_10_6, iacc_mat_10_7))); __m512i iacc_mat_11 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm512_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm512_add_epi32(iacc_mat_11_6, iacc_mat_11_7))); // Straighten out to make 4 row vectors __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1); // Multiply with appropiate scales and accumulate (for both d and dmin) below acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); acc_rows[1] = 
_mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); // Take two bsums from two Q8_Ks at a time and multiply with corresponding mins values from each Q2_K __m512i iacc_row_min_0_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)0), mins_01); __m512i iacc_row_min_1_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)170), mins_01); __m512i iacc_row_min_2_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)0), mins_01); __m512i iacc_row_min_3_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)170), mins_01); __m512i iacc_row_min_0_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)85), mins_23); __m512i iacc_row_min_1_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)255), mins_23); __m512i iacc_row_min_2_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)85), mins_23); __m512i iacc_row_min_3_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)255), mins_23); __m512i iacc_row_min_0_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)0), mins_45); __m512i iacc_row_min_1_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)170), mins_45); __m512i iacc_row_min_2_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)0), mins_45); __m512i iacc_row_min_3_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)170), mins_45); __m512i iacc_row_min_0_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)85), mins_67); __m512i iacc_row_min_1_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)255), mins_67); __m512i iacc_row_min_2_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)85), mins_67); __m512i iacc_row_min_3_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)255), mins_67); __m512i iacc_row_min_0 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm512_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67)); __m512i iacc_row_min_1 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm512_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67)); __m512i iacc_row_min_2 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm512_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67)); __m512i iacc_row_min_3 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm512_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67)); acc_min_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]); acc_min_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); acc_min_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); 
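// How the two accumulators fit together (shorthand below, not variables in this
// file): a Q2_K weight dequantizes as d*scale*q - dmin*min, so the dot product
// against a Q8_K row splits into two sums. acc_rows accumulates
// d * d8 * sum_sb(scale_sb * dot(q2_sb, q8_sb)) and acc_min_rows accumulates
// dmin * d8 * sum_sb(min_sb * bsum_sb), where bsum_sb is the precomputed sum of
// the Q8_K quants of one sub block; the store below takes their difference.
// The _mm512_shuffle_ps immediates 0/85/170/255 broadcast the Q8_K scale of
// row 0/1/2/3 across all lanes before the fused multiply-add.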
acc_min_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]);
}
}
// Store accumulated values
for (int i = 0; i < 4; i++) {
_mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
}
}
}
if (anc != nc) {
xstart = anc/8;
y = 0;
}
#endif // __AVX512BW__ && __AVX512DQ__
// Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation
for (; y < anr / 4; y += 4) {
const block_q8_Kx4 * a_ptrs[4];
a_ptrs[0] = a_ptr_start + (y * nb);
for (int i = 0; i < 3; ++i) {
a_ptrs[i + 1] = a_ptrs[i] + nb;
}
// Take group of eight block_q2_Kx8 structures at each pass of the loop and perform dot product operation
for (int64_t x = xstart; x < nc / 8; x++) {
const block_q2_Kx8 * b_ptr = b_ptr_start + (x * b_nb);
// Master FP accumulators
__m256 acc_rows[16];
for (int i = 0; i < 16; i++) {
acc_rows[i] = _mm256_setzero_ps();
}
__m256 acc_min_rows[16];
for (int i = 0; i < 16; i++) {
acc_min_rows[i] = _mm256_setzero_ps();
}
// For super block
for (int64_t b = 0; b < nb; b++) {
// Delta values - Load the eight scale values of block_q2_Kx8
const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d);
// dmin values - Load the eight dmin values of block_q2_Kx8
const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin);
// Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration
for (int sb = 0; sb < QK_K / 128; sb++) {
// Load the eight block_q2_K quantized values for eight sub blocks, interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7
const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + sb * 256));
const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32 + sb * 256));
const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64 + sb * 256));
const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96 + sb * 256));
const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 128 + sb * 256));
const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 160 + sb * 256));
const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 192 + sb * 256));
const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 224 + sb * 256));
// Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values
// (vector naming below: superblock, sub block, which part of sub block)
const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240);
const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240);
const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240);
const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240);
const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240);
const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240);
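// Note on the 2-bit packing handled below (scalar sketch, names illustrative):
// each byte of the interleaved qs data carries four 2-bit weights, one per sub
// block, at bit offsets 0, 2, 4 and 6, so a single weight is recovered as
//     q = (byte >> shift) & 0x3,   shift in {0, 2, 4, 6}
// The vector code below does the same 32 bytes at a time: the weights at offset
// 0 are taken with a plain _mm256_and_si256(..., m3b), the others with
// _mm256_srli_epi16 by 2, 4 or 6 followed by the same mask; m3b (0x03 in every
// byte) also discards bits that the 16-bit shift drags across byte boundaries.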
const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); // 2-bit -> 8-bit // First sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m3b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m3b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m3b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m3b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) // Second sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_10 = _mm256_and_si256(rhs_raw_mat_0145_2, m3b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) const __m256i rhs_mat_2367_10 = _mm256_and_si256(rhs_raw_mat_2367_2, m3b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) const __m256i rhs_mat_0145_11 = _mm256_and_si256(rhs_raw_mat_0145_3, m3b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) const __m256i rhs_mat_2367_11 = _mm256_and_si256(rhs_raw_mat_2367_3, m3b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) // Third sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 2), m3b); //B20(0-7) B21(0-7) B24(0-7) B25(0-7) const __m256i rhs_mat_2367_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 2), m3b); //B22(0-7) B23(0-7) B26(0-7) B27(0-7) const __m256i rhs_mat_0145_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 2), m3b); //B20(8-15) B21(8-15) B24(8-15) B25(8-15) const __m256i rhs_mat_2367_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 2), m3b); //B22(8-15) B23(8-15) B26(8-15) B27(8-15) // Fourth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 2), m3b); //B30(0-7) B31(0-7) B34(0-7) B35(0-7) const __m256i rhs_mat_2367_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 2), m3b); //B32(0-7) B33(0-7) B36(0-7) B37(0-7) const __m256i rhs_mat_0145_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 2), m3b); //B30(8-15) B31(8-15) B34(8-15) B35(8-15) const __m256i rhs_mat_2367_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 2), m3b); //B32(8-15) B33(8-15) B36(8-15) B37(8-15) // Fifth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m3b); //B40(0-7) B41(0-7) B44(0-7) B45(0-7) const __m256i rhs_mat_2367_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m3b); //B42(0-7) B43(0-7) B46(0-7) B47(0-7) const __m256i rhs_mat_0145_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m3b); //B40(8-15) B41(8-15) B44(8-15) B45(8-15) const __m256i rhs_mat_2367_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m3b); //B42(8-15) B43(8-15) B46(8-15) B47(8-15) // Sixth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m3b); //B50(0-7) B51(0-7) B54(0-7) B55(0-7) const __m256i rhs_mat_2367_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m3b); //B52(0-7) B53(0-7) B56(0-7) 
//B57(0-7)
const __m256i rhs_mat_0145_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m3b); //B50(8-15) B51(8-15) B54(8-15) B55(8-15)
const __m256i rhs_mat_2367_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m3b); //B52(8-15) B53(8-15) B56(8-15) B57(8-15)
// Seventh sub block of the eight sub blocks processed in the iteration
const __m256i rhs_mat_0145_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 6), m3b); //B60(0-7) B61(0-7) B64(0-7) B65(0-7)
const __m256i rhs_mat_2367_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 6), m3b); //B62(0-7) B63(0-7) B66(0-7) B67(0-7)
const __m256i rhs_mat_0145_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 6), m3b); //B60(8-15) B61(8-15) B64(8-15) B65(8-15)
const __m256i rhs_mat_2367_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 6), m3b); //B62(8-15) B63(8-15) B66(8-15) B67(8-15)
// Eighth sub block of the eight sub blocks processed in the iteration
const __m256i rhs_mat_0145_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 6), m3b); //B70(0-7) B71(0-7) B74(0-7) B75(0-7)
const __m256i rhs_mat_2367_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 6), m3b); //B72(0-7) B73(0-7) B76(0-7) B77(0-7)
const __m256i rhs_mat_0145_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 6), m3b); //B70(8-15) B71(8-15) B74(8-15) B75(8-15)
const __m256i rhs_mat_2367_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 6), m3b); //B72(8-15) B73(8-15) B76(8-15) B77(8-15)
// Shuffle pattern one - right side input
const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3)
const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3)
const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11)
const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11)
const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3)
const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3)
const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11)
const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11)
const __m256i rhs_mat_0145_20_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_20, 136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3)
const __m256i rhs_mat_2367_20_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_20, 136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3)
const __m256i rhs_mat_0145_21_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_21, 136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11)
const __m256i rhs_mat_2367_21_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_21, 136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11)
const __m256i rhs_mat_0145_30_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_30, 136); //B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3)
const __m256i rhs_mat_2367_30_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_30, 136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3)
const __m256i rhs_mat_0145_31_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_31, 136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11)
const __m256i rhs_mat_2367_31_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_31, 136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11)
const __m256i rhs_mat_0145_40_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_40, 136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3)
const __m256i rhs_mat_2367_40_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_40, 136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3)
const __m256i rhs_mat_0145_41_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_41, 136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11)
const __m256i rhs_mat_2367_41_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_41, 136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11)
const __m256i rhs_mat_0145_50_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_50, 136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3)
const __m256i rhs_mat_2367_50_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_50, 136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3)
const __m256i rhs_mat_0145_51_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_51, 136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11)
const __m256i rhs_mat_2367_51_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_51, 136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11)
const __m256i rhs_mat_0145_60_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_60, 136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3)
const __m256i rhs_mat_2367_60_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_60, 136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3)
const __m256i rhs_mat_0145_61_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_61, 136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11)
const __m256i rhs_mat_2367_61_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_61, 136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11)
const __m256i rhs_mat_0145_70_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_70, 136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3)
const __m256i rhs_mat_2367_70_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_70, 136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3)
const __m256i rhs_mat_0145_71_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_71, 136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11)
const __m256i rhs_mat_2367_71_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_71, 136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11)
// Shuffle pattern two - right side input
const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7)
const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7)
const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15)
const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15)
const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7)
const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7)
const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15)
const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15)
const __m256i rhs_mat_0145_20_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_20, 221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7)
const __m256i rhs_mat_2367_20_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_20, 221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7)
const __m256i rhs_mat_0145_21_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_21, 221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15)
const __m256i rhs_mat_2367_21_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_21, 221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15)
const __m256i rhs_mat_0145_30_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_30, 221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7)
const __m256i rhs_mat_2367_30_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_30, 221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7)
const __m256i rhs_mat_0145_31_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_31, 221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15)
const __m256i rhs_mat_2367_31_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_31, 221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15)
const __m256i rhs_mat_0145_40_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_40, 221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7)
const __m256i rhs_mat_2367_40_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_40, 221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7)
const __m256i rhs_mat_0145_41_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_41, 221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15)
const __m256i rhs_mat_2367_41_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_41, 221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15)
const __m256i rhs_mat_0145_50_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_50, 221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7)
const __m256i rhs_mat_2367_50_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_50, 221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7)
const __m256i rhs_mat_0145_51_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_51, 221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15)
const __m256i rhs_mat_2367_51_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_51, 221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15)
const __m256i rhs_mat_0145_60_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_60, 221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7)
const __m256i rhs_mat_2367_60_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_60, 221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7)
const __m256i rhs_mat_0145_61_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_61, 221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15)
const __m256i rhs_mat_2367_61_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_61, 221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15)
const __m256i rhs_mat_0145_70_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_70, 221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7)
const __m256i rhs_mat_2367_70_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_70, 221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7)
const __m256i rhs_mat_0145_71_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_71, 221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15)
const __m256i rhs_mat_2367_71_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_71, 221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15)
//Scales and Mins of corresponding sub blocks from different Q2_K structures are stored together
//s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71
// Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop
const __m128i mins_and_scales_01 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + sb * 64));
const __m128i mins_and_scales_23 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 16 + sb * 64));
const __m128i mins_and_scales_45 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 32 + sb * 64));
const __m128i mins_and_scales_67 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 48 + sb * 64));
// Extract scales which are the lower half of mins_and_scales
const __m128i scales_01 = _mm_and_si128(mins_and_scales_01, m4b_sse);
const __m128i scales_23 = _mm_and_si128(mins_and_scales_23, m4b_sse);
const __m128i scales_45 = _mm_and_si128(mins_and_scales_45, m4b_sse);
const __m128i scales_67 = _mm_and_si128(mins_and_scales_67, m4b_sse);
// Extract mins which are the upper half of mins_and_scales
const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_01, 4), m4b_sse));
const __m256i mins_23 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_23, 4), m4b_sse));
const __m256i mins_45 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_45, 4), m4b_sse));
const __m256i mins_67 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_67, 4), m4b_sse));
const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_01, scalesmask1_sse));
const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_01, scalesmask2_sse));
const __m256i scales_2 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_23, scalesmask1_sse));
const __m256i scales_3 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_23, scalesmask2_sse));
const __m256i scales_4 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_45, scalesmask1_sse));
const __m256i scales_5 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_45, scalesmask2_sse));
const __m256i scales_6 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_67, scalesmask1_sse));
const __m256i scales_7 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_67, scalesmask2_sse));
const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68);
const __m256i
scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); const __m256i scale_0145_2 = _mm256_shuffle_epi32(scales_2, 68); const __m256i scale_2367_2 = _mm256_shuffle_epi32(scales_2, 238); const __m256i scale_0145_3 = _mm256_shuffle_epi32(scales_3, 68); const __m256i scale_2367_3 = _mm256_shuffle_epi32(scales_3, 238); const __m256i scale_0145_4 = _mm256_shuffle_epi32(scales_4, 68); const __m256i scale_2367_4 = _mm256_shuffle_epi32(scales_4, 238); const __m256i scale_0145_5 = _mm256_shuffle_epi32(scales_5, 68); const __m256i scale_2367_5 = _mm256_shuffle_epi32(scales_5, 238); const __m256i scale_0145_6 = _mm256_shuffle_epi32(scales_6, 68); const __m256i scale_2367_6 = _mm256_shuffle_epi32(scales_6, 238); const __m256i scale_0145_7 = _mm256_shuffle_epi32(scales_7, 68); const __m256i scale_2367_7 = _mm256_shuffle_epi32(scales_7, 238); for (int rp = 0; rp < 4; rp++) { // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 512 * sb))); __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 512 * sb))); __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 512 * sb))); __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 512 * sb))); __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); __m256i lhs_mat_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 512 * sb))); __m256i lhs_mat_01_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 0); __m256i lhs_mat_23_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 17); __m256i lhs_mat_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 512 * sb))); __m256i lhs_mat_01_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 0); __m256i lhs_mat_23_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 17); __m256i lhs_mat_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 512 * sb))); __m256i lhs_mat_01_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 0); __m256i lhs_mat_23_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 17); __m256i lhs_mat_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 512 * sb))); __m256i lhs_mat_01_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 0); __m256i lhs_mat_23_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 17); __m256i lhs_mat_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 + 512 * sb))); __m256i lhs_mat_01_40 = 
_mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 0); __m256i lhs_mat_23_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 17); __m256i lhs_mat_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 288 + 512 * sb))); __m256i lhs_mat_01_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 0); __m256i lhs_mat_23_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 17); __m256i lhs_mat_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 320 + 512 * sb))); __m256i lhs_mat_01_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 0); __m256i lhs_mat_23_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 17); __m256i lhs_mat_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 352 + 512 * sb))); __m256i lhs_mat_01_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 0); __m256i lhs_mat_23_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 17); __m256i lhs_mat_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 384 + 512 * sb))); __m256i lhs_mat_01_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 0); __m256i lhs_mat_23_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 17); __m256i lhs_mat_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 416 + 512 * sb))); __m256i lhs_mat_01_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 0); __m256i lhs_mat_23_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 17); __m256i lhs_mat_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 448 + 512 * sb))); __m256i lhs_mat_01_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 0); __m256i lhs_mat_23_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 17); __m256i lhs_mat_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 480 + 512 * sb))); __m256i lhs_mat_01_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 0); __m256i lhs_mat_23_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 17); // Bsums are loaded for the different Q8_K blocks __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 32 * sb))); __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 8 + 32 * sb)); __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 16 + 32 * sb))); __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 24 + 32 * sb)); // Shuffle pattern one - left side input const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) const __m256i 
lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) const __m256i lhs_mat_01_20_sp1 = _mm256_shuffle_epi32(lhs_mat_01_20, 160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) const __m256i lhs_mat_23_20_sp1 = _mm256_shuffle_epi32(lhs_mat_23_20, 160); //A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) const __m256i lhs_mat_01_21_sp1 = _mm256_shuffle_epi32(lhs_mat_01_21, 160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) const __m256i lhs_mat_23_21_sp1 = _mm256_shuffle_epi32(lhs_mat_23_21, 160); //A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) const __m256i lhs_mat_01_30_sp1 = _mm256_shuffle_epi32(lhs_mat_01_30, 160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) const __m256i lhs_mat_23_30_sp1 = _mm256_shuffle_epi32(lhs_mat_23_30, 160); //A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) const __m256i lhs_mat_01_31_sp1 = _mm256_shuffle_epi32(lhs_mat_01_31, 160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) const __m256i lhs_mat_23_31_sp1 = _mm256_shuffle_epi32(lhs_mat_23_31, 160); //A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11) const __m256i lhs_mat_01_40_sp1 = _mm256_shuffle_epi32(lhs_mat_01_40, 160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) const __m256i lhs_mat_23_40_sp1 = _mm256_shuffle_epi32(lhs_mat_23_40, 160); //A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) const __m256i lhs_mat_01_41_sp1 = _mm256_shuffle_epi32(lhs_mat_01_41, 160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) const __m256i lhs_mat_23_41_sp1 = _mm256_shuffle_epi32(lhs_mat_23_41, 160); //A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) const __m256i lhs_mat_01_50_sp1 = _mm256_shuffle_epi32(lhs_mat_01_50, 160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) const __m256i lhs_mat_23_50_sp1 = _mm256_shuffle_epi32(lhs_mat_23_50, 160); //A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) const __m256i lhs_mat_01_51_sp1 = _mm256_shuffle_epi32(lhs_mat_01_51, 160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) const __m256i lhs_mat_23_51_sp1 = _mm256_shuffle_epi32(lhs_mat_23_51, 160); //A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) const __m256i lhs_mat_01_60_sp1 = _mm256_shuffle_epi32(lhs_mat_01_60, 160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) const __m256i lhs_mat_23_60_sp1 = _mm256_shuffle_epi32(lhs_mat_23_60, 160); //A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) const __m256i lhs_mat_01_61_sp1 = _mm256_shuffle_epi32(lhs_mat_01_61, 160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) const __m256i lhs_mat_23_61_sp1 = _mm256_shuffle_epi32(lhs_mat_23_61, 160); //A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) const __m256i lhs_mat_01_70_sp1 = _mm256_shuffle_epi32(lhs_mat_01_70, 160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) const __m256i 
lhs_mat_23_70_sp1 = _mm256_shuffle_epi32(lhs_mat_23_70, 160); //A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) const __m256i lhs_mat_01_71_sp1 = _mm256_shuffle_epi32(lhs_mat_01_71, 160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) const __m256i lhs_mat_23_71_sp1 = _mm256_shuffle_epi32(lhs_mat_23_71, 160); //A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) // Shuffle pattern two- left side input const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) const __m256i lhs_mat_01_20_sp2 = _mm256_shuffle_epi32(lhs_mat_01_20, 245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) const __m256i lhs_mat_23_20_sp2 = _mm256_shuffle_epi32(lhs_mat_23_20, 245); //A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) const __m256i lhs_mat_01_21_sp2 = _mm256_shuffle_epi32(lhs_mat_01_21, 245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) const __m256i lhs_mat_23_21_sp2 = _mm256_shuffle_epi32(lhs_mat_23_21, 245); //A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) const __m256i lhs_mat_01_30_sp2 = _mm256_shuffle_epi32(lhs_mat_01_30, 245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) const __m256i lhs_mat_23_30_sp2 = _mm256_shuffle_epi32(lhs_mat_23_30, 245); //A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) const __m256i lhs_mat_01_31_sp2 = _mm256_shuffle_epi32(lhs_mat_01_31, 245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) const __m256i lhs_mat_23_31_sp2 = _mm256_shuffle_epi32(lhs_mat_23_31, 245); //A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) const __m256i lhs_mat_01_40_sp2 = _mm256_shuffle_epi32(lhs_mat_01_40, 245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) const __m256i lhs_mat_23_40_sp2 = _mm256_shuffle_epi32(lhs_mat_23_40, 245); //A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) const __m256i lhs_mat_01_41_sp2 = _mm256_shuffle_epi32(lhs_mat_01_41, 245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) const __m256i lhs_mat_23_41_sp2 = 
_mm256_shuffle_epi32(lhs_mat_23_41, 245); //A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) const __m256i lhs_mat_01_50_sp2 = _mm256_shuffle_epi32(lhs_mat_01_50, 245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) const __m256i lhs_mat_23_50_sp2 = _mm256_shuffle_epi32(lhs_mat_23_50, 245); //A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) const __m256i lhs_mat_01_51_sp2 = _mm256_shuffle_epi32(lhs_mat_01_51, 245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) const __m256i lhs_mat_23_51_sp2 = _mm256_shuffle_epi32(lhs_mat_23_51, 245); //A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) const __m256i lhs_mat_01_60_sp2 = _mm256_shuffle_epi32(lhs_mat_01_60, 245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) const __m256i lhs_mat_23_60_sp2 = _mm256_shuffle_epi32(lhs_mat_23_60, 245); //A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) const __m256i lhs_mat_01_61_sp2 = _mm256_shuffle_epi32(lhs_mat_01_61, 245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) const __m256i lhs_mat_23_61_sp2 = _mm256_shuffle_epi32(lhs_mat_23_61, 245); //A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) const __m256i lhs_mat_01_70_sp2 = _mm256_shuffle_epi32(lhs_mat_01_70, 245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) const __m256i lhs_mat_23_70_sp2 = _mm256_shuffle_epi32(lhs_mat_23_70, 245); //A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) const __m256i lhs_mat_01_71_sp2 = _mm256_shuffle_epi32(lhs_mat_01_71, 245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) const __m256i lhs_mat_23_71_sp2 = _mm256_shuffle_epi32(lhs_mat_23_71, 245); //A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)); __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)); __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)); __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)); __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)); __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)); __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)); __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, 
lhs_mat_23_11_sp1)); __m256i iacc_mat_00_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_01_21_sp1)); __m256i iacc_mat_01_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_01_21_sp1)); __m256i iacc_mat_10_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_23_21_sp1)); __m256i iacc_mat_11_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_23_21_sp1)); __m256i iacc_mat_00_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_01_31_sp1)); __m256i iacc_mat_01_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_01_31_sp1)); __m256i iacc_mat_10_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_23_31_sp1)); __m256i iacc_mat_11_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_23_31_sp1)); __m256i iacc_mat_00_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_01_41_sp1)); __m256i iacc_mat_01_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_01_41_sp1)); __m256i iacc_mat_10_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_23_41_sp1)); __m256i iacc_mat_11_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_23_41_sp1)); __m256i iacc_mat_00_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_01_51_sp1)); __m256i iacc_mat_01_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_01_51_sp1)); __m256i iacc_mat_10_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_23_51_sp1)); __m256i iacc_mat_11_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_23_51_sp1)); __m256i iacc_mat_00_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_01_61_sp1)); __m256i iacc_mat_01_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_01_61_sp1)); __m256i iacc_mat_10_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_23_61_sp1)); __m256i iacc_mat_11_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_23_61_sp1)); __m256i iacc_mat_00_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, 
lhs_mat_01_71_sp1)); __m256i iacc_mat_01_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_01_71_sp1)); __m256i iacc_mat_10_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_23_71_sp1)); __m256i iacc_mat_11_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_23_71_sp1)); __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)); __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)); __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)); __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)); __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)); __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)); __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)); __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)); __m256i iacc_mat_00_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_01_21_sp2)); __m256i iacc_mat_01_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_01_21_sp2)); __m256i iacc_mat_10_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_23_21_sp2)); __m256i iacc_mat_11_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_23_21_sp2)); __m256i iacc_mat_00_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_01_31_sp2)); __m256i iacc_mat_01_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_01_31_sp2)); __m256i iacc_mat_10_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_23_31_sp2)); __m256i iacc_mat_11_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_23_31_sp2)); __m256i iacc_mat_00_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_01_41_sp2)); __m256i iacc_mat_01_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, 
lhs_mat_01_41_sp2)); __m256i iacc_mat_10_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_23_41_sp2)); __m256i iacc_mat_11_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_23_41_sp2)); __m256i iacc_mat_00_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_01_51_sp2)); __m256i iacc_mat_01_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_01_51_sp2)); __m256i iacc_mat_10_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_23_51_sp2)); __m256i iacc_mat_11_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_23_51_sp2)); __m256i iacc_mat_00_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_01_61_sp2)); __m256i iacc_mat_01_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_01_61_sp2)); __m256i iacc_mat_10_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_23_61_sp2)); __m256i iacc_mat_11_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_23_61_sp2)); __m256i iacc_mat_00_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_01_71_sp2)); __m256i iacc_mat_01_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_01_71_sp2)); __m256i iacc_mat_10_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_23_71_sp2)); __m256i iacc_mat_11_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_23_71_sp2)); // Combine results from both shuffle patterns for each output block __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); __m256i iacc_mat_00_2 = _mm256_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2); __m256i iacc_mat_01_2 = _mm256_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2); __m256i iacc_mat_10_2 = _mm256_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2); __m256i iacc_mat_11_2 = _mm256_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2); __m256i iacc_mat_00_3 = _mm256_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2); __m256i iacc_mat_01_3 = _mm256_add_epi16(iacc_mat_01_3_sp1, 
iacc_mat_01_3_sp2); __m256i iacc_mat_10_3 = _mm256_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2); __m256i iacc_mat_11_3 = _mm256_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2); __m256i iacc_mat_00_4 = _mm256_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2); __m256i iacc_mat_01_4 = _mm256_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2); __m256i iacc_mat_10_4 = _mm256_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2); __m256i iacc_mat_11_4 = _mm256_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2); __m256i iacc_mat_00_5 = _mm256_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2); __m256i iacc_mat_01_5 = _mm256_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2); __m256i iacc_mat_10_5 = _mm256_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2); __m256i iacc_mat_11_5 = _mm256_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2); __m256i iacc_mat_00_6 = _mm256_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2); __m256i iacc_mat_01_6 = _mm256_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2); __m256i iacc_mat_10_6 = _mm256_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2); __m256i iacc_mat_11_6 = _mm256_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2); __m256i iacc_mat_00_7 = _mm256_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2); __m256i iacc_mat_01_7 = _mm256_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2); __m256i iacc_mat_10_7 = _mm256_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2); __m256i iacc_mat_11_7 = _mm256_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); iacc_mat_00_2 = _mm256_madd_epi16(iacc_mat_00_2, scale_0145_2); iacc_mat_01_2 = _mm256_madd_epi16(iacc_mat_01_2, scale_2367_2); iacc_mat_10_2 = _mm256_madd_epi16(iacc_mat_10_2, scale_0145_2); iacc_mat_11_2 = _mm256_madd_epi16(iacc_mat_11_2, scale_2367_2); iacc_mat_00_3 = _mm256_madd_epi16(iacc_mat_00_3, scale_0145_3); iacc_mat_01_3 = _mm256_madd_epi16(iacc_mat_01_3, scale_2367_3); iacc_mat_10_3 = _mm256_madd_epi16(iacc_mat_10_3, scale_0145_3); iacc_mat_11_3 = _mm256_madd_epi16(iacc_mat_11_3, scale_2367_3); iacc_mat_00_4 = _mm256_madd_epi16(iacc_mat_00_4, scale_0145_4); iacc_mat_01_4 = _mm256_madd_epi16(iacc_mat_01_4, scale_2367_4); iacc_mat_10_4 = _mm256_madd_epi16(iacc_mat_10_4, scale_0145_4); iacc_mat_11_4 = _mm256_madd_epi16(iacc_mat_11_4, scale_2367_4); iacc_mat_00_5 = _mm256_madd_epi16(iacc_mat_00_5, scale_0145_5); iacc_mat_01_5 = _mm256_madd_epi16(iacc_mat_01_5, scale_2367_5); iacc_mat_10_5 = _mm256_madd_epi16(iacc_mat_10_5, scale_0145_5); iacc_mat_11_5 = _mm256_madd_epi16(iacc_mat_11_5, scale_2367_5); iacc_mat_00_6 = _mm256_madd_epi16(iacc_mat_00_6, scale_0145_6); iacc_mat_01_6 = _mm256_madd_epi16(iacc_mat_01_6, scale_2367_6); iacc_mat_10_6 = _mm256_madd_epi16(iacc_mat_10_6, scale_0145_6); iacc_mat_11_6 = _mm256_madd_epi16(iacc_mat_11_6, scale_2367_6); iacc_mat_00_7 = _mm256_madd_epi16(iacc_mat_00_7, scale_0145_7); iacc_mat_01_7 = _mm256_madd_epi16(iacc_mat_01_7, scale_2367_7); iacc_mat_10_7 = _mm256_madd_epi16(iacc_mat_10_7, scale_0145_7); 
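// Finish applying the per-sub-block scales (_mm256_madd_epi16 multiplies the 16-bit sub-block dot-product sums by the 16-bit scales and pairwise-adds them into 32-bit lanes), then reduce the eight scaled partials into one 32-bit accumulator per tile quadrant below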
iacc_mat_11_7 = _mm256_madd_epi16(iacc_mat_11_7, scale_2367_7);
__m256i iacc_mat_00 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm256_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm256_add_epi32(iacc_mat_00_6, iacc_mat_00_7)));
__m256i iacc_mat_01 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm256_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm256_add_epi32(iacc_mat_01_6, iacc_mat_01_7)));
__m256i iacc_mat_10 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm256_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm256_add_epi32(iacc_mat_10_6, iacc_mat_10_7)));
__m256i iacc_mat_11 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm256_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm256_add_epi32(iacc_mat_11_6, iacc_mat_11_7)));
// Straighten out to make 4 row vectors
__m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204);
__m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204);
__m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204);
__m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204);
// Load the scale (d) values for all the 4 Q8_K blocks and repeat them across lanes
const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d);
const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
// Multiply with the appropriate scales and accumulate (for both d and dmin) below
acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]);
acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]);
acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]);
acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]);
__m256i lhs_bsums_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1);
__m256i lhs_bsums_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1);
__m256i lhs_bsums_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1);
__m256i lhs_bsums_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1);
// Take two bsums from two Q8_Ks at a time and multiply with the corresponding mins values from each Q2_K
__m256i iacc_row_min_0_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 0), mins_01);
__m256i iacc_row_min_1_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 170), mins_01);
__m256i iacc_row_min_2_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 0), mins_01);
__m256i iacc_row_min_3_01 =
_mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 170), mins_01); __m256i iacc_row_min_0_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 85), mins_23); __m256i iacc_row_min_1_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 255), mins_23); __m256i iacc_row_min_2_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 85), mins_23); __m256i iacc_row_min_3_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 255), mins_23); __m256i iacc_row_min_0_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 0), mins_45); __m256i iacc_row_min_1_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 170), mins_45); __m256i iacc_row_min_2_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 0), mins_45); __m256i iacc_row_min_3_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 170), mins_45); __m256i iacc_row_min_0_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 85), mins_67); __m256i iacc_row_min_1_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 255), mins_67); __m256i iacc_row_min_2_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 85), mins_67); __m256i iacc_row_min_3_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 255), mins_67); __m256i iacc_row_min_0 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm256_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67)); __m256i iacc_row_min_1 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm256_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67)); __m256i iacc_row_min_2 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm256_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67)); __m256i iacc_row_min_3 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm256_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67)); acc_min_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]); acc_min_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]); acc_min_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]); acc_min_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]); } } } // Store the accumulated values for (int i = 0; i < 16; i++) { _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); } } } for (; y < nr / 4; y ++) { const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb); // Take group of eight block_q2_kx8 structures at each pass of the loop and perform dot product operation for (int64_t x = xstart; x < nc / 8; x++) { const block_q2_Kx8 * b_ptr = b_ptr_start + (x * b_nb); // Master FP accumulators __m256 acc_rows[4]; for (int i = 0; i < 4; i++) { acc_rows[i] = _mm256_setzero_ps(); } __m256 acc_min_rows[4]; for (int i = 0; i < 4; i++) { acc_min_rows[i] = _mm256_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { // Delta values - Load the eight scale values of block_q2_kx8 const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); // dmin values - Load the eight dmin values of block_q2_kx8 const __m256 
col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration for (int sb = 0; sb < QK_K / 128; sb++) { // Load the eight block_q2_k for eight sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + sb * 256)); const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32 + sb * 256)); const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64 + sb * 256)); const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96 + sb * 256)); const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 128 + sb * 256)); const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 160 + sb * 256)); const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 192 + sb * 256)); const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 224 + sb * 256)); // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values //superblock sub block which part of sub block const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); // 2-bit -> 8-bit // First sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m3b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m3b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m3b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m3b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) // Second sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_10 = _mm256_and_si256(rhs_raw_mat_0145_2, m3b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) const __m256i rhs_mat_2367_10 = _mm256_and_si256(rhs_raw_mat_2367_2, m3b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) const __m256i rhs_mat_0145_11 = _mm256_and_si256(rhs_raw_mat_0145_3, m3b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) const __m256i rhs_mat_2367_11 = _mm256_and_si256(rhs_raw_mat_2367_3, m3b); //B12(8-15) B13(8-15) B16(8-15) 
B17(8-15) // Third sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 2), m3b); //B20(0-7) B21(0-7) B24(0-7) B25(0-7) const __m256i rhs_mat_2367_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 2), m3b); //B22(0-7) B23(0-7) B26(0-7) B27(0-7) const __m256i rhs_mat_0145_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 2), m3b); //B20(8-15) B21(8-15) B24(8-15) B25(8-15) const __m256i rhs_mat_2367_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 2), m3b); //B22(8-15) B23(8-15) B26(8-15) B27(8-15) // Fourth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 2), m3b); //B30(0-7) B31(0-7) B34(0-7) B35(0-7) const __m256i rhs_mat_2367_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 2), m3b); //B32(0-7) B33(0-7) B36(0-7) B37(0-7) const __m256i rhs_mat_0145_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 2), m3b); //B30(8-15) B31(8-15) B34(8-15) B35(8-15) const __m256i rhs_mat_2367_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 2), m3b); //B32(8-15) B33(8-15) B36(8-15) B37(8-15) // Fifth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m3b); //B40(0-7) B41(0-7) B44(0-7) B45(0-7) const __m256i rhs_mat_2367_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m3b); //B42(0-7) B43(0-7) B46(0-7) B47(0-7) const __m256i rhs_mat_0145_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m3b); //B40(8-15) B41(8-15) B44(8-15) B45(8-15) const __m256i rhs_mat_2367_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m3b); //B42(8-15) B43(8-15) B46(8-15) B47(8-15) // Sixth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m3b); //B50(0-7) B51(0-7) B54(0-7) B55(0-7) const __m256i rhs_mat_2367_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m3b); //B52(0-7) B53(0-7) B56(0-7) B57(0-7) const __m256i rhs_mat_0145_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m3b); //B50(8-15) B51(8-15) B54(8-15) B55(8-15) const __m256i rhs_mat_2367_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m3b); //B52(8-15) B53(8-15) B56(8-15) B57(8-15) // Seventh sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 6), m3b); //B60(0-7) B61(0-7) B64(0-7) B65(0-7) const __m256i rhs_mat_2367_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 6), m3b); //B62(0-7) B63(0-7) B66(0-7) B67(0-7) const __m256i rhs_mat_0145_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 6), m3b); //B60(8-15) B61(8-15) B64(8-15) B65(8-15) const __m256i rhs_mat_2367_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 6), m3b); //B62(8-15) B63(8-15) B66(8-15) B67(8-15) // Eighth sub block of the eight sub blocks processed in the iteration const __m256i rhs_mat_0145_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 6), m3b); //B70(0-7) B71(0-7) B74(0-7) B75(0-7) const __m256i rhs_mat_2367_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 6), m3b); //B72(0-7) B73(0-7) B76(0-7) B77(0-7) const __m256i rhs_mat_0145_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 6), m3b); //B70(8-15) 
B71(8-15) B74(8-15) B75(8-15) const __m256i rhs_mat_2367_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 6), m3b); //B72(8-15) B73(8-15) B76(8-15) B77(8-15) // Shuffle pattern one - right side input const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) const __m256i rhs_mat_0145_20_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_20, 136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3) const __m256i rhs_mat_2367_20_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_20, 136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3) const __m256i rhs_mat_0145_21_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_21, 136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11) const __m256i rhs_mat_2367_21_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_21, 136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11) const __m256i rhs_mat_0145_30_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_30, 136); //B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3) const __m256i rhs_mat_2367_30_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_30, 136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3) const __m256i rhs_mat_0145_31_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_31, 136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11 const __m256i rhs_mat_2367_31_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_31, 136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11) const __m256i rhs_mat_0145_40_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_40, 136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3) const __m256i rhs_mat_2367_40_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_40, 136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3) const __m256i rhs_mat_0145_41_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_41, 136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11) const __m256i rhs_mat_2367_41_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_41, 136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11) const __m256i rhs_mat_0145_50_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_50, 136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3) const __m256i 
rhs_mat_2367_50_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_50, 136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3) const __m256i rhs_mat_0145_51_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_51, 136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11) const __m256i rhs_mat_2367_51_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_51, 136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11) const __m256i rhs_mat_0145_60_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_60, 136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3) const __m256i rhs_mat_2367_60_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_60, 136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3) const __m256i rhs_mat_0145_61_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_61, 136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11) const __m256i rhs_mat_2367_61_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_61, 136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11) const __m256i rhs_mat_0145_70_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_70, 136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3) const __m256i rhs_mat_2367_70_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_70, 136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3) const __m256i rhs_mat_0145_71_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_71, 136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11) const __m256i rhs_mat_2367_71_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_71, 136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11) // Shuffle pattern two - right side input const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) const __m256i rhs_mat_0145_20_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_20, 221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7) const __m256i rhs_mat_2367_20_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_20, 221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7) const __m256i rhs_mat_0145_21_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_21, 221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15) const 
__m256i rhs_mat_2367_21_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_21, 221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15) const __m256i rhs_mat_0145_30_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_30, 221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7) const __m256i rhs_mat_2367_30_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_30, 221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7) const __m256i rhs_mat_0145_31_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_31, 221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15) const __m256i rhs_mat_2367_31_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_31, 221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15) const __m256i rhs_mat_0145_40_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_40, 221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7) const __m256i rhs_mat_2367_40_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_40, 221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7) const __m256i rhs_mat_0145_41_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_41, 221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15) const __m256i rhs_mat_2367_41_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_41, 221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15) const __m256i rhs_mat_0145_50_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_50, 221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7) const __m256i rhs_mat_2367_50_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_50, 221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7) const __m256i rhs_mat_0145_51_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_51, 221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15) const __m256i rhs_mat_2367_51_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_51, 221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15) const __m256i rhs_mat_0145_60_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_60, 221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7) const __m256i rhs_mat_2367_60_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_60, 221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7) const __m256i rhs_mat_0145_61_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_61, 221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15) const __m256i rhs_mat_2367_61_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_61, 221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15) const __m256i rhs_mat_0145_70_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_70, 221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7) const __m256i rhs_mat_2367_70_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_70, 221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7) const __m256i rhs_mat_0145_71_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_71, 221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15) const __m256i rhs_mat_2367_71_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_71, 221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15) //Scales and Mins of corresponding sub blocks from different Q2_K structures are stored together //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 
m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71 // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop const __m128i mins_and_scales_01 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + sb * 64)); const __m128i mins_and_scales_23 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 16 + sb * 64)); const __m128i mins_and_scales_45 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 32 + sb * 64)); const __m128i mins_and_scales_67 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 48 + sb * 64)); // Extract scales which is lower half from mins_and_scales const __m128i scales_01 = _mm_and_si128(mins_and_scales_01, m4b_sse); const __m128i scales_23 = _mm_and_si128(mins_and_scales_23, m4b_sse); const __m128i scales_45 = _mm_and_si128(mins_and_scales_45, m4b_sse); const __m128i scales_67 = _mm_and_si128(mins_and_scales_67, m4b_sse); // Extract mins which is upper half from mins_and_scales const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_01, 4), m4b_sse)); const __m256i mins_23 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_23, 4), m4b_sse)); const __m256i mins_45 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_45, 4), m4b_sse)); const __m256i mins_67 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_67, 4), m4b_sse)); const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_01, scalesmask1_sse)); const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_01, scalesmask2_sse)); const __m256i scales_2 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_23, scalesmask1_sse)); const __m256i scales_3 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_23, scalesmask2_sse)); const __m256i scales_4 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_45, scalesmask1_sse)); const __m256i scales_5 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_45, scalesmask2_sse)); const __m256i scales_6 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_67, scalesmask1_sse)); const __m256i scales_7 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_67, scalesmask2_sse)); const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); const __m256i scale_0145_2 = _mm256_shuffle_epi32(scales_2, 68); const __m256i scale_2367_2 = _mm256_shuffle_epi32(scales_2, 238); const __m256i scale_0145_3 = _mm256_shuffle_epi32(scales_3, 68); const __m256i scale_2367_3 = _mm256_shuffle_epi32(scales_3, 238); const __m256i scale_0145_4 = _mm256_shuffle_epi32(scales_4, 68); const __m256i scale_2367_4 = _mm256_shuffle_epi32(scales_4, 238); const __m256i scale_0145_5 = _mm256_shuffle_epi32(scales_5, 68); const __m256i scale_2367_5 = _mm256_shuffle_epi32(scales_5, 238); const __m256i scale_0145_6 = _mm256_shuffle_epi32(scales_6, 68); const __m256i scale_2367_6 = _mm256_shuffle_epi32(scales_6, 238); const __m256i scale_0145_7 = _mm256_shuffle_epi32(scales_7, 68); const __m256i scale_2367_7 = _mm256_shuffle_epi32(scales_7, 238); // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 // Loaded as set of 128 bit vectors and repeated into a 256 bit vector __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 512 * sb))); __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); __m256i 
lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 512 * sb))); __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 512 * sb))); __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 512 * sb))); __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); __m256i lhs_mat_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 512 * sb))); __m256i lhs_mat_01_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 0); __m256i lhs_mat_23_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 17); __m256i lhs_mat_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 512 * sb))); __m256i lhs_mat_01_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 0); __m256i lhs_mat_23_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 17); __m256i lhs_mat_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 512 * sb))); __m256i lhs_mat_01_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 0); __m256i lhs_mat_23_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 17); __m256i lhs_mat_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 512 * sb))); __m256i lhs_mat_01_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 0); __m256i lhs_mat_23_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 17); __m256i lhs_mat_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 + 512 * sb))); __m256i lhs_mat_01_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 0); __m256i lhs_mat_23_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 17); __m256i lhs_mat_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 288 + 512 * sb))); __m256i lhs_mat_01_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 0); __m256i lhs_mat_23_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 17); __m256i lhs_mat_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 320 + 512 * sb))); __m256i lhs_mat_01_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 0); __m256i lhs_mat_23_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 17); __m256i lhs_mat_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 352 + 512 * sb))); __m256i lhs_mat_01_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 0); __m256i lhs_mat_23_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 17); __m256i lhs_mat_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 384 + 512 * sb))); __m256i lhs_mat_01_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 0); __m256i lhs_mat_23_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 17); __m256i lhs_mat_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 416 + 512 * sb))); __m256i lhs_mat_01_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, 
lhs_mat_0123_61, 0); __m256i lhs_mat_23_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 17); __m256i lhs_mat_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 448 + 512 * sb))); __m256i lhs_mat_01_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 0); __m256i lhs_mat_23_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 17); __m256i lhs_mat_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 480 + 512 * sb))); __m256i lhs_mat_01_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 0); __m256i lhs_mat_23_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 17); // Bsums are loaded for the different Q8_K blocks __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 32 * sb))); __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 8 + 32 * sb)); __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 16 + 32 * sb))); __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 24 + 32 * sb)); // Shuffle pattern one - left side input const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) const __m256i lhs_mat_01_20_sp1 = _mm256_shuffle_epi32(lhs_mat_01_20, 160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) const __m256i lhs_mat_23_20_sp1 = _mm256_shuffle_epi32(lhs_mat_23_20, 160); //A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) const __m256i lhs_mat_01_21_sp1 = _mm256_shuffle_epi32(lhs_mat_01_21, 160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) const __m256i lhs_mat_23_21_sp1 = _mm256_shuffle_epi32(lhs_mat_23_21, 160); //A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) const __m256i lhs_mat_01_30_sp1 = _mm256_shuffle_epi32(lhs_mat_01_30, 160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) const __m256i lhs_mat_23_30_sp1 = _mm256_shuffle_epi32(lhs_mat_23_30, 160); //A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) const __m256i lhs_mat_01_31_sp1 = _mm256_shuffle_epi32(lhs_mat_01_31, 160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) const __m256i lhs_mat_23_31_sp1 = _mm256_shuffle_epi32(lhs_mat_23_31, 160); //A32(8-11) A33(8-11) 
A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11) const __m256i lhs_mat_01_40_sp1 = _mm256_shuffle_epi32(lhs_mat_01_40, 160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) const __m256i lhs_mat_23_40_sp1 = _mm256_shuffle_epi32(lhs_mat_23_40, 160); //A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) const __m256i lhs_mat_01_41_sp1 = _mm256_shuffle_epi32(lhs_mat_01_41, 160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) const __m256i lhs_mat_23_41_sp1 = _mm256_shuffle_epi32(lhs_mat_23_41, 160); //A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) const __m256i lhs_mat_01_50_sp1 = _mm256_shuffle_epi32(lhs_mat_01_50, 160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) const __m256i lhs_mat_23_50_sp1 = _mm256_shuffle_epi32(lhs_mat_23_50, 160); //A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) const __m256i lhs_mat_01_51_sp1 = _mm256_shuffle_epi32(lhs_mat_01_51, 160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) const __m256i lhs_mat_23_51_sp1 = _mm256_shuffle_epi32(lhs_mat_23_51, 160); //A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) const __m256i lhs_mat_01_60_sp1 = _mm256_shuffle_epi32(lhs_mat_01_60, 160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) const __m256i lhs_mat_23_60_sp1 = _mm256_shuffle_epi32(lhs_mat_23_60, 160); //A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) const __m256i lhs_mat_01_61_sp1 = _mm256_shuffle_epi32(lhs_mat_01_61, 160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) const __m256i lhs_mat_23_61_sp1 = _mm256_shuffle_epi32(lhs_mat_23_61, 160); //A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) const __m256i lhs_mat_01_70_sp1 = _mm256_shuffle_epi32(lhs_mat_01_70, 160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) const __m256i lhs_mat_23_70_sp1 = _mm256_shuffle_epi32(lhs_mat_23_70, 160); //A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) const __m256i lhs_mat_01_71_sp1 = _mm256_shuffle_epi32(lhs_mat_01_71, 160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) const __m256i lhs_mat_23_71_sp1 = _mm256_shuffle_epi32(lhs_mat_23_71, 160); //A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) // Shuffle pattern two- left side input const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) const __m256i lhs_mat_01_11_sp2 = 
_mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) const __m256i lhs_mat_01_20_sp2 = _mm256_shuffle_epi32(lhs_mat_01_20, 245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) const __m256i lhs_mat_23_20_sp2 = _mm256_shuffle_epi32(lhs_mat_23_20, 245); //A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) const __m256i lhs_mat_01_21_sp2 = _mm256_shuffle_epi32(lhs_mat_01_21, 245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) const __m256i lhs_mat_23_21_sp2 = _mm256_shuffle_epi32(lhs_mat_23_21, 245); //A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) const __m256i lhs_mat_01_30_sp2 = _mm256_shuffle_epi32(lhs_mat_01_30, 245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) const __m256i lhs_mat_23_30_sp2 = _mm256_shuffle_epi32(lhs_mat_23_30, 245); //A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) const __m256i lhs_mat_01_31_sp2 = _mm256_shuffle_epi32(lhs_mat_01_31, 245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) const __m256i lhs_mat_23_31_sp2 = _mm256_shuffle_epi32(lhs_mat_23_31, 245); //A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) const __m256i lhs_mat_01_40_sp2 = _mm256_shuffle_epi32(lhs_mat_01_40, 245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) const __m256i lhs_mat_23_40_sp2 = _mm256_shuffle_epi32(lhs_mat_23_40, 245); //A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) const __m256i lhs_mat_01_41_sp2 = _mm256_shuffle_epi32(lhs_mat_01_41, 245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) const __m256i lhs_mat_23_41_sp2 = _mm256_shuffle_epi32(lhs_mat_23_41, 245); //A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) const __m256i lhs_mat_01_50_sp2 = _mm256_shuffle_epi32(lhs_mat_01_50, 245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) const __m256i lhs_mat_23_50_sp2 = _mm256_shuffle_epi32(lhs_mat_23_50, 245); //A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) const __m256i lhs_mat_01_51_sp2 = _mm256_shuffle_epi32(lhs_mat_01_51, 245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) const __m256i lhs_mat_23_51_sp2 = _mm256_shuffle_epi32(lhs_mat_23_51, 245); //A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) const __m256i lhs_mat_01_60_sp2 = _mm256_shuffle_epi32(lhs_mat_01_60, 245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) const __m256i lhs_mat_23_60_sp2 = _mm256_shuffle_epi32(lhs_mat_23_60, 245); //A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) const __m256i lhs_mat_01_61_sp2 = _mm256_shuffle_epi32(lhs_mat_01_61, 245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) const __m256i lhs_mat_23_61_sp2 = _mm256_shuffle_epi32(lhs_mat_23_61, 245); //A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) const __m256i lhs_mat_01_70_sp2 = _mm256_shuffle_epi32(lhs_mat_01_70, 245); //A70(4-7) A70(4-7) 
A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) const __m256i lhs_mat_23_70_sp2 = _mm256_shuffle_epi32(lhs_mat_23_70, 245); //A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) const __m256i lhs_mat_01_71_sp2 = _mm256_shuffle_epi32(lhs_mat_01_71, 245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) const __m256i lhs_mat_23_71_sp2 = _mm256_shuffle_epi32(lhs_mat_23_71, 245); //A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)); __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)); __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)); __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)); __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)); __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)); __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)); __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)); __m256i iacc_mat_00_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_01_21_sp1)); __m256i iacc_mat_01_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_01_21_sp1)); __m256i iacc_mat_10_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_23_21_sp1)); __m256i iacc_mat_11_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_23_21_sp1)); __m256i iacc_mat_00_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_01_31_sp1)); __m256i iacc_mat_01_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_01_31_sp1)); __m256i iacc_mat_10_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_23_31_sp1)); __m256i iacc_mat_11_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_23_31_sp1)); __m256i iacc_mat_00_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, 
lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_01_41_sp1)); __m256i iacc_mat_01_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_01_41_sp1)); __m256i iacc_mat_10_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_23_41_sp1)); __m256i iacc_mat_11_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_23_41_sp1)); __m256i iacc_mat_00_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_01_51_sp1)); __m256i iacc_mat_01_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_01_51_sp1)); __m256i iacc_mat_10_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_23_51_sp1)); __m256i iacc_mat_11_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_23_51_sp1)); __m256i iacc_mat_00_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_01_61_sp1)); __m256i iacc_mat_01_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_01_61_sp1)); __m256i iacc_mat_10_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_23_61_sp1)); __m256i iacc_mat_11_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_23_61_sp1)); __m256i iacc_mat_00_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_01_71_sp1)); __m256i iacc_mat_01_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_01_71_sp1)); __m256i iacc_mat_10_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_23_71_sp1)); __m256i iacc_mat_11_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_23_71_sp1)); __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)); __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)); __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)); __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)); __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)); __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, 
lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)); __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)); __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)); __m256i iacc_mat_00_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_01_21_sp2)); __m256i iacc_mat_01_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_01_21_sp2)); __m256i iacc_mat_10_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_23_21_sp2)); __m256i iacc_mat_11_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_23_21_sp2)); __m256i iacc_mat_00_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_01_31_sp2)); __m256i iacc_mat_01_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_01_31_sp2)); __m256i iacc_mat_10_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_23_31_sp2)); __m256i iacc_mat_11_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_23_31_sp2)); __m256i iacc_mat_00_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_01_41_sp2)); __m256i iacc_mat_01_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_01_41_sp2)); __m256i iacc_mat_10_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_23_41_sp2)); __m256i iacc_mat_11_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_23_41_sp2)); __m256i iacc_mat_00_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_01_51_sp2)); __m256i iacc_mat_01_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_01_51_sp2)); __m256i iacc_mat_10_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_23_51_sp2)); __m256i iacc_mat_11_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_23_51_sp2)); __m256i iacc_mat_00_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_01_61_sp2)); __m256i iacc_mat_01_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_01_61_sp2)); __m256i iacc_mat_10_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, 
lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_23_61_sp2)); __m256i iacc_mat_11_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_23_61_sp2)); __m256i iacc_mat_00_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_01_71_sp2)); __m256i iacc_mat_01_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_01_71_sp2)); __m256i iacc_mat_10_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_23_71_sp2)); __m256i iacc_mat_11_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_23_71_sp2)); // Combine results from both shuffle patterns for each output block. __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); __m256i iacc_mat_00_2 = _mm256_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2); __m256i iacc_mat_01_2 = _mm256_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2); __m256i iacc_mat_10_2 = _mm256_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2); __m256i iacc_mat_11_2 = _mm256_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2); __m256i iacc_mat_00_3 = _mm256_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2); __m256i iacc_mat_01_3 = _mm256_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2); __m256i iacc_mat_10_3 = _mm256_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2); __m256i iacc_mat_11_3 = _mm256_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2); __m256i iacc_mat_00_4 = _mm256_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2); __m256i iacc_mat_01_4 = _mm256_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2); __m256i iacc_mat_10_4 = _mm256_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2); __m256i iacc_mat_11_4 = _mm256_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2); __m256i iacc_mat_00_5 = _mm256_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2); __m256i iacc_mat_01_5 = _mm256_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2); __m256i iacc_mat_10_5 = _mm256_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2); __m256i iacc_mat_11_5 = _mm256_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2); __m256i iacc_mat_00_6 = _mm256_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2); __m256i iacc_mat_01_6 = _mm256_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2); __m256i iacc_mat_10_6 = _mm256_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2); __m256i iacc_mat_11_6 = _mm256_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2); __m256i iacc_mat_00_7 = _mm256_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2); __m256i iacc_mat_01_7 = _mm256_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2); __m256i iacc_mat_10_7 = _mm256_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2); __m256i iacc_mat_11_7 = _mm256_add_epi16(iacc_mat_11_7_sp1, 
iacc_mat_11_7_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); iacc_mat_00_2 = _mm256_madd_epi16(iacc_mat_00_2, scale_0145_2); iacc_mat_01_2 = _mm256_madd_epi16(iacc_mat_01_2, scale_2367_2); iacc_mat_10_2 = _mm256_madd_epi16(iacc_mat_10_2, scale_0145_2); iacc_mat_11_2 = _mm256_madd_epi16(iacc_mat_11_2, scale_2367_2); iacc_mat_00_3 = _mm256_madd_epi16(iacc_mat_00_3, scale_0145_3); iacc_mat_01_3 = _mm256_madd_epi16(iacc_mat_01_3, scale_2367_3); iacc_mat_10_3 = _mm256_madd_epi16(iacc_mat_10_3, scale_0145_3); iacc_mat_11_3 = _mm256_madd_epi16(iacc_mat_11_3, scale_2367_3); iacc_mat_00_4 = _mm256_madd_epi16(iacc_mat_00_4, scale_0145_4); iacc_mat_01_4 = _mm256_madd_epi16(iacc_mat_01_4, scale_2367_4); iacc_mat_10_4 = _mm256_madd_epi16(iacc_mat_10_4, scale_0145_4); iacc_mat_11_4 = _mm256_madd_epi16(iacc_mat_11_4, scale_2367_4); iacc_mat_00_5 = _mm256_madd_epi16(iacc_mat_00_5, scale_0145_5); iacc_mat_01_5 = _mm256_madd_epi16(iacc_mat_01_5, scale_2367_5); iacc_mat_10_5 = _mm256_madd_epi16(iacc_mat_10_5, scale_0145_5); iacc_mat_11_5 = _mm256_madd_epi16(iacc_mat_11_5, scale_2367_5); iacc_mat_00_6 = _mm256_madd_epi16(iacc_mat_00_6, scale_0145_6); iacc_mat_01_6 = _mm256_madd_epi16(iacc_mat_01_6, scale_2367_6); iacc_mat_10_6 = _mm256_madd_epi16(iacc_mat_10_6, scale_0145_6); iacc_mat_11_6 = _mm256_madd_epi16(iacc_mat_11_6, scale_2367_6); iacc_mat_00_7 = _mm256_madd_epi16(iacc_mat_00_7, scale_0145_7); iacc_mat_01_7 = _mm256_madd_epi16(iacc_mat_01_7, scale_2367_7); iacc_mat_10_7 = _mm256_madd_epi16(iacc_mat_10_7, scale_0145_7); iacc_mat_11_7 = _mm256_madd_epi16(iacc_mat_11_7, scale_2367_7); __m256i iacc_mat_00 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm256_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm256_add_epi32(iacc_mat_00_6, iacc_mat_00_7))); __m256i iacc_mat_01 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm256_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm256_add_epi32(iacc_mat_01_6, iacc_mat_01_7))); __m256i iacc_mat_10 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm256_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm256_add_epi32(iacc_mat_10_6, iacc_mat_10_7))); __m256i iacc_mat_11 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm256_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm256_add_epi32(iacc_mat_11_6, iacc_mat_11_7))); // Straighten out to make 4 row vectors __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, 
_mm256_shuffle_epi32(iacc_mat_11, 78), 204); __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); // Multiply with appropiate scales and accumulate (for both d and dmin) below acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); __m256i lhs_bsums_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1); __m256i lhs_bsums_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1); __m256i lhs_bsums_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1); __m256i lhs_bsums_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1); // Take two bsums from two Q8_Ks at a time and multiply with corresponding mins values from each Q2_K __m256i iacc_row_min_0_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 0), mins_01); __m256i iacc_row_min_1_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 170), mins_01); __m256i iacc_row_min_2_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 0), mins_01); __m256i iacc_row_min_3_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 170), mins_01); __m256i iacc_row_min_0_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 85), mins_23); __m256i iacc_row_min_1_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 255), mins_23); __m256i iacc_row_min_2_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 85), mins_23); __m256i iacc_row_min_3_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 255), mins_23); __m256i iacc_row_min_0_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 0), mins_45); __m256i iacc_row_min_1_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 170), mins_45); __m256i iacc_row_min_2_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 0), mins_45); __m256i iacc_row_min_3_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 170), mins_45); __m256i iacc_row_min_0_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 85), mins_67); __m256i iacc_row_min_1_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 255), mins_67); __m256i iacc_row_min_2_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 85), mins_67); __m256i iacc_row_min_3_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 255), mins_67); __m256i iacc_row_min_0 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm256_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67)); __m256i iacc_row_min_1 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_1_01, 
iacc_row_min_1_23), _mm256_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67)); __m256i iacc_row_min_2 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm256_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67)); __m256i iacc_row_min_3 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm256_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67)); acc_min_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]); acc_min_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); acc_min_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); acc_min_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]); } } // Store the accumulated values for (int i = 0; i < 4; i++) { _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); } } } #else ggml_gemm_q2_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); #endif } ggml-org-ggml-3678254/src/ggml-cpu/binary-ops.cpp000066400000000000000000000152661512524704700214670ustar00rootroot00000000000000#include "binary-ops.h" #if defined(GGML_USE_ACCELERATE) #include using vDSP_fn_t = void (*)(const float *, vDSP_Stride, const float *, vDSP_Stride, float *, vDSP_Stride, vDSP_Length); #endif static inline float op_add(float a, float b) { return a + b; } static inline float op_sub(float a, float b) { return a - b; } static inline float op_mul(float a, float b) { return a * b; } static inline float op_div(float a, float b) { return a / b; } template static inline void vec_binary_op_contiguous(const int64_t n, dst_t * z, const src0_t * x, const src1_t * y) { constexpr auto src0_to_f32 = type_conversion_table::to_f32; constexpr auto src1_to_f32 = type_conversion_table::to_f32; constexpr auto f32_to_dst = type_conversion_table::from_f32; for (int i = 0; i < n; i++) { z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(y[i]))); } } template static inline void vec_binary_op_non_contiguous(const int64_t n, const int64_t ne10, const int64_t nb10, dst_t * z, const src0_t * x, const src1_t * y) { constexpr auto src0_to_f32 = type_conversion_table::to_f32; constexpr auto src1_to_f32 = type_conversion_table::to_f32; constexpr auto f32_to_dst = type_conversion_table::from_f32; for (int i = 0; i < n; i++) { int i10 = i % ne10; const src1_t * y_ptr = (const src1_t *)((const char *)y + i10*nb10); z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(*y_ptr))); } } template static void apply_binary_op(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst)); GGML_TENSOR_BINARY_OP_LOCALS GGML_ASSERT( nb0 == sizeof(dst_t)); GGML_ASSERT(nb00 == sizeof(src0_t)); const auto [ir0, ir1] = get_thread_range(params, src0); const bool is_src1_contiguous = (nb10 == sizeof(src1_t)); if (!is_src1_contiguous) { // broadcast not implemented yet for non-contiguous GGML_ASSERT(ggml_are_same_shape(src0, src1)); } #ifdef GGML_USE_ACCELERATE vDSP_fn_t vDSP_op = nullptr; // TODO - avoid the f32-only check using type 'trait' lookup tables and row-based 
src-to-float conversion functions if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { if (op == op_add) { vDSP_op = vDSP_vadd; } else if (op == op_sub) { vDSP_op = vDSP_vsub; } else if (op == op_mul) { vDSP_op = vDSP_vmul; } else if (op == op_div) { vDSP_op = vDSP_vdiv; } } #endif for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*ne01); const int64_t i02 = (ir - i03*ne02*ne01)/ne01; const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 ); const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); const src1_t * src1_ptr = (const src1_t *) ((const char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11); if (is_src1_contiguous) { // src1 is broadcastable across src0 and dst in i1, i2, i3 const int64_t nr0 = ne00 / ne10; for (int64_t r = 0; r < nr0; ++r) { #ifdef GGML_USE_ACCELERATE if constexpr (std::is_same_v && std::is_same_v && std::is_same_v) { if (vDSP_op != nullptr) { vDSP_op(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10); continue; } } #endif vec_binary_op_contiguous(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr); } } else { vec_binary_op_non_contiguous(ne0, ne10, nb10, dst_ptr, src0_ptr, src1_ptr); } } } // TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates template static void binary_op(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; /* */ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32 apply_binary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16 apply_binary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16 apply_binary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_BF16) { apply_binary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { apply_binary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) { apply_binary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { apply_binary_op(params, dst); } else { GGML_ABORT("%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__, ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type)); } } void ggml_compute_forward_add_non_quantized(const ggml_compute_params * params, ggml_tensor * dst) { binary_op(params, dst); } void ggml_compute_forward_sub(const ggml_compute_params * params, ggml_tensor * dst) { binary_op(params, dst); } void ggml_compute_forward_mul(const ggml_compute_params * params, ggml_tensor * dst) { binary_op(params, dst); } void ggml_compute_forward_div(const ggml_compute_params * params, ggml_tensor * dst) { binary_op(params, dst); } ggml-org-ggml-3678254/src/ggml-cpu/binary-ops.h000066400000000000000000000010061512524704700211170ustar00rootroot00000000000000#pragma once #include "common.h" #ifdef __cplusplus extern "C" { #endif void 
ggml_compute_forward_add_non_quantized(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sub(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_mul(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_div(const struct ggml_compute_params * params, struct ggml_tensor * dst); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/cmake/000077500000000000000000000000001512524704700177465ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/cmake/FindSIMD.cmake000066400000000000000000000051331512524704700223070ustar00rootroot00000000000000include(CheckCSourceRuns) set(AVX_CODE " #include int main() { __m256 a; a = _mm256_set1_ps(0); return 0; } ") set(AVX512_CODE " #include int main() { __m512i a = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); __m512i b = a; __mmask64 equality_mask = _mm512_cmp_epi8_mask(a, b, _MM_CMPINT_EQ); return 0; } ") set(AVX2_CODE " #include int main() { __m256i a = {0}; a = _mm256_abs_epi16(a); __m256i x; _mm256_extract_epi64(x, 0); // we rely on this in our AVX2 code return 0; } ") set(FMA_CODE " #include int main() { __m256 acc = _mm256_setzero_ps(); const __m256 d = _mm256_setzero_ps(); const __m256 p = _mm256_setzero_ps(); acc = _mm256_fmadd_ps( d, p, acc ); return 0; } ") macro(check_sse type flags) set(__FLAG_I 1) set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) foreach (__FLAG ${flags}) if (NOT ${type}_FOUND) set(CMAKE_REQUIRED_FLAGS ${__FLAG}) check_c_source_runs("${${type}_CODE}" HAS_${type}_${__FLAG_I}) if (HAS_${type}_${__FLAG_I}) set(${type}_FOUND TRUE CACHE BOOL "${type} support") set(${type}_FLAGS "${__FLAG}" CACHE STRING "${type} flags") endif() math(EXPR __FLAG_I "${__FLAG_I}+1") endif() endforeach() set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) if (NOT ${type}_FOUND) set(${type}_FOUND FALSE CACHE BOOL "${type} support") set(${type}_FLAGS "" CACHE STRING "${type} flags") endif() mark_as_advanced(${type}_FOUND ${type}_FLAGS) endmacro() # flags are for MSVC only! check_sse("AVX" " ;/arch:AVX") if (NOT ${AVX_FOUND}) set(GGML_AVX OFF) else() set(GGML_AVX ON) endif() check_sse("AVX2" " ;/arch:AVX2") check_sse("FMA" " ;/arch:AVX2") if ((NOT ${AVX2_FOUND}) OR (NOT ${FMA_FOUND})) set(GGML_AVX2 OFF) else() set(GGML_AVX2 ON) endif() check_sse("AVX512" " ;/arch:AVX512") if (NOT ${AVX512_FOUND}) set(GGML_AVX512 OFF) else() set(GGML_AVX512 ON) endif() ggml-org-ggml-3678254/src/ggml-cpu/common.h000066400000000000000000000041421512524704700203300ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-impl.h" #include "simd-mappings.h" #ifdef __cplusplus #include // convenience functions/macros for use in template calls // note: these won't be required after the 'traits' lookup table is used. 
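// A minimal usage sketch, assuming the templated-kernel pattern used in binary-ops.cpp
// (cf. vec_binary_op_contiguous); the function name vec_op_example is hypothetical and is
// shown only to illustrate how the conversion helpers and the type_conversion_table
// specializations below are consumed:
//
//     template <float (*op)(float, float), typename dst_t, typename src_t>
//     static inline void vec_op_example(int64_t n, dst_t * z, const src_t * x, float y) {
//         constexpr auto to_f32   = type_conversion_table<src_t>::to_f32;
//         constexpr auto from_f32 = type_conversion_table<dst_t>::from_f32;
//         for (int64_t i = 0; i < n; ++i) {
//             z[i] = from_f32(op(to_f32(x[i]), y));
//         }
//     }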
static inline ggml_fp16_t f32_to_f16(float x) { return GGML_CPU_FP32_TO_FP16(x); } static inline float f16_to_f32(ggml_fp16_t x) { return GGML_CPU_FP16_TO_FP32(x); } static inline ggml_bf16_t f32_to_bf16(float x) { return GGML_FP32_TO_BF16(x); } static inline float bf16_to_f32(ggml_bf16_t x) { return GGML_BF16_TO_FP32(x); } static inline float i32_to_f32(int32_t x) { return x; } static inline int32_t f32_to_i32(float x) { return x; } static inline float f32_to_f32(float x) { return x; } // TODO - merge this into the traits table, after using row-based conversions template struct type_conversion_table; template <> struct type_conversion_table { static constexpr float (*to_f32)(ggml_fp16_t) = f16_to_f32; static constexpr ggml_fp16_t (*from_f32)(float) = f32_to_f16; }; template <> struct type_conversion_table { static constexpr float (*to_f32)(float) = f32_to_f32; static constexpr float (*from_f32)(float) = f32_to_f32; }; template <> struct type_conversion_table { static constexpr float (*to_f32)(ggml_bf16_t) = bf16_to_f32; static constexpr ggml_bf16_t (*from_f32)(float) = f32_to_bf16; }; template <> struct type_conversion_table { static constexpr float (*to_f32)(int32_t) = i32_to_f32; static constexpr int32_t (*from_f32)(float) = f32_to_i32; }; static std::pair get_thread_range(const struct ggml_compute_params * params, const struct ggml_tensor * src0) { const int64_t ith = params->ith; const int64_t nth = params->nth; const int64_t nr = ggml_nrows(src0); // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this thread const int64_t ir0 = dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); return {ir0, ir1}; } #endif ggml-org-ggml-3678254/src/ggml-cpu/ggml-cpu-impl.h000066400000000000000000000314531512524704700215170ustar00rootroot00000000000000#pragma once // GGML CPU internal header #include "ggml.h" #include "ggml-impl.h" #include // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/ //#include #include #include // memcpy #include // fabsf #ifdef __cplusplus extern "C" { #endif struct ggml_compute_params { // ith = thread index, nth = number of threads int ith, nth; // work buffer for all threads size_t wsize; void * wdata; struct ggml_threadpool * threadpool; }; #if defined(_MSC_VER) #define m512bh(p) p #define m512i(p) p #else #define m512bh(p) (__m512bh)(p) #define m512i(p) (__m512i)(p) #endif // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 #if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__)) #ifndef __FMA__ #define __FMA__ #endif #ifndef __F16C__ #define __F16C__ #endif #endif // __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available #if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)) #ifndef __SSE3__ #define __SSE3__ #endif #ifndef __SSSE3__ #define __SSSE3__ #endif #endif #if defined(__s390x__) && defined(__VEC__) #ifndef __VXE__ #define __VXE__ #endif // __VXE__ #ifndef __VXE2__ #define __VXE2__ #endif // __VXE2__ #endif // __s390x__ && __VEC__ #if defined(__ARM_FEATURE_SVE) && defined(__linux__) #include #endif #if defined(__ARM_NEON) // ref: https://github.com/ggml-org/llama.cpp/pull/5404 #ifdef _MSC_VER #define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) } #else #define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) } #endif // _MSC_VER #if !defined(__aarch64__) // 32-bit ARM compatibility // vaddlvq_s16 // 
vpaddq_s16 // vpaddq_s32 // vaddvq_s32 // vaddvq_f32 // vmaxvq_f32 // vcvtnq_s32_f32 // vzip1_u8 // vzip2_u8 inline static int32_t vaddlvq_s16(int16x8_t v) { int32x4_t v0 = vreinterpretq_s32_s64(vpaddlq_s32(vpaddlq_s16(v))); return vgetq_lane_s32(v0, 0) + vgetq_lane_s32(v0, 2); } inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) { int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); return vcombine_s16(a0, b0); } inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) { int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a)); int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b)); return vcombine_s32(a0, b0); } inline static int32_t vaddvq_s32(int32x4_t v) { return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); } inline static float vaddvq_f32(float32x4_t v) { return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); } inline static float vmaxvq_f32(float32x4_t v) { return MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); } inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { int32x4_t res; res[0] = roundf(vgetq_lane_f32(v, 0)); res[1] = roundf(vgetq_lane_f32(v, 1)); res[2] = roundf(vgetq_lane_f32(v, 2)); res[3] = roundf(vgetq_lane_f32(v, 3)); return res; } inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) { uint8x8_t res; res[0] = a[0]; res[1] = b[0]; res[2] = a[1]; res[3] = b[1]; res[4] = a[2]; res[5] = b[2]; res[6] = a[3]; res[7] = b[3]; return res; } inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) { uint8x8_t res; res[0] = a[4]; res[1] = b[4]; res[2] = a[5]; res[3] = b[5]; res[4] = a[6]; res[5] = b[6]; res[6] = a[7]; res[7] = b[7]; return res; } // vld1q_s16_x2 // vld1q_u8_x2 // vld1q_u8_x4 // vld1q_s8_x2 // vld1q_s8_x4 // TODO: double-check these work correctly typedef struct ggml_int16x8x2_t { int16x8_t val[2]; } ggml_int16x8x2_t; inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) { ggml_int16x8x2_t res; res.val[0] = vld1q_s16(ptr + 0); res.val[1] = vld1q_s16(ptr + 8); return res; } typedef struct ggml_uint8x16x2_t { uint8x16_t val[2]; } ggml_uint8x16x2_t; inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) { ggml_uint8x16x2_t res; res.val[0] = vld1q_u8(ptr + 0); res.val[1] = vld1q_u8(ptr + 16); return res; } typedef struct ggml_uint8x16x4_t { uint8x16_t val[4]; } ggml_uint8x16x4_t; inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) { ggml_uint8x16x4_t res; res.val[0] = vld1q_u8(ptr + 0); res.val[1] = vld1q_u8(ptr + 16); res.val[2] = vld1q_u8(ptr + 32); res.val[3] = vld1q_u8(ptr + 48); return res; } typedef struct ggml_int8x16x2_t { int8x16_t val[2]; } ggml_int8x16x2_t; inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) { ggml_int8x16x2_t res; res.val[0] = vld1q_s8(ptr + 0); res.val[1] = vld1q_s8(ptr + 16); return res; } typedef struct ggml_int8x16x4_t { int8x16_t val[4]; } ggml_int8x16x4_t; inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) { ggml_int8x16x4_t res; res.val[0] = vld1q_s8(ptr + 0); res.val[1] = vld1q_s8(ptr + 16); res.val[2] = vld1q_s8(ptr + 32); res.val[3] = vld1q_s8(ptr + 48); return res; } // NOTE: not tested inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) { int8x16_t res; res[ 0] = a[b[ 0]]; res[ 1] = a[b[ 1]]; res[ 2] = a[b[ 2]]; res[ 3] = a[b[ 3]]; res[ 4] = a[b[ 4]]; res[ 5] = a[b[ 5]]; res[ 6] = a[b[ 6]]; 
res[ 7] = a[b[ 7]]; res[ 8] = a[b[ 8]]; res[ 9] = a[b[ 9]]; res[10] = a[b[10]]; res[11] = a[b[11]]; res[12] = a[b[12]]; res[13] = a[b[13]]; res[14] = a[b[14]]; res[15] = a[b[15]]; return res; } // NOTE: not tested inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) { uint8x16_t res; res[ 0] = a[b[ 0]]; res[ 1] = a[b[ 1]]; res[ 2] = a[b[ 2]]; res[ 3] = a[b[ 3]]; res[ 4] = a[b[ 4]]; res[ 5] = a[b[ 5]]; res[ 6] = a[b[ 6]]; res[ 7] = a[b[ 7]]; res[ 8] = a[b[ 8]]; res[ 9] = a[b[ 9]]; res[10] = a[b[10]]; res[11] = a[b[11]]; res[12] = a[b[12]]; res[13] = a[b[13]]; res[14] = a[b[14]]; res[15] = a[b[15]]; return res; } #else #define ggml_int16x8x2_t int16x8x2_t #define ggml_uint8x16x2_t uint8x16x2_t #define ggml_uint8x16x4_t uint8x16x4_t #define ggml_int8x16x2_t int8x16x2_t #define ggml_int8x16x4_t int8x16x4_t #define ggml_vld1q_s16_x2 vld1q_s16_x2 #define ggml_vld1q_u8_x2 vld1q_u8_x2 #define ggml_vld1q_u8_x4 vld1q_u8_x4 #define ggml_vld1q_s8_x2 vld1q_s8_x2 #define ggml_vld1q_s8_x4 vld1q_s8_x4 #define ggml_vqtbl1q_s8 vqtbl1q_s8 #define ggml_vqtbl1q_u8 vqtbl1q_u8 #endif // !defined(__aarch64__) #if !defined(__ARM_FEATURE_DOTPROD) inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) { const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1))); } #else #define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c) #endif // !defined(__ARM_FEATURE_DOTPROD) #endif // defined(__ARM_NEON) #ifdef __wasm_simd128__ #include #endif #ifdef __POWER9_VECTOR__ #include #endif #if defined(_MSC_VER) || defined(__MINGW32__) #include #elif defined(__SSE__) || defined(__SSE3__) || defined(__SSSE3__) || defined(__AVX__) || defined(__F16C__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX512BF16__) #include #endif #ifdef __riscv_v_intrinsic #include #endif #if defined(__loongarch64) #if defined(__loongarch_asx) #include #endif #if defined(__loongarch_sx) #include #endif #endif #if defined(__VXE__) || defined(__VXE2__) #include #define vec_neg(a) (-(a)) // Vector Negate #define vec_add(a, b) ((a) + (b)) // Vector Add #define vec_sub(a, b) ((a) - (b)) // Vector Subtract #define vec_mul(a, b) ((a) * (b)) // Vector Multiply #define vec_div(a, b) ((a) / (b)) // Vector Divide #define vec_sl(a, b) ((a) << (b)) // Vector Shift Left #define vec_sra(a, b) ((a) >> (b)) // Vector Shift Right #define vec_sr(a, b) ((a) >> (b)) // Vector Shift Right Algebraic #define vec_slo(a, b) vec_slb(a, (b) << 64) // Vector Shift Left by Octet #define vec_sro(a, b) vec_srb(a, (b) << 64) // Vector Shift Right by Octet #ifndef vec_and #define vec_and(a, b) ((a) & (b)) // Vector AND #endif #ifndef vec_or #define vec_or(a, b) ((a) | (b)) // Vector OR #endif #ifndef vec_xor #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR #endif typedef signed char char8x16_t __attribute__((vector_size(16))); typedef unsigned char uchar8x16_t __attribute__((vector_size(16))); typedef int8_t int8x16_t __attribute__((vector_size(16))); typedef int16_t int16x8_t __attribute__((vector_size(16))); typedef int32_t int32x4_t __attribute__((vector_size(16))); typedef uint8_t uint8x16_t __attribute__((vector_size(16))); typedef uint16_t uint16x8_t __attribute__((vector_size(16))); typedef uint32_t uint32x4_t __attribute__((vector_size(16))); typedef float float32x4_t __attribute__((vector_size(16))); typedef double double64x2_t __attribute__((vector_size(16))); typedef signed long long long64x2_t 
__attribute__((vector_size(16))); typedef unsigned long long ulong64x2_t __attribute__((vector_size(16))); typedef struct ggml_uint8x16x2_t { uint8x16_t val[2]; } ggml_uint8x16x2_t; inline static ggml_uint8x16x2_t ggml_vec_xl_u8x2(const uint8_t * ptr) { ggml_uint8x16x2_t res; res.val[0] = vec_xl( 0, ptr); res.val[1] = vec_xl(16, ptr); return res; } typedef struct ggml_uint8x16x4_t { uint8x16_t val[4]; } ggml_uint8x16x4_t; inline static ggml_uint8x16x4_t ggml_vec_xl_u8x4(const uint8_t * ptr) { ggml_uint8x16x4_t res; res.val[0] = vec_xl( 0, ptr); res.val[1] = vec_xl(16, ptr); res.val[2] = vec_xl(32, ptr); res.val[3] = vec_xl(48, ptr); return res; } typedef struct ggml_int8x16x4_t { int8x16_t val[4]; } ggml_int8x16x4_t; inline static ggml_int8x16x4_t ggml_vec_xl_s8x4(const int8_t * ptr) { ggml_int8x16x4_t res; res.val[0] = vec_xl( 0, ptr); res.val[1] = vec_xl(16, ptr); res.val[2] = vec_xl(32, ptr); res.val[3] = vec_xl(48, ptr); return res; } typedef struct ggml_int16x8x2_t { int16x8_t val[2]; } ggml_int16x8x2_t; inline static ggml_int16x8x2_t ggml_vec_xl_s16x2(const int16_t * ptr) { ggml_int16x8x2_t res; res.val[0] = vec_xl( 0, ptr); res.val[1] = vec_xl(16, ptr); return res; } /* ! WARNING: Very slow. Use vec_perm if possible. Refer to iq4_xs ! or iq4_nl for example implementation. */ inline static int8x16_t ggml_vec_tbl(int8x16_t a, uint8x16_t b) { int8x16_t res; res[ 0] = a[b[ 0]]; res[ 1] = a[b[ 1]]; res[ 2] = a[b[ 2]]; res[ 3] = a[b[ 3]]; res[ 4] = a[b[ 4]]; res[ 5] = a[b[ 5]]; res[ 6] = a[b[ 6]]; res[ 7] = a[b[ 7]]; res[ 8] = a[b[ 8]]; res[ 9] = a[b[ 9]]; res[10] = a[b[10]]; res[11] = a[b[11]]; res[12] = a[b[12]]; res[13] = a[b[13]]; res[14] = a[b[14]]; res[15] = a[b[15]]; return res; } inline static int16x8_t vec_padd_s16(int16x8_t a, int16x8_t b) { const uchar8x16_t v_maske = { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 }; const int16x8_t v_abo = vec_pack((int32x4_t)a, (int32x4_t)b); const int16x8_t v_abe = vec_perm(a, b, v_maske); return v_abo + v_abe; } /** * @see https://github.com/ggml-org/llama.cpp/pull/14037 */ inline static float vec_hsum_f32x4(float32x4_t v) { float32x4_t v_temp = v + vec_reve(v); return v_temp[0] + v_temp[1]; } inline static int32_t vec_hsum_i32x4(int32x4_t v) { int32x4_t v_temp = v + vec_reve(v); return v_temp[0] + v_temp[1]; } inline static int32x4_t ggml_vec_dot(int32x4_t acc, int8x16_t a, int8x16_t b) { const int16x8_t p = vec_mule(a, b) + vec_mulo(a, b); return acc + (vec_unpackh(p) + vec_unpackl(p)); } #endif #if defined(__loongarch_sx) /* float type data load instructions */ static __m128 __lsx_vreplfr2vr_s(const float val) { v4f32 res = {val, val, val, val}; return (__m128)res; } #endif #if defined(__loongarch_asx) static __m256 __lasx_xvreplfr2vr_s(const float val) { v8f32 res = {val, val, val, val, val, val, val, val}; return (__m256)res; } #endif // TODO: move to ggml-threading void ggml_barrier(struct ggml_threadpool * tp); void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value); int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/ggml-cpu.c000066400000000000000000003576001512524704700205600ustar00rootroot00000000000000#define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows #define _USE_MATH_DEFINES // For M_PI on MSVC #include "ggml-backend-impl.h" #include "ggml-backend.h" #include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-cpu.h" #include "ggml-impl.h" #include "quants.h" #include "ggml-threading.h" #include 
"unary-ops.h" #include "binary-ops.h" #include "vec.h" #include "ops.h" #include "ggml.h" #if defined(_MSC_VER) || defined(__MINGW32__) #include // using malloc.h with MSC/MINGW #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__gnu_linux__) #include #endif #ifdef GGML_USE_OPENMP #include #endif #if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8) #undef GGML_USE_LLAMAFILE #endif #ifdef GGML_USE_LLAMAFILE #include "llamafile/sgemm.h" #endif // Note: once we move threading into a separate C++ file // will use std::hardware_destructive_interference_size instead of hardcoding it here // and we'll use C++ attribute syntax. #define GGML_CACHE_LINE 64 #if defined(__clang__) || defined(__GNUC__) #define GGML_CACHE_ALIGN __attribute__((aligned(GGML_CACHE_LINE))) #endif #if defined(__has_feature) #if __has_feature(thread_sanitizer) #define GGML_TSAN_ENABLED 1 #endif #else // __has_feature #if defined(__SANITIZE_THREAD__) #define GGML_TSAN_ENABLED 1 #endif #endif // __has_feature #define UNUSED GGML_UNUSED #define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0) // precomputed f32 table for f16 (256 KB) (simd-mappings.h) float ggml_table_f32_f16[1 << 16]; #if defined(__ARM_ARCH) struct ggml_arm_arch_features_type { int sve_cnt; } ggml_arm_arch_features = { 0 }; #endif #if defined(__riscv) struct ggml_riscv_arch_features_type { int rvv_vlen; } ggml_riscv_arch_features = { 0 }; #endif #if defined(_WIN32) #define WIN32_LEAN_AND_MEAN #ifndef NOMINMAX #define NOMINMAX #endif #include #if defined(_MSC_VER) && !defined(__clang__) #define GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE)) typedef volatile LONG atomic_int; typedef atomic_int atomic_bool; typedef atomic_int atomic_flag; #define ATOMIC_FLAG_INIT 0 typedef enum { memory_order_relaxed, memory_order_consume, memory_order_acquire, memory_order_release, memory_order_acq_rel, memory_order_seq_cst } memory_order; static void atomic_store(atomic_int * ptr, LONG val) { InterlockedExchange(ptr, val); } static void atomic_store_explicit(atomic_int * ptr, LONG val, memory_order mo) { // TODO: add support for explicit memory order InterlockedExchange(ptr, val); } static LONG atomic_load(atomic_int * ptr) { return InterlockedCompareExchange(ptr, 0, 0); } static LONG atomic_load_explicit(atomic_int * ptr, memory_order mo) { // TODO: add support for explicit memory order return InterlockedCompareExchange(ptr, 0, 0); } static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) { return InterlockedExchangeAdd(ptr, inc); } static LONG atomic_fetch_add_explicit(atomic_int * ptr, LONG inc, memory_order mo) { // TODO: add support for explicit memory order return InterlockedExchangeAdd(ptr, inc); } static atomic_bool atomic_flag_test_and_set(atomic_flag * ptr) { return InterlockedExchange(ptr, 1); } static void atomic_flag_clear(atomic_flag * ptr) { InterlockedExchange(ptr, 0); } static void atomic_thread_fence(memory_order mo) { MemoryBarrier(); } #else // clang #include #endif typedef HANDLE pthread_t; typedef DWORD thread_ret_t; static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) { (void) unused; HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL); if (handle == NULL) { return EAGAIN; } *out = handle; return 0; } static int pthread_join(pthread_t thread, void * unused) { (void) unused; 
int ret = (int) WaitForSingleObject(thread, INFINITE); CloseHandle(thread); return ret; } static int sched_yield (void) { Sleep (0); return 0; } #else #include #include #include #if defined(__FreeBSD__) #include #endif typedef void * thread_ret_t; #include #include #include #endif typedef pthread_t ggml_thread_t; #define GGML_THREADPOOL_N_THREADS_MASK (0xffffU) #define GGML_THREADPOOL_N_THREADS_BITS (16) #if defined(__APPLE__) #include #include #include #endif static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { [GGML_TYPE_F32] = { .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp32, .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, .vec_dot_type = GGML_TYPE_F32, .nrows = 1, }, [GGML_TYPE_F16] = { .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp16, .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16, .vec_dot_type = GGML_TYPE_F16, .nrows = 1, }, [GGML_TYPE_Q4_0] = { .from_float = quantize_row_q4_0, .vec_dot = ggml_vec_dot_q4_0_q8_0, .vec_dot_type = GGML_TYPE_Q8_0, #if defined (__ARM_FEATURE_MATMUL_INT8) .nrows = 2, #else .nrows = 1, #endif }, [GGML_TYPE_Q4_1] = { .from_float = quantize_row_q4_1, .vec_dot = ggml_vec_dot_q4_1_q8_1, .vec_dot_type = GGML_TYPE_Q8_1, #if defined (__ARM_FEATURE_MATMUL_INT8) .nrows = 2, #else .nrows = 1, #endif }, [GGML_TYPE_Q5_0] = { .from_float = quantize_row_q5_0, .vec_dot = ggml_vec_dot_q5_0_q8_0, .vec_dot_type = GGML_TYPE_Q8_0, .nrows = 1, }, [GGML_TYPE_Q5_1] = { .from_float = quantize_row_q5_1, .vec_dot = ggml_vec_dot_q5_1_q8_1, .vec_dot_type = GGML_TYPE_Q8_1, .nrows = 1, }, [GGML_TYPE_Q8_0] = { .from_float = quantize_row_q8_0, .vec_dot = ggml_vec_dot_q8_0_q8_0, .vec_dot_type = GGML_TYPE_Q8_0, #if defined (__ARM_FEATURE_MATMUL_INT8) .nrows = 2, #else .nrows = 1, #endif }, [GGML_TYPE_Q8_1] = { .from_float = quantize_row_q8_1, .vec_dot_type = GGML_TYPE_Q8_1, .nrows = 1, }, [GGML_TYPE_MXFP4] = { .from_float = quantize_row_mxfp4, .vec_dot = ggml_vec_dot_mxfp4_q8_0, .vec_dot_type = GGML_TYPE_Q8_0, .nrows = 1, }, [GGML_TYPE_Q2_K] = { .from_float = quantize_row_q2_K, .vec_dot = ggml_vec_dot_q2_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_Q3_K] = { .from_float = quantize_row_q3_K, .vec_dot = ggml_vec_dot_q3_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_Q4_K] = { .from_float = quantize_row_q4_K, .vec_dot = ggml_vec_dot_q4_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, #if defined (__ARM_FEATURE_MATMUL_INT8) .nrows = 2, #else .nrows = 1, #endif }, [GGML_TYPE_Q5_K] = { .from_float = quantize_row_q5_K, .vec_dot = ggml_vec_dot_q5_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_Q6_K] = { .from_float = quantize_row_q6_K, .vec_dot = ggml_vec_dot_q6_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, #if defined (__ARM_FEATURE_MATMUL_INT8) .nrows = 2, #else .nrows = 1, #endif }, [GGML_TYPE_IQ2_XXS] = { .from_float = NULL, .vec_dot = ggml_vec_dot_iq2_xxs_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ2_XS] = { .from_float = NULL, .vec_dot = ggml_vec_dot_iq2_xs_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ3_XXS] = { // NOTE: from_float for iq3 and iq2_s was removed because these quants require initialization in ggml_quantize_init //.from_float = quantize_row_iq3_xxs, .vec_dot = ggml_vec_dot_iq3_xxs_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ3_S] = { //.from_float = quantize_row_iq3_s, .vec_dot = ggml_vec_dot_iq3_s_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ2_S] = { //.from_float = quantize_row_iq2_s, .vec_dot = 
ggml_vec_dot_iq2_s_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ1_S] = { .from_float = NULL, .vec_dot = ggml_vec_dot_iq1_s_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ1_M] = { .from_float = NULL, .vec_dot = ggml_vec_dot_iq1_m_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_IQ4_NL] = { .from_float = quantize_row_iq4_nl, .vec_dot = ggml_vec_dot_iq4_nl_q8_0, .vec_dot_type = GGML_TYPE_Q8_0, .nrows = 1, }, [GGML_TYPE_IQ4_XS] = { .from_float = quantize_row_iq4_xs, .vec_dot = ggml_vec_dot_iq4_xs_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_Q8_K] = { .from_float = quantize_row_q8_K, }, [GGML_TYPE_BF16] = { .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_bf16, .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16, .vec_dot_type = GGML_TYPE_BF16, .nrows = 1, }, [GGML_TYPE_TQ1_0] = { .from_float = quantize_row_tq1_0, .vec_dot = ggml_vec_dot_tq1_0_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_TQ2_0] = { .from_float = quantize_row_tq2_0, .vec_dot = ggml_vec_dot_tq2_0_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, [GGML_TYPE_I32] = { .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_i32, }, }; const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) { return &type_traits_cpu[type]; } // // Threading defs // typedef pthread_t ggml_thread_t; #if defined(_WIN32) typedef CONDITION_VARIABLE ggml_cond_t; typedef SRWLOCK ggml_mutex_t; #define ggml_mutex_init(m) InitializeSRWLock(m) #define ggml_mutex_destroy(m) #define ggml_mutex_lock(m) AcquireSRWLockExclusive(m) #define ggml_mutex_unlock(m) ReleaseSRWLockExclusive(m) #define ggml_mutex_lock_shared(m) AcquireSRWLockShared(m) #define ggml_mutex_unlock_shared(m) ReleaseSRWLockShared(m) #define ggml_cond_init(c) InitializeConditionVariable(c) #define ggml_cond_destroy(c) #define ggml_cond_wait(c, m) SleepConditionVariableSRW(c, m, INFINITE, CONDITION_VARIABLE_LOCKMODE_SHARED) #define ggml_cond_broadcast(c) WakeAllConditionVariable(c) #define ggml_thread_create pthread_create #define ggml_thread_join pthread_join #else typedef pthread_cond_t ggml_cond_t; typedef pthread_mutex_t ggml_mutex_t; #define ggml_mutex_init(m) pthread_mutex_init(m, NULL) #define ggml_mutex_destroy(m) pthread_mutex_destroy(m) #define ggml_mutex_lock(m) pthread_mutex_lock(m) #define ggml_mutex_unlock(m) pthread_mutex_unlock(m) #define ggml_mutex_lock_shared(m) pthread_mutex_lock(m) #define ggml_mutex_unlock_shared(m) pthread_mutex_unlock(m) #define ggml_lock_init(x) UNUSED(x) #define ggml_lock_destroy(x) UNUSED(x) #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) #define ggml_lock_lock(x) _mm_pause() #else #define ggml_lock_lock(x) UNUSED(x) #endif #define ggml_lock_unlock(x) UNUSED(x) #define GGML_LOCK_INITIALIZER 0 #define ggml_cond_init(c) pthread_cond_init(c, NULL) #define ggml_cond_destroy(c) pthread_cond_destroy(c) #define ggml_cond_wait(c, m) pthread_cond_wait(c, m) #define ggml_cond_broadcast(c) pthread_cond_broadcast(c) #define ggml_thread_create pthread_create #define ggml_thread_join pthread_join #endif // Threadpool def struct ggml_threadpool { ggml_mutex_t mutex; // mutex for cond.var ggml_cond_t cond; // cond.var for waiting for new work struct ggml_cgraph * cgraph; struct ggml_cplan * cplan; // synchronization primitives atomic_int n_graph; // updated when there is work to be done (i.e each graph) holds graph and active thread counts. 
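// The low GGML_THREADPOOL_N_THREADS_BITS bits of n_graph hold the active thread count for
// the current graph; ggml_barrier() below recovers it as
//     atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK
// while the remaining upper bits presumably carry the per-graph counter, e.g. (hypothetical
// accessor, for illustration only):
//     int i_graph = atomic_load_explicit(&tp->n_graph, memory_order_relaxed) >> GGML_THREADPOOL_N_THREADS_BITS;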
atomic_int GGML_CACHE_ALIGN n_barrier; atomic_int GGML_CACHE_ALIGN n_barrier_passed; atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads. // these are atomic as an annotation for thread-sanitizer atomic_bool stop; // Used for stopping the threadpool altogether atomic_bool pause; // Used for pausing the threadpool or individual threads atomic_int abort; // Used for aborting processing of a graph struct ggml_compute_state * workers; // per thread state int n_threads; // Number of threads in the pool int32_t prio; // Scheduling priority uint32_t poll; // Polling level (0 - no polling) enum ggml_status ec; }; // Per-thread state struct ggml_compute_state { #ifndef GGML_USE_OPENMP ggml_thread_t thrd; int last_graph; bool pending; #endif bool cpumask[GGML_MAX_N_THREADS]; struct ggml_threadpool * threadpool; int ith; }; // Helpers for polling loops #if defined(__aarch64__) && ( defined(__clang__) || defined(__GNUC__) ) static inline void ggml_thread_cpu_relax(void) { __asm__ volatile("yield" ::: "memory"); } #elif defined(__x86_64__) static inline void ggml_thread_cpu_relax(void) { _mm_pause(); } #elif defined(__riscv) static inline void ggml_thread_cpu_relax(void) { #ifdef __riscv_zihintpause __asm__ __volatile__ ("pause"); #else /* Encoding of the pause instruction */ __asm__ __volatile__ (".4byte 0x100000F"); #endif } #else static inline void ggml_thread_cpu_relax(void) {;} #endif // // NUMA support // #define GGML_NUMA_MAX_NODES 8 #define GGML_NUMA_MAX_CPUS 512 struct ggml_numa_node { uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node uint32_t n_cpus; }; struct ggml_numa_nodes { enum ggml_numa_strategy numa_strategy; struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system uint32_t current_node; // node on which main process is executing #if defined(__gnu_linux__) cpu_set_t cpuset; // cpuset from numactl #else uint32_t cpuset; // no NUMA support outside of Linux at this time. 
Use a portable datatype #endif }; // // ggml state // struct ggml_state { struct ggml_numa_nodes numa; }; static struct ggml_state g_state = {0}; void ggml_barrier(struct ggml_threadpool * tp) { int n_threads = atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK; if (n_threads == 1) { return; } #ifdef GGML_USE_OPENMP #pragma omp barrier #else int n_passed = atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed); // enter barrier (full seq-cst fence) int n_barrier = atomic_fetch_add_explicit(&tp->n_barrier, 1, memory_order_seq_cst); if (n_barrier == (n_threads - 1)) { // last thread atomic_store_explicit(&tp->n_barrier, 0, memory_order_relaxed); // exit barrier (full seq-cst fence) atomic_fetch_add_explicit(&tp->n_barrier_passed, 1, memory_order_seq_cst); return; } // wait for other threads while (atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed) == n_passed) { ggml_thread_cpu_relax(); } // exit barrier (full seq-cst fence) // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead #ifdef GGML_TSAN_ENABLED atomic_fetch_add_explicit(&tp->n_barrier_passed, 0, memory_order_seq_cst); #else atomic_thread_fence(memory_order_seq_cst); #endif #endif } void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) { atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed); } int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) { return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed); } #if defined(__gnu_linux__) static cpu_set_t ggml_get_numa_affinity(void) { cpu_set_t cpuset; pthread_t thread; thread = pthread_self(); CPU_ZERO(&cpuset); pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); return cpuset; } #else static uint32_t ggml_get_numa_affinity(void) { return 0; // no NUMA support } #endif void ggml_numa_init(enum ggml_numa_strategy numa_flag) { if (g_state.numa.n_nodes > 0) { fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); return; } #if defined(__gnu_linux__) struct stat st; char path[256]; int rv; // set numa scheme g_state.numa.numa_strategy = numa_flag; GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy); g_state.numa.cpuset = ggml_get_numa_affinity(); // enumerate nodes while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) { rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes); GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); if (stat(path, &st) != 0) { break; } ++g_state.numa.n_nodes; } // enumerate CPUs while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) { rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus); GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); if (stat(path, &st) != 0) { break; } ++g_state.numa.total_cpus; } GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); // figure out which node we're on uint current_cpu; int getcpu_ret = 0; #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 33) || defined(__COSMOPOLITAN__) getcpu_ret = getcpu(¤t_cpu, &g_state.numa.current_node); #else // old glibc doesn't have a wrapper for this call. 
Fall back on direct syscall # if !defined(SYS_getcpu) && defined(SYS_get_cpu) # define SYS_getcpu SYS_get_cpu // some older glibc versions use this name # endif getcpu_ret = syscall(SYS_getcpu, ¤t_cpu, &g_state.numa.current_node); #endif if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) { g_state.numa.n_nodes = 0; return; } GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu); for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) { struct ggml_numa_node * node = &g_state.numa.nodes[n]; GGML_PRINT_DEBUG("CPUs on node %u:", n); node->n_cpus = 0; for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) { rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c); GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); if (stat(path, &st) == 0) { node->cpus[node->n_cpus++] = c; GGML_PRINT_DEBUG(" %u", c); } } GGML_PRINT_DEBUG("\n"); } if (ggml_is_numa()) { FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r"); if (fptr != NULL) { char buf[42]; if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) { GGML_LOG_WARN("/proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n"); } fclose(fptr); } } #else UNUSED(numa_flag); // TODO #endif } bool ggml_is_numa(void) { return g_state.numa.n_nodes > 1; } #if defined(__ARM_ARCH) #if defined(__aarch64__) && defined(__ARM_FEATURE_SVE) #include static void ggml_init_arm_arch_features(void) { ggml_arm_arch_features.sve_cnt = svcntb(); } #else static void ggml_init_arm_arch_features(void) {} #endif #endif // __ARM_ARCH #if defined(__riscv) && defined(__riscv_v_intrinsic) #include static void ggml_init_riscv_arch_features(void) { ggml_riscv_arch_features.rvv_vlen = __riscv_vlenb(); } #else static void ggml_init_riscv_arch_features(void) {} #endif struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { GGML_ASSERT(!ggml_get_no_alloc(ctx)); struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); ggml_set_i32(result, value); return result; } struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { GGML_ASSERT(!ggml_get_no_alloc(ctx)); struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); ggml_set_f32(result, value); return result; } struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { const int n = ggml_nrows(tensor); const int nc = tensor->ne[0]; const size_t n1 = tensor->nb[1]; char * const data = tensor->data; switch (tensor->type) { case GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); for (int i = 0; i < n; i++) { ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); } } break; case GGML_TYPE_I16: { assert(tensor->nb[0] == sizeof(int16_t)); for (int i = 0; i < n; i++) { ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); } } break; case GGML_TYPE_I32: { assert(tensor->nb[0] == sizeof(int32_t)); for (int i = 0; i < n; i++) { ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); } } break; case GGML_TYPE_F16: { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); } } break; case GGML_TYPE_BF16: { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); } } break; case GGML_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { ggml_vec_set_f32(nc, (float *)(data + 
i*n1), value); } } break; default: { GGML_ABORT("fatal error"); } } return tensor; } struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { const int n = ggml_nrows(tensor); const int nc = tensor->ne[0]; const size_t n1 = tensor->nb[1]; char * const data = tensor->data; switch (tensor->type) { case GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); for (int i = 0; i < n; i++) { ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); } } break; case GGML_TYPE_I16: { assert(tensor->nb[0] == sizeof(int16_t)); for (int i = 0; i < n; i++) { ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); } } break; case GGML_TYPE_I32: { assert(tensor->nb[0] == sizeof(int32_t)); for (int i = 0; i < n; i++) { ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); } } break; case GGML_TYPE_F16: { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); } } break; case GGML_TYPE_BF16: { assert(tensor->nb[0] == sizeof(ggml_bf16_t)); for (int i = 0; i < n; i++) { ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); } } break; case GGML_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { ggml_vec_set_f32(nc, (float *)(data + i*n1), value); } } break; default: { GGML_ABORT("fatal error"); } } return tensor; } int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { if (!ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]); } switch (tensor->type) { case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); return ((int8_t *)(tensor->data))[i]; } case GGML_TYPE_I16: { GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); return ((int16_t *)(tensor->data))[i]; } case GGML_TYPE_I32: { GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); return ((int32_t *)(tensor->data))[i]; } case GGML_TYPE_F16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } case GGML_TYPE_BF16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); } case GGML_TYPE_F32: { GGML_ASSERT(tensor->nb[0] == sizeof(float)); return ((float *)(tensor->data))[i]; } default: { GGML_ABORT("fatal error"); } } } void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { if (!ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value); return; } switch (tensor->type) { case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); ((int8_t *)(tensor->data))[i] = value; } break; case GGML_TYPE_I16: { GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); ((int16_t *)(tensor->data))[i] = value; } break; case GGML_TYPE_I32: { GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); ((int32_t *)(tensor->data))[i] = value; } break; case GGML_TYPE_F16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); } break; case GGML_TYPE_F32: { GGML_ASSERT(tensor->nb[0] == sizeof(float)); ((float *)(tensor->data))[i] = value; } break; default: { GGML_ABORT("fatal error"); } } } int32_t 
ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { case GGML_TYPE_I8: return ((int8_t *) data)[0]; case GGML_TYPE_I16: return ((int16_t *) data)[0]; case GGML_TYPE_I32: return ((int32_t *) data)[0]; case GGML_TYPE_F16: return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); case GGML_TYPE_BF16: return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: return ((float *) data)[0]; default: GGML_ABORT("fatal error"); } } void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { case GGML_TYPE_I8: { ((int8_t *)(data))[0] = value; } break; case GGML_TYPE_I16: { ((int16_t *)(data))[0] = value; } break; case GGML_TYPE_I32: { ((int32_t *)(data))[0] = value; } break; case GGML_TYPE_F16: { ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); } break; case GGML_TYPE_F32: { ((float *)(data))[0] = value; } break; default: { GGML_ABORT("fatal error"); } } } float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { if (!ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]); } switch (tensor->type) { case GGML_TYPE_I8: { return ((int8_t *)(tensor->data))[i]; } case GGML_TYPE_I16: { return ((int16_t *)(tensor->data))[i]; } case GGML_TYPE_I32: { return ((int32_t *)(tensor->data))[i]; } case GGML_TYPE_F16: { return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } case GGML_TYPE_BF16: { return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); } case GGML_TYPE_F32: { return ((float *)(tensor->data))[i]; } default: { GGML_ABORT("fatal error"); } } } void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { if (!ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value); return; } switch (tensor->type) { case GGML_TYPE_I8: { ((int8_t *)(tensor->data))[i] = value; } break; case GGML_TYPE_I16: { ((int16_t *)(tensor->data))[i] = value; } break; case GGML_TYPE_I32: { ((int32_t *)(tensor->data))[i] = value; } break; case GGML_TYPE_F16: { ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); } break; case GGML_TYPE_F32: { ((float *)(tensor->data))[i] = value; } break; default: { GGML_ABORT("fatal error"); } } } float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { case GGML_TYPE_I8: return ((int8_t *) data)[0]; case GGML_TYPE_I16: return ((int16_t *) data)[0]; case GGML_TYPE_I32: return ((int32_t *) data)[0]; case GGML_TYPE_F16: return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); case GGML_TYPE_BF16: return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: return ((float *) data)[0]; default: GGML_ABORT("fatal error"); } } void ggml_set_f32_nd(const struct ggml_tensor * 
tensor, int i0, int i1, int i2, int i3, float value) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { case GGML_TYPE_I8: { ((int8_t *)(data))[0] = value; } break; case GGML_TYPE_I16: { ((int16_t *)(data))[0] = value; } break; case GGML_TYPE_I32: { ((int32_t *)(data))[0] = value; } break; case GGML_TYPE_F16: { ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); } break; case GGML_TYPE_F32: { ((float *)(data))[0] = value; } break; default: { GGML_ABORT("fatal error"); } } } //////////////////////////////////////////////////////////////////////////////// // ggml_compute_forward_mul_mat static void ggml_compute_forward_mul_mat_one_chunk( const struct ggml_compute_params * params, struct ggml_tensor * dst, const enum ggml_type type, const int64_t num_rows_per_vec_dot, const int64_t ir0_start, const int64_t ir0_end, const int64_t ir1_start, const int64_t ir1_end) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const bool src1_cont = ggml_is_contiguous(src1); ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot; enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; // broadcast factors const int64_t r2 = ne12 / ne02; const int64_t r3 = ne13 / ne03; //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end); // threads with no work simply yield (not sure if it helps) if (ir0_start >= ir0_end || ir1_start >= ir1_end) { return; } const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; const size_t row_size = ggml_row_size(vec_dot_type, ne10); assert(ne12 % ne02 == 0); assert(ne13 % ne03 == 0); // block-tiling attempt const int64_t blck_0 = 16; const int64_t blck_1 = 16; const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11; // attempt to reduce false-sharing (does not seem to make a difference) // 16 * 2, accounting for mmla kernels float tmp[32]; for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) { const int64_t i13 = (ir1 / (ne12 * ne1)); const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); // broadcast src0 into src1 const int64_t i03 = i13 / r3; const int64_t i02 = i12 / r2; const int64_t i1 = i11; const int64_t i2 = i12; const int64_t i3 = i13; const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03); // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using // the original src1 data pointer, so we should index using the indices directly // TODO: this is a bit of a hack, we should probably have a better way to handle this const char * src1_col = (const char*)wdata + (src1_cont || src1->type != vec_dot_type ? 
(i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size : (i11 * nb11 + i12 * nb12 + i13 * nb13)); float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col); //} for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) { vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot); } for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) { memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float)); } } } } } void ggml_compute_forward_mul_mat( const struct ggml_compute_params * params, struct ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; enum ggml_type const vec_dot_type = type_traits_cpu[src0->type].vec_dot_type; ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float; int64_t const vec_dot_num_rows = type_traits_cpu[src0->type].nrows; GGML_ASSERT(ne0 == ne01); GGML_ASSERT(ne1 == ne11); GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == ggml_type_size(src0->type)); GGML_ASSERT(nb10 == ggml_type_size(src1->type)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows // TODO: extract to "extra_op" #if GGML_USE_LLAMAFILE // broadcast factors const int64_t r2 = ne12 / ne02; const int64_t r3 = ne13 / ne03; const bool src1_cont = ggml_is_contiguous(src1); if (src1_cont) { for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) if (!llamafile_sgemm(params, ne01, ne11, ne00/ggml_blck_size(src0->type), (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01/ggml_type_size(src0->type), (const char *)src1->data + i12*nb12 + i13*nb13, nb11/ggml_type_size(src1->type), (char *)dst->data + i12*nb2 + i13*nb3, nb1/ggml_type_size(dst->type), src0->type, src1->type, dst->type)) goto UseGgmlGemm1; return; } UseGgmlGemm1:; #endif if (src1->type != vec_dot_type) { char * wdata = params->wdata; const size_t nbw0 = ggml_type_size(vec_dot_type); const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); const size_t nbw2 = nbw1*ne11; const size_t nbw3 = nbw2*ne12; assert(params->wsize >= ne13*nbw3); GGML_ASSERT(src1->type == GGML_TYPE_F32); #if 0 for (int64_t i13 = 0; i13 < ne13; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { for (int64_t i11 = ith; i11 < ne11; i11 += nth) { from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), ne10); } } } #else for (int64_t i13 = 0; i13 < ne13; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { for (int64_t i11 = 0; i11 < ne11; ++i11) { size_t bs = ggml_blck_size(vec_dot_type); int64_t ne10_block_start = (ith * ne10/bs) / nth; int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth; from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10), (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0), (ne10_block_end - ne10_block_start) * bs); } } 
} #endif } if (ith == 0) { // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start. atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed); } ggml_barrier(params->threadpool); #if GGML_USE_LLAMAFILE if (src1->type != vec_dot_type) { const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; const size_t row_size = ggml_row_size(vec_dot_type, ne10); for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) if (!llamafile_sgemm(params, ne01, ne11, ne00/ggml_blck_size(src0->type), (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, nb01/ggml_type_size(src0->type), (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, row_size/ggml_type_size(vec_dot_type), (char *)dst->data + i12*nb2 + i13*nb3, nb1/ggml_type_size(dst->type), src0->type, vec_dot_type, dst->type)) goto UseGgmlGemm2; return; } UseGgmlGemm2:; #endif // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers) const int64_t nr0 = ne0; // This is the size of the rest of the dimensions of the result const int64_t nr1 = ne1 * ne2 * ne3; // Now select a reasonable chunk size. int chunk_size = 16; // We need to step up the size if it's small if (nr0 == 1 || nr1 == 1) { chunk_size = 64; } // distribute the work across the inner or outer loop based on which one is larger // The number of chunks in the 0/1 dim. // CEIL(nr0/chunk_size) int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread. // Also, chunking by thread was measured to perform better on NUMA systems. See https://github.com/ggml-org/llama.cpp/pull/6915 // In theory, chunking should be just as useful on NUMA and non-NUMA systems, but testing disagreed with that. if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) { // distribute the thread work across the inner or outer loop based on which one is larger nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows } // The number of elements in each chunk const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; // The first chunk comes from our thread_id, the rest will get auto-assigned.
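// Editor's note, an illustrative worked example that is not part of the original source:
// with nr0 = 4096, nr1 = 64 and chunk_size = 16, nchunk0 = 256 and nchunk1 = 4, i.e.
// 1024 chunks of at most dr0 = 16 rows by dr1 = 16 columns. With nth = 8 threads,
// nchunk0 * nchunk1 >= nth * 4 holds, so the chunked plan is kept: each thread starts
// on chunk `ith` and then keeps pulling the next unprocessed chunk index from the
// shared atomic counter current_chunk until all 1024 chunks have been processed.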
int current_chunk = ith; while (current_chunk < nchunk0 * nchunk1) { const int64_t ith0 = current_chunk % nchunk0; const int64_t ith1 = current_chunk / nchunk0; const int64_t ir0_start = dr0 * ith0; const int64_t ir0_end = MIN(ir0_start + dr0, nr0); const int64_t ir1_start = dr1 * ith1; const int64_t ir1_end = MIN(ir1_start + dr1, nr1); // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols int64_t num_rows_per_vec_dot = vec_dot_num_rows; // these checks are needed to avoid crossing dim1 boundaries // can be optimized, but the logic would become more complicated, so keeping it like this for simplicity if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) { num_rows_per_vec_dot = 1; } ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end); if (nth >= nchunk0 * nchunk1) { break; } current_chunk = atomic_fetch_add_explicit(&params->threadpool->current_chunk, 1, memory_order_relaxed); } } // ggml_compute_forward_mul_mat_id #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)] struct mmid_row_mapping { int32_t i1; int32_t i2; }; static void ggml_compute_forward_mul_mat_id_one_chunk( struct ggml_tensor * dst, const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * ids, const int64_t cur_a, const int64_t ir0_start, const int64_t ir0_end, const int64_t ir1_start, const int64_t ir1_end, const char * src0_cur, const struct mmid_row_mapping * matrix_rows, const size_t row_size, const bool src1_cont, const void * wdata) { GGML_TENSOR_BINARY_OP_LOCALS const enum ggml_type type = src0->type; ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot; enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; const int64_t blck_0 = 16; const int64_t blck_1 = 16; float tmp[16]; for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) { const int64_t _i12 = ir1; // logical row index for this expert struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12); const int id = row_mapping.i1; // selected expert index const int64_t i11 = id % ne11; const int64_t i12 = row_mapping.i2; // row index in src1 const int64_t i1 = id; // selected expert index const int64_t i2 = i12; // row // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using // the original src1 data pointer, so we should index using the indices directly // TODO: this is a bit of a hack, we should probably have a better way to handle this const char * src1_col = (const char *) wdata + (src1_cont || src1->type != vec_dot_type ? 
(i11 + i12*ne11)*row_size : (i11*nb11 + i12*nb12)); float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2)); for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1); } memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float)); } } } } static void * incr_ptr_aligned(void ** p, size_t size, size_t align) { void * ptr = *p; ptr = (void *) GGML_PAD((uintptr_t) ptr, align); *p = (void *) ((char *) ptr + size); return ptr; } static void ggml_compute_forward_mul_mat_id( const struct ggml_compute_params * params, struct ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; const struct ggml_tensor * ids = dst->src[2]; GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const enum ggml_type type = src0->type; const bool src1_cont = ggml_is_contiguous(src1); enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float; // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == ggml_type_size(type)); GGML_ASSERT(nb10 == ggml_type_size(src1->type)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); // row groups const int n_ids = ids->ne[0]; // n_expert_used const int n_as = ne02; // n_expert void * wdata_cur = params->wdata; if (src1->type != vec_dot_type) { incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t)); } int64_t * matrix_row_counts = // [n_as] incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t)); struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]] incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t)); char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as] incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE); GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata)); if (src1->type != vec_dot_type) { char * wdata = params->wdata; const size_t nbw0 = ggml_type_size(vec_dot_type); const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); const size_t nbw2 = nbw1*ne11; const size_t nbw3 = nbw2*ne12; assert(params->wsize >= ne13*nbw3); GGML_ASSERT(src1->type == GGML_TYPE_F32); #if 0 for (int64_t i13 = 0; i13 < ne13; ++i13) { for (int64_t i12 = ith; i12 < ne12; i12 += nth) { for (int64_t i11 = 0; i11 < ne11; ++i11) { from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), ne10); } } } #else for (int64_t i13 = 0; i13 < ne13; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { for (int64_t i11 = 0; i11 < ne11; ++i11) { size_t bs = ggml_blck_size(vec_dot_type); int64_t ne10_block_start = (ith * ne10/bs) / nth; int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth; from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10), (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0), (ne10_block_end - ne10_block_start) * bs); } } } #endif } if (ith == 0) { // initialize matrix_row_counts memset(matrix_row_counts, 0, n_as*sizeof(int64_t)); // group rows by src0 matrix for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { for (int id = 0; id < n_ids; ++id) { const int32_t i02 = *(const int32_t *) 
((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]); assert(i02 >= 0 && i02 < n_as); MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1}; matrix_row_counts[i02] += 1; } } } // reset current_chunk for (int cur_a = ith; cur_a < n_as; cur_a += nth) { atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a); *current_chunk_ctr = nth; } ggml_barrier(params->threadpool); for (int cur_a = 0; cur_a < n_as; ++cur_a) { const int64_t cne1 = matrix_row_counts[cur_a]; if (cne1 == 0) { continue; } const char * src0_cur = (const char *) src0->data + cur_a * nb02; const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; const size_t row_size = ggml_row_size(vec_dot_type, ne10); const int64_t nr0 = ne01; const int64_t nr1 = cne1; int chunk_size = 16; if (nr0 == 1 || nr1 == 1) { chunk_size = 64; } // disable for NUMA const bool disable_chunking = ggml_is_numa(); int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) { nchunk0 = nr0 > nr1 ? nth : 1; nchunk1 = nr0 > nr1 ? 1 : nth; } const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; int current_chunk = ith; atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a); while (current_chunk < nchunk0 * nchunk1) { const int64_t ith0 = current_chunk % nchunk0; const int64_t ith1 = current_chunk / nchunk0; const int64_t ir0_start = dr0 * ith0; const int64_t ir0_end = MIN(ir0_start + dr0, nr0); const int64_t ir1_start = dr1 * ith1; const int64_t ir1_end = MIN(ir1_start + dr1, nr1); ggml_compute_forward_mul_mat_id_one_chunk( dst, src0, src1, ids, cur_a, ir0_start, ir0_end, ir1_start, ir1_end, src0_cur, matrix_rows, row_size, src1_cont, wdata ); if (nth >= nchunk0 * nchunk1) { break; } current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed); } } } ///////////////////////////////// static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { GGML_ASSERT(params); if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) { return; } // extra_buffer op? 
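// Editor's note (not part of the original source): ggml_cpu_extra_compute_forward gives
// the optional CPU "extra" buffer types (e.g. repacked weight layouts) a chance to run
// this op with their own specialized kernels; if one of them handles the tensor it
// returns true and the generic dispatch switch below is skipped entirely.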
if (ggml_cpu_extra_compute_forward(params, tensor)) { return; } switch (tensor->op) { case GGML_OP_DUP: { ggml_compute_forward_dup(params, tensor); } break; case GGML_OP_ADD: { ggml_compute_forward_add(params, tensor); } break; case GGML_OP_ADD_ID: { ggml_compute_forward_add_id(params, tensor); } break; case GGML_OP_ADD1: { ggml_compute_forward_add1(params, tensor); } break; case GGML_OP_ACC: { ggml_compute_forward_acc(params, tensor); } break; case GGML_OP_SUB: { ggml_compute_forward_sub(params, tensor); } break; case GGML_OP_MUL: { ggml_compute_forward_mul(params, tensor); } break; case GGML_OP_DIV: { ggml_compute_forward_div(params, tensor); } break; case GGML_OP_SQR: { ggml_compute_forward_sqr(params, tensor); } break; case GGML_OP_SQRT: { ggml_compute_forward_sqrt(params, tensor); } break; case GGML_OP_LOG: { ggml_compute_forward_log(params, tensor); } break; case GGML_OP_SIN: { ggml_compute_forward_sin(params, tensor); } break; case GGML_OP_COS: { ggml_compute_forward_cos(params, tensor); } break; case GGML_OP_SUM: { ggml_compute_forward_sum(params, tensor); } break; case GGML_OP_SUM_ROWS: { ggml_compute_forward_sum_rows(params, tensor); } break; case GGML_OP_CUMSUM: { ggml_compute_forward_cumsum(params, tensor); } break; case GGML_OP_MEAN: { ggml_compute_forward_mean(params, tensor); } break; case GGML_OP_ARGMAX: { ggml_compute_forward_argmax(params, tensor); } break; case GGML_OP_COUNT_EQUAL: { ggml_compute_forward_count_equal(params, tensor); } break; case GGML_OP_REPEAT: { ggml_compute_forward_repeat(params, tensor); } break; case GGML_OP_REPEAT_BACK: { ggml_compute_forward_repeat_back(params, tensor); } break; case GGML_OP_CONCAT: { ggml_compute_forward_concat(params, tensor); } break; case GGML_OP_SILU_BACK: { ggml_compute_forward_silu_back(params, tensor); } break; case GGML_OP_NORM: { ggml_compute_forward_norm(params, tensor); } break; case GGML_OP_RMS_NORM: { ggml_compute_forward_rms_norm(params, tensor); } break; case GGML_OP_RMS_NORM_BACK: { ggml_compute_forward_rms_norm_back(params, tensor); } break; case GGML_OP_GROUP_NORM: { ggml_compute_forward_group_norm(params, tensor); } break; case GGML_OP_L2_NORM: { ggml_compute_forward_l2_norm(params, tensor); } break; case GGML_OP_MUL_MAT: { ggml_compute_forward_mul_mat(params, tensor); } break; case GGML_OP_MUL_MAT_ID: { ggml_compute_forward_mul_mat_id(params, tensor); } break; case GGML_OP_OUT_PROD: { ggml_compute_forward_out_prod(params, tensor); } break; case GGML_OP_SCALE: { ggml_compute_forward_scale(params, tensor); } break; case GGML_OP_SET: { ggml_compute_forward_set(params, tensor); } break; case GGML_OP_CPY: { ggml_compute_forward_cpy(params, tensor); } break; case GGML_OP_CONT: { ggml_compute_forward_cont(params, tensor); } break; case GGML_OP_GET_ROWS: { ggml_compute_forward_get_rows(params, tensor); } break; case GGML_OP_GET_ROWS_BACK: { ggml_compute_forward_get_rows_back(params, tensor); } break; case GGML_OP_SET_ROWS: { ggml_compute_forward_set_rows(params, tensor); } break; case GGML_OP_DIAG: { ggml_compute_forward_diag(params, tensor); } break; case GGML_OP_DIAG_MASK_INF: { ggml_compute_forward_diag_mask_inf(params, tensor); } break; case GGML_OP_DIAG_MASK_ZERO: { ggml_compute_forward_diag_mask_zero(params, tensor); } break; case GGML_OP_SOFT_MAX: { ggml_compute_forward_soft_max(params, tensor); } break; case GGML_OP_SOFT_MAX_BACK: { ggml_compute_forward_soft_max_ext_back(params, tensor); } break; case GGML_OP_ROPE: { ggml_compute_forward_rope(params, tensor); } break; case GGML_OP_ROPE_BACK: { 
ggml_compute_forward_rope_back(params, tensor); } break; case GGML_OP_CLAMP: { ggml_compute_forward_clamp(params, tensor); } break; case GGML_OP_CONV_TRANSPOSE_1D: { ggml_compute_forward_conv_transpose_1d(params, tensor); } break; case GGML_OP_IM2COL: { ggml_compute_forward_im2col(params, tensor); } break; case GGML_OP_IM2COL_BACK: { ggml_compute_forward_im2col_back_f32(params, tensor); } break; case GGML_OP_IM2COL_3D: { ggml_compute_forward_im2col_3d(params, tensor); } break; case GGML_OP_CONV_2D: { ggml_compute_forward_conv_2d(params, tensor); } break; case GGML_OP_CONV_3D: { ggml_compute_forward_conv_3d(params, tensor); } break; case GGML_OP_CONV_2D_DW: { ggml_compute_forward_conv_2d_dw(params, tensor); } break; case GGML_OP_CONV_TRANSPOSE_2D: { ggml_compute_forward_conv_transpose_2d(params, tensor); } break; case GGML_OP_POOL_1D: { ggml_compute_forward_pool_1d(params, tensor); } break; case GGML_OP_POOL_2D: { ggml_compute_forward_pool_2d(params, tensor); } break; case GGML_OP_POOL_2D_BACK: { ggml_compute_forward_pool_2d_back(params, tensor); } break; case GGML_OP_UPSCALE: { ggml_compute_forward_upscale(params, tensor); } break; case GGML_OP_PAD: { ggml_compute_forward_pad(params, tensor); } break; case GGML_OP_PAD_REFLECT_1D: { ggml_compute_forward_pad_reflect_1d(params, tensor); } break; case GGML_OP_ROLL: { ggml_compute_forward_roll(params, tensor); } break; case GGML_OP_ARANGE: { ggml_compute_forward_arange(params, tensor); } break; case GGML_OP_TIMESTEP_EMBEDDING: { ggml_compute_forward_timestep_embedding(params, tensor); } break; case GGML_OP_ARGSORT: { ggml_compute_forward_argsort(params, tensor); } break; case GGML_OP_TOP_K: { ggml_compute_forward_top_k(params, tensor); } break; case GGML_OP_LEAKY_RELU: { ggml_compute_forward_leaky_relu(params, tensor); } break; case GGML_OP_TRI: { ggml_compute_forward_tri(params, tensor); } break; case GGML_OP_FILL: { ggml_compute_forward_fill(params, tensor); } break; case GGML_OP_FLASH_ATTN_EXT: { ggml_compute_forward_flash_attn_ext(params, tensor); } break; case GGML_OP_FLASH_ATTN_BACK: { int32_t t = ggml_get_op_params_i32(tensor, 0); GGML_ASSERT(t == 0 || t == 1); bool masked = t != 0; ggml_compute_forward_flash_attn_back(params, masked, tensor); } break; case GGML_OP_SSM_CONV: { ggml_compute_forward_ssm_conv(params, tensor); } break; case GGML_OP_SSM_SCAN: { ggml_compute_forward_ssm_scan(params, tensor); } break; case GGML_OP_WIN_PART: { ggml_compute_forward_win_part(params, tensor); } break; case GGML_OP_WIN_UNPART: { ggml_compute_forward_win_unpart(params, tensor); } break; case GGML_OP_UNARY: { ggml_compute_forward_unary(params, tensor); } break; case GGML_OP_GLU: { ggml_compute_forward_glu(params, tensor); } break; case GGML_OP_GET_REL_POS: { ggml_compute_forward_get_rel_pos(params, tensor); } break; case GGML_OP_ADD_REL_POS: { ggml_compute_forward_add_rel_pos(params, tensor); } break; case GGML_OP_RWKV_WKV6: { ggml_compute_forward_rwkv_wkv6(params, tensor); } break; case GGML_OP_GATED_LINEAR_ATTN: { ggml_compute_forward_gla(params, tensor); } break; case GGML_OP_RWKV_WKV7: { ggml_compute_forward_rwkv_wkv7(params, tensor); } break; case GGML_OP_SOLVE_TRI: { ggml_compute_forward_solve_tri(params, tensor); } break; case GGML_OP_MAP_CUSTOM1: { ggml_compute_forward_map_custom1(params, tensor); } break; case GGML_OP_MAP_CUSTOM2: { ggml_compute_forward_map_custom2(params, tensor); } break; case GGML_OP_MAP_CUSTOM3: { ggml_compute_forward_map_custom3(params, tensor); } break; case GGML_OP_CUSTOM: { ggml_compute_forward_custom(params, tensor); 
} break; case GGML_OP_CROSS_ENTROPY_LOSS: { ggml_compute_forward_cross_entropy_loss(params, tensor); } break; case GGML_OP_CROSS_ENTROPY_LOSS_BACK: { ggml_compute_forward_cross_entropy_loss_back(params, tensor); } break; case GGML_OP_OPT_STEP_ADAMW: { ggml_compute_forward_opt_step_adamw(params, tensor); } break; case GGML_OP_OPT_STEP_SGD: { ggml_compute_forward_opt_step_sgd(params, tensor); } break; case GGML_OP_NONE: { // nop } break; case GGML_OP_RESHAPE: { // nop } break; case GGML_OP_PERMUTE: { // nop } break; case GGML_OP_VIEW: { // nop } break; case GGML_OP_TRANSPOSE: { // nop } break; case GGML_OP_COUNT: { GGML_ABORT("fatal error"); } } } // Android's libc implementation "bionic" does not support setting affinity #if defined(__gnu_linux__) static void set_numa_thread_affinity(int thread_n) { if (!ggml_is_numa()) { return; } int node_num; int rv; size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); switch(g_state.numa.numa_strategy) { case GGML_NUMA_STRATEGY_DISTRIBUTE: // run thread on node_num thread_n / (threads per node) node_num = thread_n % g_state.numa.n_nodes; break; case GGML_NUMA_STRATEGY_ISOLATE: // run thread on current_node node_num = g_state.numa.current_node; break; case GGML_NUMA_STRATEGY_NUMACTL: // use the cpuset that numactl gave us rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); if (rv) { fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } return; default: return; } struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); CPU_ZERO_S(setsize, cpus); for (size_t i = 0; i < node->n_cpus; ++i) { CPU_SET_S(node->cpus[i], setsize, cpus); } rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); } CPU_FREE(cpus); } static void clear_numa_thread_affinity(void) { if (!ggml_is_numa()) { return; } size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); CPU_ZERO_S(setsize, cpus); for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) { CPU_SET_S(i, setsize, cpus); } int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); } CPU_FREE(cpus); } #else // TODO: Windows etc. 
// (the linux implementation may also work on BSD, someone should test) static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); } static void clear_numa_thread_affinity(void) {} #endif static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { int n_tasks = 0; if (ggml_is_empty(node)) { // no need to multi-thread a no-op n_tasks = 1; return n_tasks; } switch (node->op) { case GGML_OP_CPY: case GGML_OP_DUP: case GGML_OP_CONT: case GGML_OP_ADD: case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_ACC: case GGML_OP_CUMSUM: case GGML_OP_TRI: case GGML_OP_FILL: { n_tasks = n_threads; } break; case GGML_OP_SUB: case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_LOG: case GGML_OP_SIN: case GGML_OP_COS: case GGML_OP_SUM: case GGML_OP_SUM_ROWS: case GGML_OP_MEAN: case GGML_OP_ARGMAX: { n_tasks = 1; } break; case GGML_OP_COUNT_EQUAL: case GGML_OP_SOLVE_TRI: { n_tasks = n_threads; } break; case GGML_OP_REPEAT: case GGML_OP_REPEAT_BACK: case GGML_OP_LEAKY_RELU: { n_tasks = 1; } break; case GGML_OP_UNARY: switch (ggml_get_unary_op(node)) { case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_SGN: case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_STEP: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_EXP: case GGML_UNARY_OP_SOFTPLUS: case GGML_UNARY_OP_EXPM1: case GGML_UNARY_OP_FLOOR: case GGML_UNARY_OP_CEIL: case GGML_UNARY_OP_ROUND: case GGML_UNARY_OP_TRUNC: { n_tasks = 1; } break; case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_XIELU: { n_tasks = n_threads; } break; default: GGML_ABORT("fatal error"); } break; case GGML_OP_GLU: switch (ggml_get_glu_op(node)) { case GGML_GLU_OP_REGLU: case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_SWIGLU: case GGML_GLU_OP_SWIGLU_OAI: case GGML_GLU_OP_GEGLU_ERF: case GGML_GLU_OP_GEGLU_QUICK: { n_tasks = n_threads; } break; default: GGML_ABORT("fatal error"); } break; case GGML_OP_SILU_BACK: case GGML_OP_MUL: case GGML_OP_DIV: case GGML_OP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: case GGML_OP_L2_NORM: case GGML_OP_GROUP_NORM: case GGML_OP_CONCAT: case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: case GGML_OP_OUT_PROD: { n_tasks = n_threads; } break; case GGML_OP_GET_ROWS: case GGML_OP_SET_ROWS: { // FIXME: get_rows can use additional threads, but the cost of launching additional threads // decreases performance with GPU offloading //n_tasks = n_threads; n_tasks = 1; } break; case GGML_OP_SCALE: case GGML_OP_SET: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: case GGML_OP_GET_ROWS_BACK: case GGML_OP_DIAG: { n_tasks = 1; } break; case GGML_OP_DIAG_MASK_ZERO: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX_BACK: case GGML_OP_ROPE: case GGML_OP_ROPE_BACK: case GGML_OP_ADD_REL_POS: { n_tasks = n_threads; } break; case GGML_OP_CLAMP: { n_tasks = 1; //TODO } break; case GGML_OP_SOFT_MAX: { n_tasks = MIN(n_threads, ggml_nrows(node->src[0])); } break; case GGML_OP_IM2COL: case GGML_OP_IM2COL_BACK: case GGML_OP_IM2COL_3D: case GGML_OP_CONV_2D: case GGML_OP_CONV_3D: case GGML_OP_CONV_2D_DW: case GGML_OP_CONV_TRANSPOSE_1D: case GGML_OP_CONV_TRANSPOSE_2D: { n_tasks = n_threads; } break; case GGML_OP_POOL_1D: case GGML_OP_POOL_2D: case GGML_OP_POOL_2D_BACK: { n_tasks = 1; } break; case GGML_OP_UPSCALE: case GGML_OP_PAD: case GGML_OP_PAD_REFLECT_1D: case GGML_OP_ROLL: case GGML_OP_ARANGE: case 
GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_ARGSORT: case GGML_OP_TOP_K: case GGML_OP_FLASH_ATTN_EXT: case GGML_OP_FLASH_ATTN_BACK: case GGML_OP_SSM_CONV: case GGML_OP_SSM_SCAN: case GGML_OP_RWKV_WKV6: case GGML_OP_GATED_LINEAR_ATTN: case GGML_OP_RWKV_WKV7: { n_tasks = n_threads; } break; case GGML_OP_WIN_PART: case GGML_OP_WIN_UNPART: case GGML_OP_GET_REL_POS: { n_tasks = 1; } break; case GGML_OP_MAP_CUSTOM1: { struct ggml_map_custom1_op_params p; memcpy(&p, node->op_params, sizeof(p)); if (p.n_tasks == GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p.n_tasks, n_threads); } } break; case GGML_OP_MAP_CUSTOM2: { struct ggml_map_custom2_op_params p; memcpy(&p, node->op_params, sizeof(p)); if (p.n_tasks == GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p.n_tasks, n_threads); } } break; case GGML_OP_MAP_CUSTOM3: { struct ggml_map_custom3_op_params p; memcpy(&p, node->op_params, sizeof(p)); if (p.n_tasks == GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p.n_tasks, n_threads); } } break; case GGML_OP_CUSTOM: { struct ggml_custom_op_params p; memcpy(&p, node->op_params, sizeof(p)); if (p.n_tasks == GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p.n_tasks, n_threads); } } break; case GGML_OP_CROSS_ENTROPY_LOSS: case GGML_OP_CROSS_ENTROPY_LOSS_BACK: case GGML_OP_OPT_STEP_ADAMW: case GGML_OP_OPT_STEP_SGD: { n_tasks = n_threads; } break; case GGML_OP_NONE: { n_tasks = 1; } break; case GGML_OP_COUNT: { GGML_ABORT("fatal error"); } default: { fprintf(stderr, "%s: op not implemented: ", __func__); if (node->op < GGML_OP_COUNT) { fprintf(stderr, "%s\n", ggml_op_name(node->op)); } else { fprintf(stderr, "%d\n", node->op); } GGML_ABORT("fatal error"); } } assert(n_tasks > 0); return n_tasks; } static thread_ret_t ggml_graph_compute_secondary_thread(void* data); #if defined(_WIN32) #include "windows.h" // TODO: support > 64 CPUs static bool ggml_thread_apply_affinity(bool * mask) { HANDLE h = GetCurrentThread(); uint64_t bitmask = 0ULL; assert(GGML_MAX_N_THREADS >= 64); for (int32_t i = 0; i < 8; i++) { int32_t idx = i * 8; uint8_t val = 0; val |= mask[idx + 0] << 0; val |= mask[idx + 1] << 1; val |= mask[idx + 2] << 2; val |= mask[idx + 3] << 3; val |= mask[idx + 4] << 4; val |= mask[idx + 5] << 5; val |= mask[idx + 6] << 6; val |= mask[idx + 7] << 7; bitmask |= (uint64_t)val << idx; } for (int32_t i = 64; i < GGML_MAX_N_THREADS; i++) { if (mask[i]) { fprintf(stderr, "warn: setting thread-affinity for > 64 CPUs isn't supported on windows!\n"); break; } } DWORD_PTR m = (DWORD_PTR)bitmask; m = SetThreadAffinityMask(h, m); return m != 0; } static bool ggml_thread_apply_priority(int32_t prio) { // Note that on Windows the Process Priority Class must be updated in order to set Thread priority. // This is up to the applications. DWORD p = THREAD_PRIORITY_NORMAL; switch (prio) { case GGML_SCHED_PRIO_LOW: p = THREAD_PRIORITY_BELOW_NORMAL; break; case GGML_SCHED_PRIO_NORMAL: p = THREAD_PRIORITY_NORMAL; break; case GGML_SCHED_PRIO_MEDIUM: p = THREAD_PRIORITY_ABOVE_NORMAL; break; case GGML_SCHED_PRIO_HIGH: p = THREAD_PRIORITY_HIGHEST; break; case GGML_SCHED_PRIO_REALTIME: p = THREAD_PRIORITY_TIME_CRITICAL; break; } if (prio != GGML_SCHED_PRIO_LOW) { // Tell Windows that this thread should not be throttled (needs its own CPU core). 
// Newer Windows 11 versions aggressively park (offline) CPU cores and often place // all our threads onto the first 4 cores which results in terrible performance with // n_threads > 4 #if _WIN32_WINNT >= 0x0602 THREAD_POWER_THROTTLING_STATE t; ZeroMemory(&t, sizeof(t)); t.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION; t.ControlMask = THREAD_POWER_THROTTLING_EXECUTION_SPEED; t.StateMask = 0; if (!SetThreadInformation(GetCurrentThread(), ThreadPowerThrottling, &t, sizeof(t))) { GGML_LOG_DEBUG("failed to disable thread power throttling %d : (%d)\n", prio, (int) GetLastError()); return false; } #endif } if (prio == GGML_SCHED_PRIO_NORMAL) { // Keep inherited policy/priority return true; } if (!SetThreadPriority(GetCurrentThread(), p)) { fprintf(stderr, "warn: failed to set thread priority %d : (%d)\n", prio, (int) GetLastError()); return false; } return true; } #elif defined(__APPLE__) #include <sys/types.h> #include <sys/resource.h> static bool ggml_thread_apply_affinity(const bool * mask) { // Not supported on Apple platforms UNUSED(mask); return true; } static bool ggml_thread_apply_priority(int32_t prio) { struct sched_param p; int32_t policy = SCHED_OTHER; switch (prio) { // TODO: there seems to be no way to set lower prio on Apple platforms case GGML_SCHED_PRIO_LOW: policy = SCHED_OTHER; p.sched_priority = 0; break; case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break; } if (prio == GGML_SCHED_PRIO_NORMAL) { // Keep inherited policy/priority return true; } int32_t err = pthread_setschedparam(pthread_self(), policy, &p); if (err != 0) { fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err); return false; } return true; } #elif defined(__gnu_linux__) // TODO: this may not work on BSD, to be verified static bool ggml_thread_apply_affinity(const bool * mask) { cpu_set_t cpuset; int err; CPU_ZERO(&cpuset); for (uint32_t i = 0; i < GGML_MAX_N_THREADS; i++) { if (mask[i]) { GGML_PRINT_DEBUG("Thread %lx: adding %d to cpuset\n", pthread_self(), i); CPU_SET(i, &cpuset); } } #ifdef __ANDROID__ err = sched_setaffinity(0, sizeof(cpuset), &cpuset); if (err < 0) { err = errno; } #else err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); #endif if (err != 0) { fprintf(stderr, "warn: failed to set affinity mask 0x%llx : %s (%d)\n", (unsigned long long)mask, strerror(err), err); return false; } return true; } static bool ggml_thread_apply_priority(int32_t prio) { struct sched_param p; int32_t policy = SCHED_OTHER; switch (prio) { case GGML_SCHED_PRIO_LOW: policy = SCHED_BATCH; p.sched_priority = 0; break; case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break; } if (prio == GGML_SCHED_PRIO_NORMAL) { // Keep inherited policy/priority return true; } int32_t err = pthread_setschedparam(pthread_self(), policy, &p); if (err != 0) { fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err); return false; } return true; } #else // unsupported platforms static bool ggml_thread_apply_affinity(const bool * mask) { 
UNUSED(mask); return true; } static bool ggml_thread_apply_priority(int32_t prio) { UNUSED(prio); return true; } #endif static bool ggml_thread_cpumask_is_valid(const bool * mask) { for (int i = 0; i < GGML_MAX_N_THREADS; i++) { if (mask[i]) { return true; } } return false; } static void ggml_thread_cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t* iter) { if (!strict) { memcpy(local_mask, global_mask, GGML_MAX_N_THREADS); return; } else { memset(local_mask, 0, GGML_MAX_N_THREADS); int32_t base_idx = *iter; for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) { int32_t idx = base_idx + i; if (idx >= GGML_MAX_N_THREADS) { // Just a cheaper modulo idx -= GGML_MAX_N_THREADS; } if (global_mask[idx]) { local_mask[idx] = 1; *iter = idx + 1; return; } } } } void ggml_threadpool_free(struct ggml_threadpool* threadpool) { if (!threadpool) return; const int n_threads = threadpool->n_threads; #ifndef GGML_USE_OPENMP struct ggml_compute_state* workers = threadpool->workers; ggml_mutex_lock(&threadpool->mutex); threadpool->stop = true; threadpool->pause = false; ggml_cond_broadcast(&threadpool->cond); ggml_mutex_unlock(&threadpool->mutex); for (int j = 1; j < n_threads; j++) { int32_t rc = ggml_thread_join(workers[j].thrd, NULL); GGML_ASSERT(rc == GGML_EXIT_SUCCESS || rc == GGML_EXIT_ABORTED); UNUSED(rc); } ggml_mutex_destroy(&threadpool->mutex); ggml_cond_destroy(&threadpool->cond); #endif // GGML_USE_OPENMP const size_t workers_size = sizeof(struct ggml_compute_state) * n_threads; ggml_aligned_free(threadpool->workers, workers_size); ggml_aligned_free(threadpool, sizeof(struct ggml_threadpool)); } #ifndef GGML_USE_OPENMP // pause/resume must be called under mutex static void ggml_threadpool_pause_locked(struct ggml_threadpool * threadpool) { GGML_PRINT_DEBUG("Pausing threadpool\n"); threadpool->pause = true; ggml_cond_broadcast(&threadpool->cond); } static void ggml_threadpool_resume_locked(struct ggml_threadpool * threadpool) { GGML_PRINT_DEBUG("Resuming threadpool\n"); threadpool->pause = false; ggml_cond_broadcast(&threadpool->cond); } #endif void ggml_threadpool_pause(struct ggml_threadpool * threadpool) { #ifndef GGML_USE_OPENMP ggml_mutex_lock(&threadpool->mutex); if (!threadpool->pause) { ggml_threadpool_pause_locked(threadpool); } ggml_mutex_unlock(&threadpool->mutex); #else UNUSED(threadpool); #endif } void ggml_threadpool_resume(struct ggml_threadpool * threadpool) { #ifndef GGML_USE_OPENMP ggml_mutex_lock(&threadpool->mutex); if (threadpool->pause) { ggml_threadpool_resume_locked(threadpool); } ggml_mutex_unlock(&threadpool->mutex); #else UNUSED(threadpool); #endif } struct ggml_cplan ggml_graph_plan( const struct ggml_cgraph * cgraph, int n_threads, struct ggml_threadpool * threadpool) { if (threadpool == NULL) { //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads); } if (n_threads <= 0) { n_threads = threadpool ? 
threadpool->n_threads : GGML_DEFAULT_N_THREADS; } #if defined(__EMSCRIPTEN__) && !defined(__EMSCRIPTEN_PTHREADS__) // Emscripten without pthreads support can only use a single thread n_threads = 1; #endif size_t work_size = 0; struct ggml_cplan cplan; memset(&cplan, 0, sizeof(struct ggml_cplan)); int max_tasks = 1; // thread scheduling for the different operations + work buffer size estimation for (int i = 0; i < cgraph->n_nodes; i++) { struct ggml_tensor * node = cgraph->nodes[i]; const int n_tasks = ggml_get_n_tasks(node, n_threads); max_tasks = MAX(max_tasks, n_tasks); size_t cur = 0; if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) { switch (node->op) { case GGML_OP_CPY: case GGML_OP_DUP: { if (ggml_is_quantized(node->type) || // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32 (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) || (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16) || // conversion between F32 and I32 (node->src[0]->type == GGML_TYPE_F32 && node->src[1] && node->src[1]->type == GGML_TYPE_I32) || (node->src[0]->type == GGML_TYPE_I32 && node->src[1] && node->src[1]->type == GGML_TYPE_F32)) { cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; } } break; case GGML_OP_ADD: case GGML_OP_ADD_ID: case GGML_OP_ADD1: { if (ggml_is_quantized(node->src[0]->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } } break; case GGML_OP_ACC: { if (ggml_is_quantized(node->src[0]->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks; } } break; case GGML_OP_COUNT_EQUAL: { cur = ggml_type_size(node->type)*n_tasks; } break; case GGML_OP_MUL_MAT: { const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type; if (node->src[1]->type != vec_dot_type) { cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1])); } } break; case GGML_OP_MUL_MAT_ID: { cur = 0; const struct ggml_tensor * src0 = node->src[0]; const struct ggml_tensor * src1 = node->src[1]; const struct ggml_tensor * ids = node->src[2]; const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type; const int n_as = src0->ne[2]; // src1 if (src1->type != vec_dot_type) { cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t); } // matrix_row_counts cur += n_as * sizeof(int64_t) + sizeof(int64_t); // matrix_rows cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t); // atomic_current_chunk cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE; } break; case GGML_OP_OUT_PROD: { if (ggml_is_quantized(node->src[0]->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } } break; case GGML_OP_SOFT_MAX: case GGML_OP_ROPE: case GGML_OP_ROPE_BACK: { cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; } break; case GGML_OP_CONV_TRANSPOSE_1D: { GGML_ASSERT(node->src[0]->ne[3] == 1); GGML_ASSERT(node->src[1]->ne[2] == 1); GGML_ASSERT(node->src[1]->ne[3] == 1); const int64_t ne00 = node->src[0]->ne[0]; // K const int64_t ne01 = node->src[0]->ne[1]; // Cout const int64_t ne02 = node->src[0]->ne[2]; // Cin const int64_t ne10 = node->src[1]->ne[0]; // L const int64_t ne11 = node->src[1]->ne[1]; // Cin if ((node->src[0]->type == GGML_TYPE_F16 || node->src[0]->type == GGML_TYPE_BF16) && node->src[1]->type == GGML_TYPE_F32) { cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02; cur += sizeof(ggml_fp16_t)*ne10*ne11; } else if (node->src[0]->type == GGML_TYPE_F32 && node->src[1]->type 
== GGML_TYPE_F32) { cur += sizeof(float)*ne00*ne01*ne02; cur += sizeof(float)*ne10*ne11; } else { GGML_ABORT("fatal error"); } } break; case GGML_OP_CONV_2D: case GGML_OP_CONV_3D: { cur = GGML_IM2COL_WORK_SIZE; } break; case GGML_OP_CONV_TRANSPOSE_2D: { const int64_t ne00 = node->src[0]->ne[0]; // W const int64_t ne01 = node->src[0]->ne[1]; // H const int64_t ne02 = node->src[0]->ne[2]; // Channels Out const int64_t ne03 = node->src[0]->ne[3]; // Channels In const int64_t ne10 = node->src[1]->ne[0]; // W const int64_t ne11 = node->src[1]->ne[1]; // H const int64_t ne12 = node->src[1]->ne[2]; // Channels In cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03; cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12; } break; case GGML_OP_TOP_K: { cur += sizeof(int32_t)*node->src[0]->ne[0]*n_tasks; } break; case GGML_OP_FLASH_ATTN_EXT: { const int64_t ne10 = node->src[1]->ne[0]; // DK const int64_t ne20 = node->src[2]->ne[0]; // DV cur = sizeof(float)*(1*ne10 + 2*ne20)*n_tasks; // 1x head size K + 2x head size V (per thread) } break; case GGML_OP_FLASH_ATTN_BACK: { const int64_t D = node->src[0]->ne[0]; const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back if (node->src[1]->type == GGML_TYPE_F32) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } else if (node->src[1]->type == GGML_TYPE_F16) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } else if (node->src[1]->type == GGML_TYPE_BF16) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } } break; case GGML_OP_CROSS_ENTROPY_LOSS: { cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); } break; case GGML_OP_COUNT: { GGML_ABORT("fatal error"); } default: break; } } work_size = MAX(work_size, cur); } if (work_size > 0) { work_size += CACHE_LINE_SIZE*(n_threads); } cplan.threadpool = threadpool; cplan.n_threads = MIN(max_tasks, n_threads); cplan.work_size = work_size; cplan.work_data = NULL; return cplan; } static thread_ret_t ggml_graph_compute_thread(void * data) { struct ggml_compute_state * state = (struct ggml_compute_state *) data; struct ggml_threadpool * tp = state->threadpool; const struct ggml_cgraph * cgraph = tp->cgraph; const struct ggml_cplan * cplan = tp->cplan; set_numa_thread_affinity(state->ith); struct ggml_compute_params params = { /*.ith =*/ state->ith, /*.nth =*/ atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK, /*.wsize =*/ cplan->work_size, /*.wdata =*/ cplan->work_data, /*.threadpool=*/ tp, }; GGML_PRINT_DEBUG("thread #%d compute-start cplan %p last-graph %d \n", state->ith, cplan, state->last_graph); for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) { struct ggml_tensor * node = cgraph->nodes[node_n]; if (ggml_op_is_empty(node->op)) { // skip NOPs continue; } ggml_compute_forward(¶ms, node); if (state->ith == 0 && cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) { atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed); tp->ec = GGML_STATUS_ABORTED; } if (node_n + 1 < cgraph->n_nodes) { ggml_barrier(state->threadpool); } } GGML_PRINT_DEBUG("thread #%d compute-done cplan %p 
last-graph %d \n", state->ith, cplan, state->last_graph); ggml_barrier(state->threadpool); return 0; } #ifndef GGML_USE_OPENMP // check if thread is ready to proceed (exit from polling or sleeping) // returns true if loops should exit, sets state->pending to indicate new work static inline bool ggml_graph_compute_thread_ready(struct ggml_compute_state * state) { struct ggml_threadpool * threadpool = state->threadpool; if (state->pending || threadpool->stop || threadpool->pause) { return true; } // check for new graph/work int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed); int n_threads = n_graph & GGML_THREADPOOL_N_THREADS_MASK; if (n_graph != state->last_graph) { state->pending = (state->ith < n_threads); state->last_graph = n_graph; return true; } return false; } // sync thread state after polling static inline void ggml_graph_compute_thread_sync(struct ggml_compute_state * state) { // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead #ifdef GGML_TSAN_ENABLED atomic_fetch_add_explicit(&state->threadpool->n_graph, 0, memory_order_seq_cst); #else atomic_thread_fence(memory_order_seq_cst); #endif UNUSED(state); } static inline bool ggml_graph_compute_poll_for_work(struct ggml_compute_state * state) { struct ggml_threadpool * threadpool = state->threadpool; // This seems to make 0 ... 100 a decent range for polling level across modern processors. // Perhaps, we can adjust it dynamically based on load and things. const uint64_t n_rounds = 1024UL * 128 * threadpool->poll; for (uint64_t i=0; !ggml_graph_compute_thread_ready(state) && i < n_rounds; i++) { // No new work. Keep polling. ggml_thread_cpu_relax(); } return state->pending; } static inline bool ggml_graph_compute_check_for_work(struct ggml_compute_state * state) { struct ggml_threadpool * threadpool = state->threadpool; if (ggml_graph_compute_poll_for_work(state)) { ggml_graph_compute_thread_sync(state); return state->pending; } ggml_mutex_lock_shared(&threadpool->mutex); while (!ggml_graph_compute_thread_ready(state)) { // No new work. Wait for the signal. 
GGML_PRINT_DEBUG("thread #%d waiting for work (sleeping)\n", state->ith); ggml_cond_wait(&threadpool->cond, &threadpool->mutex); } ggml_mutex_unlock_shared(&threadpool->mutex); return state->pending; } static thread_ret_t ggml_graph_compute_secondary_thread(void* data) { struct ggml_compute_state * state = (struct ggml_compute_state *) data; struct ggml_threadpool * threadpool = state->threadpool; ggml_thread_apply_priority(threadpool->prio); if (ggml_thread_cpumask_is_valid(state->cpumask)) { ggml_thread_apply_affinity(state->cpumask); } while (true) { // Check if we need to sleep while (threadpool->pause) { GGML_PRINT_DEBUG("thread #%d inside pause loop\n", state->ith); ggml_mutex_lock_shared(&threadpool->mutex); if (threadpool->pause) { ggml_cond_wait(&threadpool->cond, &threadpool->mutex); } GGML_PRINT_DEBUG("thread #%d resuming after wait\n", state->ith); ggml_mutex_unlock_shared(&threadpool->mutex); } // This needs to be checked for after the cond_wait if (threadpool->stop) break; // Check if there is new work // The main thread is the only one that can dispatch new work ggml_graph_compute_check_for_work(state); if (state->pending) { state->pending = false; ggml_graph_compute_thread(state); } } return (thread_ret_t) 0; } // Start processing new graph static void ggml_graph_compute_kickoff(struct ggml_threadpool * threadpool, int n_threads) { // Always take the mutex here because the worker threads are doing hybrid poll/wait ggml_mutex_lock(&threadpool->mutex); // Update the number of active threads and the graph count int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed) >> GGML_THREADPOOL_N_THREADS_BITS; n_graph = ((n_graph + 1) << GGML_THREADPOOL_N_THREADS_BITS) | (n_threads & GGML_THREADPOOL_N_THREADS_MASK); GGML_PRINT_DEBUG("compute-kickoff: n_threads %d n_graph %d\n", n_threads, n_graph); // Indicate the graph is ready to be processed // We need the full seq-cst fence here because of the polling threads (used in thread_sync) atomic_store_explicit(&threadpool->n_graph, n_graph, memory_order_seq_cst); if (threadpool->pause) { // Update main thread prio and affinity to match the threadpool settings ggml_thread_apply_priority(threadpool->prio); if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) { ggml_thread_apply_affinity(threadpool->workers[0].cpumask); } // resume does cond broadcast ggml_threadpool_resume_locked(threadpool); } else { ggml_cond_broadcast(&threadpool->cond); } ggml_mutex_unlock(&threadpool->mutex); } #endif // GGML_USE_OPENMP static struct ggml_threadpool * ggml_threadpool_new_impl( struct ggml_threadpool_params * tpp, struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { struct ggml_threadpool * threadpool = ggml_aligned_malloc(sizeof(struct ggml_threadpool)); { threadpool->cgraph = cgraph; threadpool->cplan = cplan; threadpool->n_graph = 0; threadpool->n_barrier = 0; threadpool->n_barrier_passed = 0; threadpool->current_chunk = 0; threadpool->stop = false; threadpool->pause = tpp->paused; threadpool->abort = -1; threadpool->workers = NULL; threadpool->n_threads = tpp->n_threads; threadpool->poll = tpp->poll; threadpool->prio = tpp->prio; threadpool->ec = GGML_STATUS_SUCCESS; } // Allocate and init workers state const size_t workers_size = sizeof(struct ggml_compute_state) * tpp->n_threads; struct ggml_compute_state * workers = ggml_aligned_malloc(workers_size); memset(workers, 0, workers_size); for (int j = 0; j < tpp->n_threads; j++) { workers[j].threadpool = threadpool; workers[j].ith = j; } threadpool->workers = 
workers; #ifdef GGML_USE_OPENMP int32_t cpumask_iter = 0; // Compute CPU masks for each thread for (int j = 0; j < tpp->n_threads; j++) { ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter); } #else // GGML_USE_OPENMP ggml_mutex_init(&threadpool->mutex); ggml_cond_init(&threadpool->cond); // Spin the threads for all workers, and update CPU placements. // Place the main thread last (towards the higher numbered CPU cores). int32_t cpumask_iter = 0; for (int j = 1; j < tpp->n_threads; j++) { ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter); int32_t rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_secondary_thread, &workers[j]); GGML_ASSERT(rc == 0); } ggml_thread_cpumask_next(tpp->cpumask, workers[0].cpumask, tpp->strict_cpu, &cpumask_iter); if (!threadpool->pause) { // Update main thread prio and affinity at the start, otherwise we'll do it in resume ggml_thread_apply_priority(threadpool->prio); if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) { ggml_thread_apply_affinity(threadpool->workers[0].cpumask); } } #endif // GGML_USE_OPENMP return threadpool; } struct ggml_threadpool * ggml_threadpool_new(struct ggml_threadpool_params * tpp) { return ggml_threadpool_new_impl(tpp, NULL, NULL); } enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { ggml_cpu_init(); GGML_ASSERT(cplan); GGML_ASSERT(cplan->n_threads > 0); GGML_ASSERT(cplan->work_size == 0 || cplan->work_data != NULL); int n_threads = cplan->n_threads; struct ggml_threadpool * threadpool = cplan->threadpool; bool disposable_threadpool = false; if (threadpool == NULL) { //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads); disposable_threadpool = true; struct ggml_threadpool_params ttp = ggml_threadpool_params_default(n_threads); threadpool = ggml_threadpool_new_impl(&ttp, cgraph, cplan); } else { // Reset some of the parameters that need resetting // No worker threads should be accessing the parameters below at this stage threadpool->cgraph = cgraph; threadpool->cplan = cplan; threadpool->current_chunk = 0; threadpool->abort = -1; threadpool->ec = GGML_STATUS_SUCCESS; } #ifdef GGML_USE_OPENMP if (n_threads > 1) { #pragma omp parallel num_threads(n_threads) { #pragma omp single { // update the number of threads from the actual number of threads that we got from OpenMP n_threads = omp_get_num_threads(); atomic_store_explicit(&threadpool->n_graph, n_threads, memory_order_relaxed); } // Apply thread CPU mask and priority int ith = omp_get_thread_num(); ggml_thread_apply_priority(threadpool->prio); if (ggml_thread_cpumask_is_valid(threadpool->workers[ith].cpumask)) { ggml_thread_apply_affinity(threadpool->workers[ith].cpumask); } ggml_graph_compute_thread(&threadpool->workers[ith]); } } else { atomic_store_explicit(&threadpool->n_graph, 1, memory_order_relaxed); ggml_graph_compute_thread(&threadpool->workers[0]); } #else if (n_threads > threadpool->n_threads) { GGML_LOG_WARN("cplan requested more threads (%d) than available (%d)\n", n_threads, threadpool->n_threads); n_threads = threadpool->n_threads; } // Kick all threads to start the new graph ggml_graph_compute_kickoff(threadpool, n_threads); // This is a work thread too ggml_graph_compute_thread(&threadpool->workers[0]); #endif // don't leave affinity set on the main thread clear_numa_thread_affinity(); enum ggml_status ret = threadpool->ec; if (disposable_threadpool) { 
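// The pool above existed only for this one call. Callers that compute many graphs can
// avoid the per-call create/destroy by supplying their own threadpool; a minimal sketch:
//
//   struct ggml_threadpool_params tpp = ggml_threadpool_params_default(n_threads);
//   struct ggml_threadpool * tp = ggml_threadpool_new(&tpp);
//   struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads, tp);
//   /* allocate cplan.work_data of cplan.work_size bytes, then: */
//   ggml_graph_compute(cgraph, &cplan);
//   ggml_threadpool_free(tp);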
ggml_threadpool_free(threadpool); } return ret; } enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) { struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads, NULL); cplan.work_data = (uint8_t *)ggml_new_buffer(ctx, cplan.work_size); return ggml_graph_compute(cgraph, &cplan); } void ggml_cpu_fp32_to_fp32(const float * x, float * y, int64_t n) { memcpy(y, x, n * sizeof(float)); } void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { int64_t i = 0; #if defined(__F16C__) #if defined(__AVX512F__) for (; i + 15 < n; i += 16) { __m512 x_vec = _mm512_loadu_ps(x + i); __m256i y_vec = _mm512_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); _mm256_storeu_si256((__m256i *)(y + i), y_vec); } #endif for (; i + 7 < n; i += 8) { __m256 x_vec = _mm256_loadu_ps(x + i); __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); _mm_storeu_si128((__m128i *)(y + i), y_vec); } for (; i + 3 < n; i += 4) { __m128 x_vec = _mm_loadu_ps(x + i); __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); _mm_storel_epi64((__m128i *)(y + i), y_vec); } #elif defined(__riscv_zvfh) for (int vl; i < n; i += vl) { vl = __riscv_vsetvl_e32m2(n - i); vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl); vfloat16m1_t vy = __riscv_vfncvt_f_f_w_f16m1(vx, vl); __riscv_vse16_v_f16m1((_Float16 *)&y[i], vy, vl); } #endif for (; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(x[i]); } } void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) { int64_t i = 0; #if defined(__F16C__) #if defined(__AVX512F__) for (; i + 15 < n; i += 16) { __m256i x_vec = _mm256_loadu_si256((const __m256i *)(x + i)); __m512 y_vec = _mm512_cvtph_ps(x_vec); _mm512_storeu_ps(y + i, y_vec); } #endif for (; i + 7 < n; i += 8) { __m128i x_vec = _mm_loadu_si128((const __m128i *)(x + i)); __m256 y_vec = _mm256_cvtph_ps(x_vec); _mm256_storeu_ps(y + i, y_vec); } for (; i + 3 < n; i += 4) { __m128i x_vec = _mm_loadl_epi64((const __m128i *)(x + i)); __m128 y_vec = _mm_cvtph_ps(x_vec); _mm_storeu_ps(y + i, y_vec); } #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfhmin) // calculate step size const int epr = __riscv_vsetvlmax_e16m2(); const int step = epr * 2; const int np = (n & ~(step - 1)); // unroll by 2 for (; i < np; i += step) { vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, epr); vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, epr); __riscv_vse32_v_f32m4(y + i, ay0, epr); vfloat16m2_t ax1 = __riscv_vle16_v_f16m2((const _Float16*)x + i + epr, epr); vfloat32m4_t ay1 = __riscv_vfwcvt_f_f_v_f32m4(ax1, epr); __riscv_vse32_v_f32m4(y + i + epr, ay1, epr); } // leftovers int vl; for (i = np; i < n; i += vl) { vl = __riscv_vsetvl_e16m2(n - i); vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, vl); vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, vl); __riscv_vse32_v_f32m4(y + i, ay0, vl); } #endif for (; i < n; ++i) { y[i] = GGML_CPU_FP16_TO_FP32(x[i]); } } void ggml_cpu_fp32_to_bf16(const float * x, ggml_bf16_t * y, int64_t n) { int64_t i = 0; for (; i < n; ++i) { y[i] = GGML_FP32_TO_BF16(x[i]); } } void ggml_cpu_fp32_to_i32(const float * x, int32_t * y, int64_t n) { int64_t i = 0; for (; i < n; ++i) { y[i] = x[i]; } } void ggml_cpu_bf16_to_fp32(const ggml_bf16_t * x, float * y, int64_t n) { int64_t i = 0; #if defined(__AVX2__) #if defined(__AVX512F__) for (; i + 15 < n; i += 16) { _mm512_storeu_ps(y + i, _mm512_castsi512_ps( _mm512_slli_epi32( _mm512_cvtepu16_epi32( _mm256_loadu_si256( (const __m256i *)(x + i))), 
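// bf16 -> f32: a bf16 value is just the upper 16 bits of an IEEE-754 float, so widening
// each u16 lane to u32 and shifting it left by 16 reconstructs the f32 value directly.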
16))); } #endif for (; i + 7 < n; i += 8) { _mm256_storeu_ps(y + i, _mm256_castsi256_ps( _mm256_slli_epi32( _mm256_cvtepu16_epi32( _mm_loadu_si128( (const __m128i *)(x + i))), 16))); } #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfbfmin) // calculate step size const int epr = __riscv_vsetvlmax_e16m2(); const int step = epr * 2; const int np = (n & ~(step - 1)); // unroll by 2 for (; i < np; i += step) { vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, epr); vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, epr); __riscv_vse32_v_f32m4(y + i, ay0, epr); vbfloat16m2_t ax1 = __riscv_vle16_v_bf16m2((const __bf16*)x + i + epr, epr); vfloat32m4_t ay1 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax1, epr); __riscv_vse32_v_f32m4(y + i + epr, ay1, epr); } // leftovers int vl; for (i = np; i < n; i += vl) { vl = __riscv_vsetvl_e16m2(n - i); vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, vl); vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, vl); __riscv_vse32_v_f32m4(y + i, ay0, vl); } #endif for (; i < n; i++) { y[i] = GGML_BF16_TO_FP32(x[i]); } } int ggml_cpu_has_avx(void) { #if defined(__AVX__) return 1; #else return 0; #endif } int ggml_cpu_has_avx_vnni(void) { #if defined(__AVXVNNI__) return 1; #else return 0; #endif } int ggml_cpu_has_avx2(void) { #if defined(__AVX2__) return 1; #else return 0; #endif } int ggml_cpu_has_avx512(void) { #if defined(__AVX512F__) return 1; #else return 0; #endif } int ggml_cpu_has_avx512_vbmi(void) { #if defined(__AVX512VBMI__) return 1; #else return 0; #endif } int ggml_cpu_has_avx512_vnni(void) { #if defined(__AVX512VNNI__) return 1; #else return 0; #endif } int ggml_cpu_has_avx512_bf16(void) { #if defined(__AVX512BF16__) return 1; #else return 0; #endif } int ggml_cpu_has_amx_int8(void) { #if defined(__AMX_INT8__) return 1; #else return 0; #endif } int ggml_cpu_has_bmi2(void) { #if defined(__BMI2__) return 1; #else return 0; #endif } int ggml_cpu_has_fma(void) { #if defined(__FMA__) return 1; #else return 0; #endif } int ggml_cpu_has_arm_fma(void) { #if defined(__ARM_FEATURE_FMA) return 1; #else return 0; #endif } int ggml_cpu_has_riscv_v(void) { #if defined(__riscv_v_intrinsic) return 1; #else return 0; #endif } int ggml_cpu_get_rvv_vlen(void) { #if defined(__riscv) && defined(__riscv_v_intrinsic) return ggml_riscv_arch_features.rvv_vlen; #else return 0; #endif } int ggml_cpu_has_f16c(void) { #if defined(__F16C__) return 1; #else return 0; #endif } int ggml_cpu_has_fp16_va(void) { #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) return 1; #else return 0; #endif } int ggml_cpu_has_wasm_simd(void) { #if defined(__wasm_simd128__) return 1; #else return 0; #endif } int ggml_cpu_has_llamafile(void) { #if defined(GGML_USE_LLAMAFILE) return 1; #else return 0; #endif } int ggml_cpu_has_sse3(void) { #if defined(__SSE3__) return 1; #else return 0; #endif } int ggml_cpu_has_ssse3(void) { #if defined(__SSSE3__) return 1; #else return 0; #endif } int ggml_cpu_has_vsx(void) { #if defined(__POWER9_VECTOR__) return 1; #else return 0; #endif } int ggml_cpu_has_vxe(void) { #if defined(__VXE__) || defined(__VXE2__) return 1; #else return 0; #endif } int ggml_cpu_has_neon(void) { #if defined(__ARM_ARCH) && defined(__ARM_NEON) return 1; #else return 0; #endif } int ggml_cpu_has_dotprod(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD) return 1; #else return 0; #endif } int ggml_cpu_has_sve(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) return 1; #else return 0; #endif } int ggml_cpu_has_matmul_int8(void) { 
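// As with the other ggml_cpu_has_* helpers, this reflects the feature macros the binary
// was compiled with rather than runtime CPU detection; dynamically loaded CPU backends
// report the same information through ggml_backend_cpu_get_features() instead.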
#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8) return 1; #else return 0; #endif } int ggml_cpu_get_sve_cnt(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) return ggml_arm_arch_features.sve_cnt; #else return 0; #endif } int ggml_cpu_has_sme(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME) return 1; #else return 0; #endif } void ggml_cpu_init(void) { // needed to initialize ggml_time { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); ggml_free(ctx); } ggml_critical_section_start(); static bool is_first_call = true; if (is_first_call) { // initialize GELU, Quick GELU, SILU and EXP F32 tables { const uint64_t t_start = ggml_time_us(); UNUSED(t_start); for (int i = 0; i < (1 << 16); ++i) { union { uint16_t u16; ggml_fp16_t fp16; } u = {i}; float f = GGML_COMPUTE_FP16_TO_FP32(u.fp16); ggml_table_f32_f16[i] = f; ggml_table_gelu_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f)); ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0); #ifdef GGML_USE_OPENMP //if (!getenv("OMP_WAIT_POLICY")) { // // set the wait policy to active, so that OpenMP threads don't sleep // setenv("OMP_WAIT_POLICY", "active", 0) //} if (!getenv("KMP_BLOCKTIME")) { // set the time to wait before sleeping a thread // this is less aggressive than setting the wait policy to active, but should achieve similar results in most cases #ifdef _WIN32 _putenv_s("KMP_BLOCKTIME", "200"); // 200ms #else setenv("KMP_BLOCKTIME", "200", 0); // 200ms #endif } #endif } #if defined(__ARM_ARCH) ggml_init_arm_arch_features(); #endif #if defined(__riscv) ggml_init_riscv_arch_features(); #endif is_first_call = false; } ggml_critical_section_end(); } ggml-org-ggml-3678254/src/ggml-cpu/ggml-cpu.cpp000066400000000000000000000556251512524704700211220ustar00rootroot00000000000000#include "ggml-backend.h" #include "ggml-backend-impl.h" #include "ggml-cpu.h" #include "repack.h" #include "traits.h" #include "ggml-impl.h" #include "amx/amx.h" #include #include #include #ifdef GGML_USE_CPU_HBM # include "hbm.h" #endif #ifdef GGML_USE_CPU_KLEIDIAI # include "kleidiai/kleidiai.h" #endif #ifdef GGML_USE_CPU_RISCV64_SPACEMIT # include "spacemit/ime.h" #endif #if defined(_WIN32) # define WIN32_LEAN_AND_MEAN # ifndef NOMINMAX # define NOMINMAX # endif # include #else # include #endif #if defined(__APPLE__) # include # include #endif // ggml-backend interface std::vector & ggml_backend_cpu_get_extra_buffer_types() { static std::vector bufts = []() { std::vector bufts; #if defined(__AMX_INT8__) && defined(__AVX512VNNI__) if (ggml_backend_amx_buffer_type()) { bufts.push_back(ggml_backend_amx_buffer_type()); } #endif #ifdef GGML_USE_CPU_RISCV64_SPACEMIT if (ggml_backend_cpu_riscv64_spacemit_buffer_type()) { bufts.push_back(ggml_backend_cpu_riscv64_spacemit_buffer_type()); } #endif #ifdef GGML_USE_CPU_KLEIDIAI if (ggml_backend_cpu_kleidiai_buffer_type()) { bufts.push_back(ggml_backend_cpu_kleidiai_buffer_type()); } #endif #ifdef GGML_USE_CPU_REPACK if (ggml_backend_cpu_repack_buffer_type()) { bufts.push_back(ggml_backend_cpu_repack_buffer_type()); } #endif return bufts; }(); return bufts; } static ggml_backend_buffer_type_t * ggml_backend_cpu_device_get_extra_buffers_type(ggml_backend_dev_t device) { static std::vector extra_bufts = [] { std::vector bufts = 
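// Copy the extra buffer type list and append a nullptr sentinel so that C callers can
// iterate the returned array without a separate element count.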
ggml_backend_cpu_get_extra_buffer_types(); bufts.push_back(nullptr); return bufts; }(); return extra_bufts.data(); GGML_UNUSED(device); } static bool ggml_backend_cpu_is_extra_buffer_type(ggml_backend_buffer_type_t buft) { for (auto * extra : ggml_backend_cpu_get_extra_buffer_types()) { if (extra == buft) { return true; } } return false; } // CPU backend - backend (stream) struct ggml_backend_cpu_context { int n_threads; ggml_threadpool_t threadpool; uint8_t * work_data; size_t work_size; ggml_abort_callback abort_callback; void * abort_callback_data; }; static const char * ggml_backend_cpu_get_name(ggml_backend_t backend) { return "CPU"; GGML_UNUSED(backend); } static void ggml_backend_cpu_free(ggml_backend_t backend) { struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; delete[] cpu_ctx->work_data; delete cpu_ctx; delete backend; } struct ggml_backend_plan_cpu { struct ggml_cplan cplan; struct ggml_cgraph cgraph; }; static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, const struct ggml_cgraph * cgraph) { struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; struct ggml_backend_plan_cpu * cpu_plan = new ggml_backend_plan_cpu; cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads, cpu_ctx->threadpool); cpu_plan->cgraph = *cgraph; // FIXME: deep copy if (cpu_plan->cplan.work_size > 0) { cpu_plan->cplan.work_data = new uint8_t[cpu_plan->cplan.work_size]; if (cpu_plan->cplan.work_data == NULL) { delete cpu_plan; return NULL; } } cpu_plan->cplan.abort_callback = cpu_ctx->abort_callback; cpu_plan->cplan.abort_callback_data = cpu_ctx->abort_callback_data; return cpu_plan; } static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan; delete[] cpu_plan->cplan.work_data; delete cpu_plan; GGML_UNUSED(backend); } static enum ggml_status ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan; return ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan); GGML_UNUSED(backend); } static enum ggml_status ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads, cpu_ctx->threadpool); if (cpu_ctx->work_size < cplan.work_size) { delete[] cpu_ctx->work_data; cpu_ctx->work_data = new uint8_t[cplan.work_size]; if (cpu_ctx->work_data == NULL) { cpu_ctx->work_size = 0; return GGML_STATUS_ALLOC_FAILED; } cpu_ctx->work_size = cplan.work_size; } cplan.work_data = (uint8_t *)cpu_ctx->work_data; cplan.abort_callback = cpu_ctx->abort_callback; cplan.abort_callback_data = cpu_ctx->abort_callback_data; return ggml_graph_compute(cgraph, &cplan); } static const struct ggml_backend_i ggml_backend_cpu_i = { /* .get_name = */ ggml_backend_cpu_get_name, /* .free = */ ggml_backend_cpu_free, /* .set_tensor_async = */ NULL, /* .get_tensor_async = */ NULL, /* .cpy_tensor_async = */ NULL, /* .synchronize = */ NULL, /* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create, /* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ ggml_backend_cpu_graph_plan_compute, /* .graph_compute = */ 
ggml_backend_cpu_graph_compute, /* .event_record = */ NULL, /* .event_wait = */ NULL, /* .graph_optimize = */ NULL, }; static ggml_guid_t ggml_backend_cpu_guid(void) { static ggml_guid guid = { 0xaa, 0x67, 0xc7, 0x43, 0x96, 0xe6, 0xa3, 0x8a, 0xe3, 0xaf, 0xea, 0x92, 0x36, 0xbc, 0xfc, 0x89 }; return &guid; } ggml_backend_t ggml_backend_cpu_init(void) { // initialize CPU backend now to avoid slowing the first graph computation ggml_cpu_init(); struct ggml_backend_cpu_context * ctx = new ggml_backend_cpu_context; if (ctx == NULL) { return NULL; } ctx->n_threads = GGML_DEFAULT_N_THREADS; ctx->threadpool = NULL; ctx->work_data = NULL; ctx->work_size = 0; ctx->abort_callback = NULL; ctx->abort_callback_data = NULL; ggml_backend_t cpu_backend = new ggml_backend { /* .guid = */ ggml_backend_cpu_guid(), /* .iface = */ ggml_backend_cpu_i, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ ctx, }; if (cpu_backend == NULL) { delete ctx; return NULL; } return cpu_backend; } bool ggml_backend_is_cpu(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_cpu_guid()); } void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) { GGML_ASSERT(ggml_backend_is_cpu(backend_cpu)); struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context; ctx->n_threads = n_threads; } void ggml_backend_cpu_set_threadpool(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool) { GGML_ASSERT(ggml_backend_is_cpu(backend_cpu)); struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context; if (ctx->threadpool && ctx->threadpool != threadpool) { // already had a different threadpool, pause/suspend it before switching ggml_threadpool_pause(ctx->threadpool); } ctx->threadpool = threadpool; } void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data) { GGML_ASSERT(ggml_backend_is_cpu(backend_cpu)); struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context; ctx->abort_callback = abort_callback; ctx->abort_callback_data = abort_callback_data; } // CPU backend - device struct ggml_backend_cpu_device_context { std::string description = "CPU"; ggml_backend_cpu_device_context() { #ifdef __APPLE__ size_t len = 0; if (!sysctlbyname("machdep.cpu.brand_string", NULL, &len, NULL, 0)) { description.resize(len); sysctlbyname("machdep.cpu.brand_string", &description[0], &len, NULL, 0); // NOLINT } #elif defined(__linux__) FILE * f = fopen("/proc/cpuinfo", "r"); if (f) { char buf[1024]; while (fgets(buf, sizeof(buf), f)) { if (strncmp(buf, "model name", 10) == 0) { char * p = strchr(buf, ':'); if (p) { p++; while (std::isspace(*p)) { p++; } while (std::isspace(p[strlen(p) - 1])) { p[strlen(p) - 1] = '\0'; } description = p; break; } } } fclose(f); } #elif defined(_WIN32) HKEY hKey; if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT("HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0"), 0, KEY_READ, &hKey) == ERROR_SUCCESS) { DWORD cpu_brand_size = 0; if (RegQueryValueExA(hKey, "ProcessorNameString", NULL, NULL, NULL, &cpu_brand_size) == ERROR_SUCCESS) { description.resize(cpu_brand_size); if (RegQueryValueExA(hKey, "ProcessorNameString", NULL, NULL, (LPBYTE)&description[0], // NOLINT &cpu_brand_size) == ERROR_SUCCESS) { if (description.find('\0') != std::string::npos) { description.resize(description.find('\0')); } } } RegCloseKey(hKey); } #endif } }; static const char * 
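// Device callbacks: the device name is the fixed string "CPU", while the description is
// the CPU brand string gathered once in the context above (sysctl on macOS,
// /proc/cpuinfo on Linux, the registry on Windows).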
ggml_backend_cpu_device_get_name(ggml_backend_dev_t dev) { return "CPU"; GGML_UNUSED(dev); } static const char * ggml_backend_cpu_device_get_description(ggml_backend_dev_t dev) { struct ggml_backend_cpu_device_context * ctx = (struct ggml_backend_cpu_device_context *)dev->context; return ctx->description.c_str(); } static void ggml_backend_cpu_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { #ifdef _WIN32 MEMORYSTATUSEX status; status.dwLength = sizeof(status); GlobalMemoryStatusEx(&status); *total = status.ullTotalPhys; *free = status.ullAvailPhys; #else long pages = sysconf(_SC_PHYS_PAGES); long page_size = sysconf(_SC_PAGE_SIZE); *total = pages * page_size; // "free" system memory is ill-defined, for practical purposes assume that all of it is free: *free = *total; #endif // _WIN32 GGML_UNUSED(dev); } static enum ggml_backend_dev_type ggml_backend_cpu_device_get_type(ggml_backend_dev_t dev) { return GGML_BACKEND_DEVICE_TYPE_CPU; GGML_UNUSED(dev); } static void ggml_backend_cpu_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_cpu_device_get_name(dev); props->description = ggml_backend_cpu_device_get_description(dev); props->type = ggml_backend_cpu_device_get_type(dev); ggml_backend_cpu_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = { /* .async = */ false, /* .host_buffer = */ false, /* .buffer_from_host_ptr = */ true, /* .events = */ false, }; } static ggml_backend_t ggml_backend_cpu_device_init_backend(ggml_backend_dev_t dev, const char * params) { return ggml_backend_cpu_init(); GGML_UNUSED(dev); GGML_UNUSED(params); } static ggml_backend_buffer_type_t ggml_backend_cpu_device_get_buffer_type(ggml_backend_dev_t dev) { return ggml_backend_cpu_buffer_type(); GGML_UNUSED(dev); } static ggml_backend_buffer_t ggml_backend_cpu_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { return ggml_backend_cpu_buffer_from_ptr(ptr, size); GGML_UNUSED(dev); GGML_UNUSED(max_tensor_size); } static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; if (op->op == GGML_OP_NONE || op->op == GGML_OP_RESHAPE || op->op == GGML_OP_VIEW || op->op == GGML_OP_PERMUTE || op->op == GGML_OP_TRANSPOSE) { return true; } // check extra buffer types // note: only the first sources are checked for extra buffer types to reduce overhead, increase if necessary for (int i = 0; i < 4; i++) { if (op->src[i] && op->src[i]->buffer && ggml_backend_cpu_is_extra_buffer_type(op->src[i]->buffer->buft)) { auto * buf_extra = (ggml::cpu::extra_buffer_type *) op->src[i]->buffer->buft->context; return buf_extra->supports_op(dev, op); } } switch (op->op) { case GGML_OP_CPY: case GGML_OP_SET_ROWS: return op->type != GGML_TYPE_IQ3_XXS && op->type != GGML_TYPE_IQ3_S && op->type != GGML_TYPE_IQ2_XXS && op->type != GGML_TYPE_IQ2_XS && op->type != GGML_TYPE_IQ2_S && op->type != GGML_TYPE_IQ1_S && op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float case GGML_OP_MUL_MAT: return src1->type == GGML_TYPE_F32 || src1->type == ggml_get_type_traits_cpu(src0->type)->vec_dot_type; case GGML_OP_SOFT_MAX_BACK: { if (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type != GGML_TYPE_F32) { return false; } float max_bias = 0.0f; memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float)); return max_bias == 0.0f; } case 
GGML_OP_IM2COL_BACK: return src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32; case GGML_OP_GET_ROWS_BACK: return src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16; case GGML_OP_OUT_PROD: return (src0->type == GGML_TYPE_F32 || (ggml_is_quantized(src0->type) && src0->ne[2] == src1->ne[2] && src0->ne[3] == src1->ne[3])) && src1->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; default: return true; } } static bool ggml_backend_cpu_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { return ggml_backend_buft_is_host(buft) || ggml_backend_cpu_is_extra_buffer_type(buft); GGML_UNUSED(dev); } static const struct ggml_backend_device_i ggml_backend_cpu_device_i = { /* .get_name = */ ggml_backend_cpu_device_get_name, /* .get_description = */ ggml_backend_cpu_device_get_description, /* .get_memory = */ ggml_backend_cpu_device_get_memory, /* .get_type = */ ggml_backend_cpu_device_get_type, /* .get_props = */ ggml_backend_cpu_device_get_props, /* .init_backend = */ ggml_backend_cpu_device_init_backend, /* .get_buffer_type = */ ggml_backend_cpu_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, /* .buffer_from_host_ptr = */ ggml_backend_cpu_device_buffer_from_host_ptr, /* .supports_op = */ ggml_backend_cpu_device_supports_op, /* .supports_buft = */ ggml_backend_cpu_device_supports_buft, /* .offload_op = */ NULL, /* .event_new = */ NULL, /* .event_free = */ NULL, /* .event_synchronize = */ NULL, }; // CPU backend - backend (reg) static const char * ggml_backend_cpu_reg_get_name(ggml_backend_reg_t reg) { return "CPU"; GGML_UNUSED(reg); } static size_t ggml_backend_cpu_reg_get_device_count(ggml_backend_reg_t reg) { return 1; GGML_UNUSED(reg); } static ggml_backend_dev_t ggml_backend_cpu_reg_get_device(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(index == 0); static ggml_backend_cpu_device_context ctx; static ggml_backend_device ggml_backend_cpu_device = { /* .iface = */ ggml_backend_cpu_device_i, /* .reg = */ reg, /* .context = */ &ctx, }; return &ggml_backend_cpu_device; } // This is intended to replace the the ggml_cpu_has_* functions when loading the CPU backend dynamically, // and additionally to allow other backends to expose their own list of features that applications can query using the same API static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t reg) { static std::vector features = []() { ggml_cpu_init(); std::vector features; if (ggml_cpu_has_sse3()) { features.push_back({ "SSE3", "1" }); } if (ggml_cpu_has_ssse3()) { features.push_back({ "SSSE3", "1" }); } if (ggml_cpu_has_avx()) { features.push_back({ "AVX", "1" }); } if (ggml_cpu_has_avx_vnni()) { features.push_back({ "AVX_VNNI", "1" }); } if (ggml_cpu_has_avx2()) { features.push_back({ "AVX2", "1" }); } if (ggml_cpu_has_f16c()) { features.push_back({ "F16C", "1" }); } if (ggml_cpu_has_fma()) { features.push_back({ "FMA", "1" }); } if (ggml_cpu_has_bmi2()) { features.push_back({ "BMI2", "1" }); } if (ggml_cpu_has_avx512()) { features.push_back({ "AVX512", "1" }); } if (ggml_cpu_has_avx512_vbmi()) { features.push_back({ "AVX512_VBMI", "1" }); } if (ggml_cpu_has_avx512_vnni()) { features.push_back({ "AVX512_VNNI", "1" }); } if (ggml_cpu_has_avx512_bf16()) { features.push_back({ "AVX512_BF16", "1" }); } if (ggml_cpu_has_amx_int8()) { features.push_back({ "AMX_INT8", "1" }); } if (ggml_cpu_has_neon()) { features.push_back({ "NEON", "1" }); } if (ggml_cpu_has_arm_fma()) { features.push_back({ "ARM_FMA", "1" }); } if (ggml_cpu_has_fp16_va()) { 
features.push_back({ "FP16_VA", "1" }); } if (ggml_cpu_has_matmul_int8()) { features.push_back({ "MATMUL_INT8", "1" }); } if (ggml_cpu_has_sve()) { features.push_back({ "SVE", "1" }); } if (ggml_cpu_has_dotprod()) { features.push_back({ "DOTPROD", "1" }); } if (ggml_cpu_get_sve_cnt() > 0) { static std::string sve_cnt = std::to_string(ggml_cpu_get_sve_cnt()); features.push_back({ "SVE_CNT", sve_cnt.c_str() }); } if (ggml_cpu_has_sme()) { features.push_back({ "SME", "1" }); } if (ggml_cpu_has_riscv_v()) { features.push_back({ "RISCV_V", "1" }); } if (ggml_cpu_get_rvv_vlen() > 0) { static std::string rvv_vlen = std::to_string(ggml_cpu_get_rvv_vlen()); features.push_back({ "RVV_VLEN", rvv_vlen.c_str() }); } if (ggml_cpu_has_vsx()) { features.push_back({ "VSX", "1" }); } if (ggml_cpu_has_vxe()) { features.push_back({ "VXE", "1" }); } if (ggml_cpu_has_wasm_simd()) { features.push_back({ "WASM_SIMD", "1" }); } if (ggml_cpu_has_llamafile()) { features.push_back({ "LLAMAFILE", "1" }); } #ifdef GGML_USE_ACCELERATE features.push_back({ "ACCELERATE", "1" }); #endif #ifdef GGML_USE_CPU_HBM features.push_back({ "CPU_HBM", "1" }); #endif #ifdef GGML_USE_OPENMP features.push_back({ "OPENMP", "1" }); #endif #ifdef GGML_USE_CPU_KLEIDIAI features.push_back({ "KLEIDIAI", "1" }); #endif #ifdef GGML_USE_CPU_REPACK features.push_back({ "REPACK", "1" }); #endif features.push_back({ nullptr, nullptr }); return features; }(); return features.data(); GGML_UNUSED(reg); } static void * ggml_backend_cpu_get_proc_address(ggml_backend_reg_t reg, const char * name) { if (strcmp(name, "ggml_backend_set_n_threads") == 0) { ggml_backend_set_n_threads_t fct = ggml_backend_cpu_set_n_threads; return (void *)fct; } if (strcmp(name, "ggml_backend_dev_get_extra_bufts") == 0) { ggml_backend_dev_get_extra_bufts_t fct = ggml_backend_cpu_device_get_extra_buffers_type; return (void *)fct; } if (strcmp(name, "ggml_backend_get_features") == 0) { return (void *)ggml_backend_cpu_get_features; } if (strcmp(name, "ggml_backend_set_abort_callback") == 0) { return (void *)ggml_backend_cpu_set_abort_callback; } if (strcmp(name, "ggml_backend_cpu_numa_init") == 0) { return (void *)ggml_numa_init; } if (strcmp(name, "ggml_backend_cpu_is_numa") == 0) { return (void *)ggml_is_numa; } // threadpool - TODO: move to ggml-base if (strcmp(name, "ggml_threadpool_new") == 0) { return (void *)ggml_threadpool_new; } if (strcmp(name, "ggml_threadpool_free") == 0) { return (void *)ggml_threadpool_free; } if (strcmp(name, "ggml_backend_cpu_set_threadpool") == 0) { return (void *)ggml_backend_cpu_set_threadpool; } return NULL; GGML_UNUSED(reg); } static const struct ggml_backend_reg_i ggml_backend_cpu_reg_i = { /* .get_name = */ ggml_backend_cpu_reg_get_name, /* .get_device_count = */ ggml_backend_cpu_reg_get_device_count, /* .get_device = */ ggml_backend_cpu_reg_get_device, /* .get_proc_address = */ ggml_backend_cpu_get_proc_address, }; ggml_backend_reg_t ggml_backend_cpu_reg(void) { // init CPU feature detection ggml_cpu_init(); static struct ggml_backend_reg ggml_backend_cpu_reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_cpu_reg_i, /* .context = */ NULL, }; return &ggml_backend_cpu_reg; } GGML_BACKEND_DL_IMPL(ggml_backend_cpu_reg) ggml-org-ggml-3678254/src/ggml-cpu/hbm.cpp000066400000000000000000000037231512524704700201450ustar00rootroot00000000000000#ifdef GGML_USE_CPU_HBM #include "ggml-backend.h" #include "ggml-backend-impl.h" #include "ggml-cpu.h" #include "ggml-impl.h" #include "hbm.h" // buffer type HBM #include 
static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "CPU_HBM"; GGML_UNUSED(buft); } static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) { hbw_free(buffer->context); } static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * ptr; int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size); if (result != 0) { GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size); return NULL; } ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer; return buffer; } ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = { /* .iface = */ { /* .get_name = */ ggml_backend_cpu_hbm_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, /* .get_max_size = */ nullptr, // defaults to SIZE_MAX /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, }, /* .context = */ nullptr, }; return &ggml_backend_cpu_buffer_type_hbm; } #endif ggml-org-ggml-3678254/src/ggml-cpu/hbm.h000066400000000000000000000002331512524704700176030ustar00rootroot00000000000000#pragma once #include "ggml-backend.h" #include "ggml.h" // GGML CPU internal header ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void); ggml-org-ggml-3678254/src/ggml-cpu/kleidiai/000077500000000000000000000000001512524704700204415ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/kleidiai/kernels.cpp000066400000000000000000001710241512524704700226150ustar00rootroot00000000000000// SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates // SPDX-License-Identifier: MIT // // KleidiAI micro-kernels #include "kai_matmul_clamp_f32_qsi8d32p_qsi4c32p_interface.h" #include "kai_matmul_clamp_f32_qai8dxp_qsi8cxp_interface.h" #include "kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.h" #include "kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.h" #include "kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.h" #include "kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.h" #include "kai_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa.h" #include "kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.h" #include "kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa.h" #include "kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa.h" #include "kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot.h" #include "kai_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod.h" #include "kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod.h" #include "kai_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod.h" #include "kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.h" #include "kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm.h" #include "kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod.h" #include "kai_lhs_pack_bf16p2vlx2_f32_sme.h" #include "kai_lhs_quant_pack_qsi8d32p_f32.h" #include "kai_lhs_quant_pack_qsi8d32p4x8sb_f32_neon.h" #include "kai_lhs_quant_pack_qsi8d32p_f32_neon.h" #include "kai_lhs_quant_pack_qai8dxp_f32.h" #include 
"kai_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme.h" #include "kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.h" #include "kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.h" #include "kai_rhs_pack_nxk_qsi8cxp_qsi8cx_neon.h" #include "kai_common.h" #include "simd-mappings.h" #define GGML_COMMON_DECL_CPP #include "ggml-common.h" #include "kernels.h" #define NELEMS(x) (sizeof(x) / sizeof(*x)) template static inline size_t kernel_offs_fn3(size_t a, size_t b, size_t c) { return Fn(a, b, c); } template static inline size_t kernel_offs_fn2(size_t a, size_t b, size_t) { return Fn(a, b); } template static inline void kernel_run_fn11(size_t m, size_t n, size_t k, size_t bl, const void* lhs, const void* rhs, void* dst, size_t dst_stride_row, size_t dst_stride_col, float clamp_min, float clamp_max) { Fn(m, n, k, bl, lhs, rhs, static_cast(dst), dst_stride_row, dst_stride_col, clamp_min, clamp_max); } template static inline void kernel_run_fn10(size_t m, size_t n, size_t k, size_t /*bl*/, const void* lhs, const void* rhs, void* dst, size_t dst_stride_row, size_t dst_stride_col, float clamp_min, float clamp_max) { Fn(m, n, k, lhs, rhs, dst, dst_stride_row, dst_stride_col, clamp_min, clamp_max); } template static inline void kernel_run_float_fn10(size_t m, size_t n, size_t k, size_t /*bl*/, const void* lhs, const void* rhs, void* dst, size_t dst_stride_row, size_t dst_stride_col, float clamp_min, float clamp_max) { Fn(m, n, k, lhs, rhs, static_cast(dst), dst_stride_row, dst_stride_col, clamp_min, clamp_max); } template static inline size_t lhs_ps_fn6(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr) { return Fn(m, k, bl, mr, kr, sr); } template static inline size_t lhs_ps_fn5(size_t m, size_t k, size_t /*bl*/, size_t mr, size_t kr, size_t sr) { return Fn(m, k, mr, kr, sr); } template static inline size_t lhs_offs_fn6(size_t m_idx, size_t k, size_t bl, size_t mr, size_t kr, size_t sr) { return Fn(m_idx, k, bl, mr, kr, sr); } template static inline size_t lhs_offs_fn5(size_t m_idx, size_t k, size_t /*bl*/, size_t mr, size_t kr, size_t sr) { return Fn(m_idx, k, mr, kr, sr); } template static inline void lhs_pack_float_fn10(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr, size_t m_idx_start, const void* lhs, size_t lhs_stride, void* lhs_packed) { Fn(m, k, bl, mr, kr, sr, m_idx_start, static_cast(lhs), lhs_stride, lhs_packed); } template static inline void lhs_pack_void_fn10(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr, size_t m_idx_start, const void* lhs, size_t lhs_stride, void* lhs_packed) { Fn(m, k, bl, mr, kr, sr, m_idx_start, lhs, lhs_stride, lhs_packed); } template static inline void lhs_pack_void_fn9(size_t m, size_t k, size_t /*bl*/, size_t mr, size_t kr, size_t sr, size_t m_idx_start, const void* lhs, size_t lhs_stride, void* lhs_packed) { Fn(m, k, mr, kr, sr, m_idx_start, lhs, lhs_stride, lhs_packed); } template static inline void lhs_pack_float_fn9_no_bl(size_t m, size_t k, size_t /*bl*/, size_t mr, size_t kr, size_t sr, size_t m_idx_start, const void * lhs, size_t lhs_stride, void * lhs_packed) { Fn(m, k, mr, kr, sr, m_idx_start, static_cast(lhs), lhs_stride, lhs_packed); } template static inline size_t rhs_ps_fn5(size_t n, size_t k, size_t nr, size_t kr, size_t bl) { return Fn(n, k, nr, kr, bl); } template static inline size_t rhs_ps_fn2(size_t n, size_t k, size_t /*nr*/, size_t /*kr*/, size_t /*bl*/) { return Fn(n, k); } template static inline size_t rhs_stride_fn4(size_t k, size_t nr, size_t kr, size_t bl) { return Fn(k, nr, kr, bl); } template 
static inline size_t rhs_stride_fn1(size_t k, size_t /*nr*/, size_t /*kr*/, size_t /*bl*/) { return Fn(k); } template static inline void rhs_pack_fn12(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t bl, size_t /*rhs_stride*/, const void* rhs, const void* bias, const void* /*scale*/, void* rhs_packed, size_t extra_bytes, const void* params) { Fn(num_groups, n, k, nr, kr, sr, bl, static_cast(rhs), static_cast(bias), rhs_packed, extra_bytes, static_cast(params)); } template static inline void rhs_pack_scale_fn12(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t /*bl*/, size_t /*rhs_stride*/, const void* rhs, const void* bias, const void* scale, void* rhs_packed, size_t extra_bytes, const void* params) { Fn(num_groups, n, k, nr, kr, sr, static_cast(rhs), static_cast(bias), static_cast(scale), rhs_packed, extra_bytes, static_cast(params)); } template static inline void rhs_pack_fn13(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t /*bl*/, size_t rhs_stride, const void* rhs, const void* bias, const void* scale, void* rhs_packed, size_t extra_bytes, const void* params) { Fn(num_groups, n, k, nr, kr, sr, rhs_stride, rhs, bias, scale, rhs_packed, extra_bytes, params); } static const size_t INT4_PER_BYTE = 2; static const size_t INT4_BITS = 4; static const int Q4_0_ZERO_POINT = 8; const size_t INT4_PER_UINT16 = 4; static void dequantize_row_qsi4c32pscalef16( const void *packed_data, int32_t row_idx, int64_t nc, float *out, size_t nr_pack, size_t packed_row_stride, size_t kr, size_t bl, size_t num_bytes_multiplier ) { size_t group_idx = row_idx / nr_pack; size_t row_in_group = row_idx % nr_pack; const uint8_t *packed_group = (const uint8_t *)packed_data + group_idx * packed_row_stride; size_t num_blocks = nc / bl; const uint8_t *block_ptr = packed_group; for (size_t b = 0; b < num_blocks; ++b) { uint16_t scale_f16 = *((const uint16_t *)(block_ptr + row_in_group * num_bytes_multiplier)); float scale = GGML_CPU_FP16_TO_FP32(scale_f16); const uint8_t *segment_ptr = block_ptr + nr_pack * num_bytes_multiplier; size_t num_segments = bl / kr; size_t num_bytes_per_segment = kr / INT4_PER_BYTE; for (size_t s = 0; s < num_segments; ++s) { const uint8_t *seg_base = segment_ptr + s * nr_pack * num_bytes_per_segment; const uint8_t *qbytes = seg_base + row_in_group * num_bytes_per_segment; for (size_t k = 0; k < num_bytes_per_segment; ++k) { uint8_t byte = qbytes[k] ^ 0x88; int x0 = (byte & 0x0F) - Q4_0_ZERO_POINT; int x1 = (byte >> INT4_BITS) - Q4_0_ZERO_POINT; out[b * bl + s * num_bytes_per_segment + k] = x0 * scale; out[b * bl + s * num_bytes_per_segment + k + bl/2] = x1 * scale; } } block_ptr += nr_pack * num_bytes_multiplier + num_segments * nr_pack * num_bytes_per_segment; } } static void dequantize_row_qsi4c32ps1s0scalef16( const void *packed_data, int32_t row_idx, int64_t k, float *out, size_t nr, size_t packed_row_stride, size_t kr, size_t bl, size_t num_bytes_multiplier ) { const size_t num_blocks = k / bl; const size_t bl4 = bl / INT4_PER_UINT16; size_t group_idx = row_idx / nr; size_t row_in_group = row_idx % nr; const uint8_t *packed_group = (const uint8_t *)packed_data + group_idx * packed_row_stride; const uint16_t *qdata = (const uint16_t *)packed_group; const uint16_t *scales = (const uint16_t *)(packed_group + packed_row_stride - (nr * num_blocks * num_bytes_multiplier)); for (size_t block_idx = 0; block_idx < num_blocks; ++block_idx) { uint16_t scale_f16 = scales[row_in_group + block_idx * nr]; float scale 
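// Layout note: each packed group stores the int4 data first and the per-block fp16
// scales at the end; every uint16 in qdata holds four 4-bit values saved with a +8
// offset, which the Q4_0_ZERO_POINT subtraction below removes.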
= GGML_CPU_FP16_TO_FP32(scale_f16); for (size_t bl4_idx = 0; bl4_idx < bl4; ++bl4_idx) { uint16_t q = qdata[(block_idx * bl4 + bl4_idx) * nr + row_in_group]; for (size_t qidx = 0; qidx < INT4_PER_UINT16; ++qidx) { int v = ((q >> (qidx * 4)) & 0xF) - Q4_0_ZERO_POINT; out[block_idx * bl + bl4_idx * INT4_BITS + qidx] = v * scale; } } } GGML_UNUSED(kr); } static void dequantize_row_qsi8cxp( const void *packed_data, int32_t row_idx, int64_t k, float *out, size_t nr, size_t packed_row_stride, size_t kr, size_t bl, size_t num_bytes_multiplier ) { GGML_UNUSED(bl); GGML_UNUSED(num_bytes_multiplier); const size_t k_internal = ((size_t) k + QK8_0 - 1) / QK8_0 * QK8_0; const size_t group_idx = row_idx / nr; const size_t row_in_group = row_idx % nr; const uint8_t * group_ptr = static_cast(packed_data) + group_idx * packed_row_stride; const int8_t * data_base = reinterpret_cast(group_ptr); const size_t num_blocks = k_internal / kr; for (size_t block = 0; block < num_blocks; ++block) { const int8_t * block_ptr = data_base + (block * nr + row_in_group) * kr; for (size_t i = 0; i < kr; ++i) { const size_t k_idx = block * kr + i; if (k_idx < (size_t) k) { out[k_idx] = static_cast(block_ptr[i]); } } } const uint8_t * sums_ptr = group_ptr + nr * k_internal; GGML_UNUSED(sums_ptr); const float * scale_ptr = reinterpret_cast(sums_ptr + nr * sizeof(int32_t)); const float scale = scale_ptr[row_in_group]; if (scale == 0.0f) { for (size_t i = 0; i < (size_t) k; ++i) { out[i] = 0.0f; } return; } for (size_t i = 0; i < (size_t) k; ++i) { out[i] *= scale; } } static ggml_kleidiai_kernels gemm_gemv_kernels[] = { #if defined(__ARM_FEATURE_SME) { /* SME GEMM */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32_neon, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* SME GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_dst_size = */ 
kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32_neon, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon, /* .to_float = */ dequantize_row_qsi4c32ps1s0scalef16, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_fn12, }, /* .required_cpu = */ CPU_FEATURE_SME, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q4_0, /* .op_type = */ GGML_TYPE_F32, }, { /* SME GEMM */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_fn10, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_pack_bf16p2vlx2_f32_sme, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_void_fn9, }, /* SME GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa, /* .get_lhs_offset_ex = */ nullptr, /* .get_rhs_packed_offset_ex = */ nullptr, /* .run_kernel_ex = */ nullptr, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_pack_bf16p2vlx2_f32_sme, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_void_fn9, }, /* .rhs_info = */ { /* .packed_stride = */ nullptr, /* .to_float = */ nullptr, /* .packed_size_ex = */ &rhs_ps_fn2, /* .packed_stride_ex = */ &rhs_stride_fn1, /* .pack_func_ex = */ &rhs_pack_fn13, }, /* .required_cpu = */ CPU_FEATURE_SME, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_F16, /* .op_type = */ GGML_TYPE_F32, }, #endif #if defined(__APPLE__) #if defined(__ARM_FEATURE_DOTPROD) { /* DOTPROD GEMM */ /* .kern_info = */ { /* 
.get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* DOTPROD GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, /* .to_float = */ dequantize_row_qsi4c32pscalef16, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_fn12, }, /* .required_cpu = */ CPU_FEATURE_DOTPROD, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q4_0, /* .op_type = */ GGML_TYPE_F32, }, #endif #if defined(__ARM_FEATURE_MATMUL_INT8) { /* i8mm GEMM */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ 
&kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* i8mm GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, /* .to_float = */ dequantize_row_qsi4c32pscalef16, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_fn12, }, /* .required_cpu = */ CPU_FEATURE_DOTPROD | CPU_FEATURE_I8MM, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q4_0, /* .op_type = */ GGML_TYPE_F32, }, #endif #else #if defined(__ARM_FEATURE_SVE) { /* SVE i8mm GEMM */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p8x8_16x8_sve_i8mm, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* SVE dotprod GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* 
.get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p8x8_1x8_sve_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, /* .to_float = */ dequantize_row_qsi4c32pscalef16, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_fn12, }, /* .required_cpu = */ CPU_FEATURE_SVE | CPU_FEATURE_I8MM | CPU_FEATURE_DOTPROD, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q4_0, /* .op_type = */ GGML_TYPE_F32, }, #endif #if defined(__ARM_FEATURE_MATMUL_INT8) { /* i8mm GEMM */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p4x8sb_f32_neon, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* i8mm GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, 
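// rhs_info: how the Q4_0 weights get repacked into this kernel's KleidiAI layout
// (packed size, stride and pack function), plus a to_float routine that can dequantize
// a packed row back to f32 when the tensor data needs to be read as floats.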
/* .to_float = */ dequantize_row_qsi4c32pscalef16, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_fn12, }, /* .required_cpu = */ CPU_FEATURE_DOTPROD | CPU_FEATURE_I8MM, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q4_0, /* .op_type = */ GGML_TYPE_F32, }, #endif // __ARM_FEATURE_MATMUL_INT8 #if defined(__ARM_FEATURE_DOTPROD) { /* DOTPROD GEMM */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* DOTPROD GEMV */ /* .kern_info = */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn3, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn3, /* .run_kernel_ex = */ &kernel_run_fn11, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn6, /* .packed_size_ex = */ &lhs_ps_fn6, /* .pack_func_ex = */ &lhs_pack_float_fn10, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, /* .to_float = */ dequantize_row_qsi4c32pscalef16, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_fn12, }, /* .required_cpu = */ CPU_FEATURE_DOTPROD, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q4_0, /* .op_type = */ GGML_TYPE_F32, }, #endif #endif { /* Sentinel */ } }; static ggml_kleidiai_kernels gemm_gemv_kernels_q8[] = { #if defined(__ARM_FEATURE_SME) { /* SME GEMM */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_nr = 
*/ kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_float_fn10, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, }, /* SME GEMV */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_float_fn10, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon, /* .to_float = */ dequantize_row_qsi8cxp, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_scale_fn12, }, /* .required_cpu = */ CPU_FEATURE_SME, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q8_0, /* .op_type = */ GGML_TYPE_F32, }, #endif #if defined(__ARM_FEATURE_MATMUL_INT8) { /* I8MM GEMM */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_float_fn10, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, }, /* I8MM GEMV (dotprod fallback) */ { /* .get_m_step = */ 
kai_get_m_step_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_float_fn10, }, /* .gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon, /* .to_float = */ dequantize_row_qsi8cxp, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_scale_fn12, }, /* .required_cpu = */ CPU_FEATURE_DOTPROD | CPU_FEATURE_I8MM, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q8_0, /* .op_type = */ GGML_TYPE_F32, }, #endif #if defined(__ARM_FEATURE_DOTPROD) { /* DOTPROD GEMM */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_float_fn10, }, /* .gemm_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, }, /* DOTPROD GEMV */ { /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, /* .get_lhs_offset_ex = */ &kernel_offs_fn2, /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, /* .run_kernel_ex = */ &kernel_run_float_fn10, }, /* 
.gemv_lhs_info = */ { /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, /* .get_packed_offset_ex = */ &lhs_offs_fn5, /* .packed_size_ex = */ &lhs_ps_fn5, /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, }, /* .rhs_info = */ { /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon, /* .to_float = */ dequantize_row_qsi8cxp, /* .packed_size_ex = */ &rhs_ps_fn5, /* .packed_stride_ex = */ &rhs_stride_fn4, /* .pack_func_ex = */ &rhs_pack_scale_fn12, }, /* .required_cpu = */ CPU_FEATURE_DOTPROD, /* .lhs_type = */ GGML_TYPE_F32, /* .rhs_type = */ GGML_TYPE_Q8_0, /* .op_type = */ GGML_TYPE_F32, }, #endif { /* Sentinel */ } }; ggml_kleidiai_kernels * ggml_kleidiai_select_kernels(cpu_feature cpu_features, const ggml_tensor * tensor) { ggml_kleidiai_kernels * kernel = nullptr; if (tensor->op == GGML_OP_MUL_MAT && tensor->src[0] != nullptr && tensor->src[1] != nullptr) { #if defined(__ARM_FEATURE_SME) || \ defined(__ARM_FEATURE_DOTPROD) || \ defined(__ARM_FEATURE_MATMUL_INT8) || \ defined(__ARM_FEATURE_SVE) auto try_table = [&](auto & table) { for (size_t i = 0; i < NELEMS(table) - 1; ++i) { if ((cpu_features & table[i].required_cpu) == table[i].required_cpu && table[i].lhs_type == tensor->src[1]->type && table[i].rhs_type == tensor->src[0]->type && table[i].op_type == tensor->type) { kernel = &table[i]; return true; } } return false; }; if (tensor->src[0]->type == GGML_TYPE_Q8_0) { try_table(gemm_gemv_kernels_q8); } else { try_table(gemm_gemv_kernels); } #else GGML_UNUSED(gemm_gemv_kernels); GGML_UNUSED(gemm_gemv_kernels_q8); GGML_UNUSED(cpu_features); #endif } return kernel; } ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q4_0(cpu_feature features) { ggml_kleidiai_kernels * kernels = nullptr; #if defined(__ARM_FEATURE_SME) || \ defined(__ARM_FEATURE_DOTPROD) || \ defined(__ARM_FEATURE_MATMUL_INT8) || \ defined(__ARM_FEATURE_SVE) for (size_t i = 0; i < NELEMS(gemm_gemv_kernels) - 1; ++i) { if ((features & gemm_gemv_kernels[i].required_cpu) == gemm_gemv_kernels[i].required_cpu) { kernels = &gemm_gemv_kernels[i]; break; } } #else GGML_UNUSED(features); #endif return kernels; } ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q8_0(cpu_feature features) { ggml_kleidiai_kernels * kernels = nullptr; #if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_DOTPROD) || defined(__ARM_FEATURE_MATMUL_INT8) for (size_t i = 0; i < NELEMS(gemm_gemv_kernels_q8) - 1; ++i) { if ((features & gemm_gemv_kernels_q8[i].required_cpu) == gemm_gemv_kernels_q8[i].required_cpu) { kernels = &gemm_gemv_kernels_q8[i]; break; } } #else GGML_UNUSED(features); #endif return kernels; } ggml-org-ggml-3678254/src/ggml-cpu/kleidiai/kernels.h000066400000000000000000000060411512524704700222560ustar00rootroot00000000000000// SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates // SPDX-License-Identifier: MIT // #pragma once #include "ggml.h" enum cpu_feature { CPU_FEATURE_NONE = 0, CPU_FEATURE_DOTPROD = 1, CPU_FEATURE_I8MM = 2, CPU_FEATURE_SVE = 4, CPU_FEATURE_SME = 8 }; inline cpu_feature& operator|=(cpu_feature& lhs, cpu_feature rhs) { lhs = static_cast(lhs | rhs); return lhs; } inline cpu_feature operator|(cpu_feature lhs, cpu_feature rhs) { return static_cast(static_cast(lhs) | static_cast(rhs)); } struct kernel_info { size_t (*get_m_step)(void); size_t (*get_n_step)(void); size_t (*get_mr)(void); size_t (*get_nr)(void); size_t (*get_kr)(void); size_t (*get_sr)(void); size_t (*get_dst_offset)(size_t m_idx, size_t n_idx, size_t stride); size_t 
(*get_dst_size)(size_t m, size_t n); size_t (*get_lhs_offset_ex)(size_t m_idx, size_t k, size_t bl); size_t (*get_rhs_packed_offset_ex)(size_t n_idx, size_t k, size_t bl); void (*run_kernel_ex)( size_t m, size_t n, size_t k, size_t bl, const void* lhs_packed, const void* rhs_packed, void* dst, size_t dst_stride_row, size_t dst_stride_col, float clamp_min, float clamp_max); }; struct lhs_packing_info { size_t (*get_offset)(size_t m_idx, size_t lhs_stride); size_t (*get_packed_offset_ex)(size_t m_idx, size_t k, size_t bl, size_t mr, size_t kr, size_t sr); size_t (*packed_size_ex)(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr); void (*pack_func_ex)(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr, size_t m_idx_start, const void * lhs, size_t lhs_stride, void * lhs_packed); }; struct rhs_packing_info { size_t (*packed_stride)(size_t k, size_t nr, size_t kr, size_t bl); void (*to_float)(const void *packed_data, int32_t row_idx, int64_t nc, float *out, size_t nr_pack, size_t packed_row_stride, size_t kr, size_t bl, size_t num_bytes_multiplier); size_t (*packed_size_ex)(size_t n, size_t k, size_t nr, size_t kr, size_t bl); size_t (*packed_stride_ex)(size_t k, size_t nr, size_t kr, size_t bl); void (*pack_func_ex)(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t bl, size_t rhs_stride, const void * rhs, const void * bias, const void * scale, void * rhs_packed, size_t extra_bytes, const void * params); }; struct ggml_kleidiai_kernels { kernel_info gemm; lhs_packing_info gemm_lhs_info; kernel_info gemv; lhs_packing_info gemv_lhs_info; rhs_packing_info rhs_info; cpu_feature required_cpu; ggml_type lhs_type; ggml_type rhs_type; ggml_type op_type; }; ggml_kleidiai_kernels * ggml_kleidiai_select_kernels(cpu_feature cpu_features, const ggml_tensor * tensor); ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q4_0(cpu_feature features); ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q8_0(cpu_feature features); ggml-org-ggml-3678254/src/ggml-cpu/kleidiai/kleidiai.cpp000066400000000000000000000772231512524704700227330ustar00rootroot00000000000000// SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates // SPDX-License-Identifier: MIT // #include #include #include #include #include #include #include #include #include #include #include #if defined(__linux__) #include #include #elif defined(__APPLE__) #include #include #include #elif defined(_WIN32) #include #include #endif #include "kleidiai.h" #include "ggml-cpu.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-threading.h" #include "traits.h" #include "kernels.h" #include "kai_common.h" #define GGML_COMMON_DECL_CPP #include "ggml-common.h" struct ggml_kleidiai_context { cpu_feature features; ggml_kleidiai_kernels * kernels_q4; ggml_kleidiai_kernels * kernels_q8; } static ctx = { CPU_FEATURE_NONE, NULL, NULL }; static const char* cpu_feature_to_string(cpu_feature f) { if (f == CPU_FEATURE_NONE) { return "NONE"; } else if ((f & CPU_FEATURE_SME) == CPU_FEATURE_SME) { return "SME"; } else if ((f & CPU_FEATURE_SVE) == CPU_FEATURE_SVE) { return "SVE"; } else if ((f & CPU_FEATURE_I8MM) == CPU_FEATURE_I8MM) { return "I8MM"; } else if ((f & CPU_FEATURE_DOTPROD) == CPU_FEATURE_DOTPROD) { return "DOTPROD"; } else { return "UNKNOWN"; } } static void init_kleidiai_context(void) { ggml_critical_section_start(); static bool initialized = false; if (!initialized) { initialized = true; const char *env_var = getenv("GGML_KLEIDIAI_SME"); int sme_enabled = 0; 
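        // Build the cpu_feature bitmask: a kernel table entry is usable only when
        // (ctx.features & required_cpu) == required_cpu. SVE is claimed only when the
        // runtime SVE vector length matches QK8_0 bytes, and SME is opt-in via the
        // GGML_KLEIDIAI_SME environment variable read above.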
ctx.features = (ggml_cpu_has_dotprod() ? CPU_FEATURE_DOTPROD : CPU_FEATURE_NONE) | (ggml_cpu_has_matmul_int8() ? CPU_FEATURE_I8MM : CPU_FEATURE_NONE) | ((ggml_cpu_has_sve() && ggml_cpu_get_sve_cnt() == QK8_0) ? CPU_FEATURE_SVE : CPU_FEATURE_NONE); if (env_var) { sme_enabled = atoi(env_var); } if (sme_enabled != 0) { ctx.features |= ggml_cpu_has_sme() ? CPU_FEATURE_SME : CPU_FEATURE_NONE; } ctx.kernels_q4 = ggml_kleidiai_select_kernels_q4_0(ctx.features); ctx.kernels_q8 = ggml_kleidiai_select_kernels_q8_0(ctx.features); #ifndef NDEBUG if (ctx.kernels_q4) { GGML_LOG_DEBUG("kleidiai: using q4 kernel with CPU feature %s\n", cpu_feature_to_string(ctx.kernels_q4->required_cpu)); } if (ctx.kernels_q8) { GGML_LOG_DEBUG("kleidiai: using q8 kernel with CPU feature %s\n", cpu_feature_to_string(ctx.kernels_q8->required_cpu)); } #endif } ggml_critical_section_end(); } static inline int64_t ggml_ne(const ggml_tensor * tensor, int dim) { GGML_ASSERT(dim >= 0 && dim < GGML_MAX_DIMS); return tensor->ne[dim]; } namespace ggml::cpu::kleidiai { static size_t round_down(size_t x, size_t y) { return y == 0 ? x : x - (x % y); } static void transpose_f32kxn_f16nxk(size_t n, size_t k, float * dst, const uint16_t * src, size_t rhs_stride) { size_t src_stride = rhs_stride / sizeof(uint16_t); size_t dst_stride = n; for (size_t k_idx = 0; k_idx < k; ++k_idx) { for (size_t n_idx = 0; n_idx < n; ++n_idx) { uint16_t v = *(src + k_idx + n_idx * src_stride); *(dst + n_idx + k_idx * dst_stride) = kai_cast_f32_f16(v); } } } class tensor_traits : public ggml::cpu::tensor_traits { bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { if (op->op != GGML_OP_MUL_MAT) { return false; } ggml_kleidiai_kernels *kernels = ggml_kleidiai_select_kernels(ctx.features, op); if (!kernels) { return false; } bool is_gemv = op->src[1]->ne[1] == 1; kernel_info * kernel = is_gemv ? &kernels->gemv : &kernels->gemm; lhs_packing_info * lhs_info = is_gemv ? 
&kernels->gemv_lhs_info : &kernels->gemm_lhs_info; size_t k = op->src[0]->ne[0]; size_t n = op->src[0]->ne[1]; size_t m = op->src[1]->ne[1]; size_t mr = kernel->get_mr(); size_t kr = kernel->get_kr(); size_t sr = kernel->get_sr(); if (kernels->rhs_type == GGML_TYPE_Q4_0) { if (!lhs_info->packed_size_ex) return false; size = lhs_info->packed_size_ex(m, k, QK4_0, mr, kr, sr); } else if (kernels->rhs_type == GGML_TYPE_Q8_0) { if (!lhs_info->packed_size_ex) return false; size = lhs_info->packed_size_ex(m, k, QK8_0, mr, kr, sr); } else if (kernels->rhs_type == GGML_TYPE_F16) { if (!lhs_info->packed_size_ex || !kernels->rhs_info.packed_size_ex) return false; const int64_t lhs_batch_size0 = op->src[1]->ne[2]; const int64_t rhs_batch_size0 = op->src[0]->ne[2]; const int64_t r = lhs_batch_size0 / rhs_batch_size0; size = lhs_info->packed_size_ex(m * r, k, 0, mr, kr, sr) + kernels->rhs_info.packed_size_ex(n, k, kernel->get_nr(), kernel->get_kr(), 0) + k * n * sizeof(float) + n * sizeof(float); } else { return false; } return true; } bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * dst) override { if (dst->op == GGML_OP_MUL_MAT) { if (dst->src[0]->type == GGML_TYPE_Q4_0) { return compute_forward_q4_0(params, dst); } else if (dst->src[0]->type == GGML_TYPE_Q8_0) { return compute_forward_q8_0(params, dst); } else if (dst->src[0]->type == GGML_TYPE_F16) { return compute_forward_fp16(params, dst); } } else if (dst->op == GGML_OP_GET_ROWS) { if (dst->src[0]->type == GGML_TYPE_Q4_0 || dst->src[0]->type == GGML_TYPE_Q8_0) { return compute_forward_get_rows(params, dst); } } return false; } bool compute_forward_fp16(ggml_compute_params * params, struct ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS ggml_kleidiai_kernels *kernels = ggml_kleidiai_select_kernels(ctx.features, dst); if (!kernels) { return false; } const bool is_gemv = src1->ne[1] == 1; kernel_info * kernel = is_gemv ? &kernels->gemv : &kernels->gemm; lhs_packing_info * lhs_info = is_gemv ? 
&kernels->gemv_lhs_info : &kernels->gemm_lhs_info; GGML_ASSERT(kernel); if (!kernels->rhs_info.pack_func_ex || !kernel->get_lhs_offset_ex || !kernel->get_rhs_packed_offset_ex || !kernel->run_kernel_ex) { return false; } const int nth = params->nth; const int ith = params->ith; const int64_t lhs_batch_size0 = ne12; const int64_t rhs_batch_size0 = ne02; const int64_t batch_size = lhs_batch_size0; GGML_ASSERT(rhs_batch_size0 > 0); GGML_ASSERT(lhs_batch_size0 % rhs_batch_size0 == 0); const int64_t r = lhs_batch_size0 / rhs_batch_size0; const int64_t m_group = ne11; const int64_t m = m_group; const int64_t n = ne01; const int64_t k = ne00; const size_t lhs_stride = src1->nb[1]; const size_t rhs_stride = src0->nb[1]; const size_t dst_stride = dst->nb[1]; const int64_t mr = (int64_t) kernel->get_mr(); const int64_t nr = (int64_t) kernel->get_nr(); const int64_t kr = (int64_t) kernel->get_kr(); const int64_t sr = (int64_t) kernel->get_sr(); const size_t lhs_packed_size = lhs_info->packed_size_ex(m, k, 0, mr, kr, sr); const size_t rhs_packed_size = kernels->rhs_info.packed_size_ex(n, k, nr, kr, 0); const size_t kxn_size = k * n * sizeof(float); const size_t bias_size = n * sizeof(float); const size_t wsize_required = lhs_packed_size + rhs_packed_size + kxn_size + bias_size; GGML_ASSERT(wsize_required <= params->wsize); uint8_t * lhs_packed = static_cast(params->wdata); uint8_t * rhs_packed = lhs_packed + lhs_packed_size; uint8_t * rhs_kxn = rhs_packed + rhs_packed_size; uint8_t * bias = rhs_kxn + kxn_size; for (int64_t batch_idx = 0; batch_idx < batch_size; ++batch_idx) { const int64_t rhs_batch_idx = batch_idx / r; const uint8_t * rhs_batch_base = static_cast(src0->data) + rhs_batch_idx * src0->nb[2]; uint8_t * dst_batch_base = static_cast(dst->data) + batch_idx * dst->nb[2]; // LHS packing (threaded over m, honoring mr alignment and KV groups) { const int64_t m_roundup_mr = kai_roundup(m, mr); const int64_t num_threads = KAI_MIN(m_roundup_mr / mr, nth); if (ith < num_threads) { const int64_t num_m_per_thread0 = round_down((size_t)(m_roundup_mr / num_threads), (size_t)mr); const int64_t num_m_per_threadN_1 = m - (num_threads - 1) * num_m_per_thread0; const int64_t m_start = ith * num_m_per_thread0; const int64_t m_count = (ith == num_threads - 1) ? 
num_m_per_threadN_1 : num_m_per_thread0; // Base packed offset (aligned) and per-row stride in bytes const size_t base_packed_off = lhs_info->get_packed_offset_ex(m_start, k, 0, mr, kr, sr); const size_t next_block_off = lhs_info->get_packed_offset_ex(m_start + mr, k, 0, mr, kr, sr); const size_t row_stride_bytes = (next_block_off - base_packed_off) / (size_t)mr; int64_t remaining = m_count; int64_t cur = m_start; while (remaining > 0) { const int64_t row_in_group = cur; const int64_t avail = m_group - row_in_group; const int64_t take = std::min(avail, remaining); const uint8_t * lhs_batch_base = static_cast(src1->data) + batch_idx * src1->nb[2]; const void * src_ptr = lhs_batch_base + (size_t)row_in_group * lhs_stride; const size_t dst_off = base_packed_off + (size_t)(cur - m_start) * row_stride_bytes; void * dst_ptr = lhs_packed + dst_off; lhs_info->pack_func_ex(take, k, 0, mr, kr, sr, 0, src_ptr, lhs_stride, dst_ptr); cur += take; remaining -= take; } } } // RHS packing (single thread), then synchronize if (ith == 0) { memset(bias, 0, (size_t)n * sizeof(float)); transpose_f32kxn_f16nxk((size_t)n, (size_t)k, reinterpret_cast(rhs_kxn), reinterpret_cast(rhs_batch_base), rhs_stride); kernels->rhs_info.pack_func_ex(1, n, k, nr, kr, sr, 0, n * sizeof(float), rhs_kxn, bias, nullptr, rhs_packed, 0, nullptr); } ggml_barrier(params->threadpool); // Matmul (threaded over n) { const int64_t n_step = (int64_t) kernel->get_n_step(); int64_t num_threads_n = KAI_MIN(n / n_step, nth); if (num_threads_n <= 0) { num_threads_n = 1; } if (ith < num_threads_n) { const int64_t num_n_per_thread0 = round_down((size_t)(n / num_threads_n), (size_t)n_step); const int64_t num_n_per_threadN_1 = n - (num_threads_n - 1) * num_n_per_thread0; const int64_t n_start = ith * num_n_per_thread0; const int64_t n_to_process = (ith == num_threads_n - 1) ? num_n_per_threadN_1 : num_n_per_thread0; // LHS packed base at row 0 (consistent with packing above) const size_t lhs_packed_offset0 = lhs_info->get_packed_offset_ex(0, k, 0, mr, kr, sr); const size_t rhs_packed_offset = kernel->get_rhs_packed_offset_ex(n_start, k, 0); const size_t dst_offset = kernel->get_dst_offset((size_t)0, (size_t)n_start, dst_stride); const void * lhs_ptr = lhs_packed + lhs_packed_offset0; const void * rhs_ptr = rhs_packed + rhs_packed_offset; float * dst_ptr = reinterpret_cast(dst_batch_base + dst_offset); kernel->run_kernel_ex(m, n_to_process, k, 0, lhs_ptr, rhs_ptr, dst_ptr, dst_stride, sizeof(float), -FLT_MAX, FLT_MAX); } } if (batch_idx != batch_size - 1) { ggml_barrier(params->threadpool); } } return true; } bool compute_forward_q4_0(struct ggml_compute_params * params, struct ggml_tensor * dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_Q4_0); const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS ggml_kleidiai_kernels *kernels = ggml_kleidiai_select_kernels(ctx.features, dst); if (!kernels) { return false; } bool is_gemv = src1->ne[1] == 1; kernel_info * kernel = is_gemv ? &kernels->gemv : &kernels->gemm; lhs_packing_info * lhs_info = is_gemv ? &kernels->gemv_lhs_info : &kernels->gemm_lhs_info; GGML_ASSERT(kernel); if (!lhs_info->get_packed_offset_ex || !lhs_info->pack_func_ex || !kernel->get_rhs_packed_offset_ex || !kernel->run_kernel_ex || !kernel->get_dst_offset) { return false; } const int ith = params->ith; const int nth_raw = params->nth; const int nth = nth_raw > 0 ? 
nth_raw : 1; const size_t k = ne00; const size_t m = ne11; const size_t n = ne01; size_t mr = kernel->get_mr(); size_t kr = kernel->get_kr(); size_t sr = kernel->get_sr(); const uint8_t * lhs = static_cast(src1->data); uint8_t * lhs_packed = (uint8_t*)params->wdata; const uint8_t * rhs_packed = static_cast(src0->data); const size_t n_step = kernel->get_n_step(); const size_t num_n_per_thread = kai_roundup(kai_roundup(n, nth) / nth, n_step); const size_t n_start = ith * num_n_per_thread; size_t n_to_process = 0; if (n_start < n) { n_to_process = num_n_per_thread; if ((n_start + n_to_process) > n) { n_to_process = n - n_start; } } // Calculate number of columns to be processed per thread const size_t num_m_per_thread = kai_roundup(m, mr * nth) / nth; const size_t m_start = ith * num_m_per_thread; size_t m_to_process = num_m_per_thread; if ((m_start + m_to_process) > m) { m_to_process = m - m_start; } if (m_start < m) { // Transform LHS const size_t src_stride = src1->nb[1]; const float * src_ptr = reinterpret_cast(lhs + lhs_info->get_offset(m_start, dst->src[1]->nb[1])); const size_t lhs_packed_offset = lhs_info->get_packed_offset_ex(m_start, k, QK4_0, mr, kr, sr); void * lhs_packed_ptr = static_cast(lhs_packed + lhs_packed_offset); // Pack this thread's chunk with m_idx_start = 0 and per-thread output pointer lhs_info->pack_func_ex(m_to_process, k, QK4_0, mr, kr, sr, 0, src_ptr, src_stride, lhs_packed_ptr); } ggml_barrier(params->threadpool); // Perform the operation const size_t dst_stride = dst->nb[1]; const size_t lhs_packed_offset = lhs_info->get_packed_offset_ex(0, k, QK4_0, mr, kr, sr); const size_t rhs_packed_offset = kernel->get_rhs_packed_offset_ex(n_start, k, QK4_0); const size_t dst_offset = kernel->get_dst_offset(0, n_start, dst_stride); const void * rhs_ptr = static_cast(rhs_packed + rhs_packed_offset); const void* lhs_ptr = (const void*)((const char *)lhs_packed + lhs_packed_offset); float *dst_ptr = reinterpret_cast(static_cast(dst->data) + dst_offset); if (n_to_process > 0) { kernel->run_kernel_ex(m, n_to_process, k, QK4_0, lhs_ptr, rhs_ptr, dst_ptr, dst_stride, sizeof(float), -FLT_MAX, FLT_MAX); } return true; } bool compute_forward_q8_0(struct ggml_compute_params * params, struct ggml_tensor * dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_Q8_0); const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS ggml_kleidiai_kernels *kernels = ggml_kleidiai_select_kernels(ctx.features, dst); if (!kernels) { return false; } bool is_gemv = src1->ne[1] == 1; kernel_info * kernel = is_gemv ? &kernels->gemv : &kernels->gemm; lhs_packing_info * lhs_info = is_gemv ? &kernels->gemv_lhs_info : &kernels->gemm_lhs_info; if (!kernel || !lhs_info->get_packed_offset_ex || !lhs_info->pack_func_ex || !kernel->get_rhs_packed_offset_ex || !kernel->run_kernel_ex || !kernel->get_dst_offset) { return false; } const int ith = params->ith; const int nth_raw = params->nth; const int nth = nth_raw > 0 ? 
nth_raw : 1; const size_t k = ne00; const size_t m = ne11; const size_t n = ne01; size_t mr = kernel->get_mr(); size_t kr = kernel->get_kr(); size_t sr = kernel->get_sr(); const uint8_t * lhs = static_cast(src1->data); uint8_t * lhs_packed = static_cast(params->wdata); const uint8_t * rhs_packed = static_cast(src0->data); const size_t n_step = kernel->get_n_step(); const size_t num_n_per_thread = kai_roundup(kai_roundup(n, nth) / nth, n_step); const size_t n_start = ith * num_n_per_thread; size_t n_to_process = 0; if (n_start < n) { n_to_process = num_n_per_thread; if ((n_start + n_to_process) > n) { n_to_process = n - n_start; } } const size_t num_m_per_thread = kai_roundup(m, mr * nth) / nth; const size_t m_start = ith * num_m_per_thread; size_t m_to_process = num_m_per_thread; if ((m_start + m_to_process) > m) { m_to_process = m - m_start; } if (m_start < m) { const size_t src_stride = src1->nb[1]; const float * src_ptr = reinterpret_cast(lhs + lhs_info->get_offset(m_start, dst->src[1]->nb[1])); const size_t lhs_packed_offset = lhs_info->get_packed_offset_ex(m_start, k, 0, mr, kr, sr); void * lhs_packed_ptr = static_cast(lhs_packed + lhs_packed_offset); lhs_info->pack_func_ex(m_to_process, k, 0, mr, kr, sr, 0, src_ptr, src_stride, lhs_packed_ptr); } ggml_barrier(params->threadpool); const size_t dst_stride = dst->nb[1]; const size_t lhs_packed_offset = lhs_info->get_packed_offset_ex(0, k, 0, mr, kr, sr); const size_t rhs_packed_offset = kernel->get_rhs_packed_offset_ex(n_start, k, 0); const size_t dst_offset = kernel->get_dst_offset(0, n_start, dst_stride); const void * rhs_ptr = static_cast(rhs_packed + rhs_packed_offset); const void * lhs_ptr = static_cast(lhs_packed + lhs_packed_offset); float * dst_ptr = reinterpret_cast(static_cast(dst->data) + dst_offset); if (n_to_process > 0) { kernel->run_kernel_ex(m, n_to_process, k, 0, lhs_ptr, rhs_ptr, dst_ptr, dst_stride, sizeof(float), -FLT_MAX, FLT_MAX); } return true; } bool compute_forward_get_rows(struct ggml_compute_params * params, struct ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS ggml_kleidiai_kernels * kernels = nullptr; size_t block_len = 0; size_t num_bytes_multiplier = 0; if (dst->src[0]->type == GGML_TYPE_Q4_0) { if (!ctx.kernels_q4) { return false; } kernels = ctx.kernels_q4; block_len = QK4_0; num_bytes_multiplier = sizeof(uint16_t); } else if (dst->src[0]->type == GGML_TYPE_Q8_0) { if (!ctx.kernels_q8) { return false; } kernels = ctx.kernels_q8; block_len = QK8_0; num_bytes_multiplier = sizeof(float); } else { return false; } rhs_packing_info * rhs_info = &kernels->rhs_info; kernel_info * kernel = &kernels->gemm; if (!rhs_info->to_float || !kernel->get_nr) { return false; } const int64_t nc = ne00; const int64_t nr = ggml_nelements(src1); const size_t block_rows = kernel->get_nr(); const size_t kr = kernel->get_kr(); const size_t packed_stride = rhs_info->packed_stride(nc, block_rows, kr, block_len); const int ith = params->ith; const int nth = params->nth; const int dr = (nr + nth - 1) / nth; const int ir0 = dr * ith; const int ir1 = MIN(ir0 + dr, nr); for (int64_t i = ir0; i < ir1; ++i) { GGML_ASSERT(src1->type == GGML_TYPE_I32); int64_t row_idx = ((const int32_t *)src1->data)[i]; GGML_ASSERT(row_idx >= 0 && row_idx < src0->ne[1]); float *out = (float *)((char *)dst->data + i * nb1); rhs_info->to_float(src0->data, row_idx, nc, out, block_rows, packed_stride, kr, block_len, num_bytes_multiplier); } return true; } public: int repack(struct 
ggml_tensor * tensor, const void * data, size_t data_size) { const size_t n = tensor->ne[1]; const size_t k = tensor->ne[0]; if (tensor->type == GGML_TYPE_Q4_0) { if (!ctx.kernels_q4) { return -1; } size_t nr = ctx.kernels_q4->gemm.get_nr(); size_t kr = ctx.kernels_q4->gemm.get_kr(); size_t sr = ctx.kernels_q4->gemm.get_sr(); struct kai_rhs_pack_qs4cxs1s0_param params; params.lhs_zero_point = 1; params.rhs_zero_point = 8; ctx.kernels_q4->rhs_info.pack_func_ex(1, n, k, nr, kr, sr, QK4_0, 0, static_cast(data), nullptr, nullptr, tensor->data, 0, ¶ms); GGML_UNUSED(data_size); return 0; } else if (tensor->type == GGML_TYPE_Q8_0) { if (!ctx.kernels_q8) { return -1; } const size_t row_stride = tensor->nb[1]; const size_t k_blocks = (k + QK8_0 - 1) / QK8_0; std::vector qdata(n * k, 0); std::vector scales(n, 0.0f); for (size_t row = 0; row < n; ++row) { const auto * row_blocks = reinterpret_cast( static_cast(data) + row * row_stride); float max_abs = 0.0f; for (size_t block = 0; block < k_blocks; ++block) { const block_q8_0 & blk = row_blocks[block]; const float d = GGML_FP16_TO_FP32(blk.d); for (size_t l = 0; l < QK8_0; ++l) { const size_t linear_idx = block * QK8_0 + l; if (linear_idx >= k) { break; } const float value = d * blk.qs[l]; max_abs = std::max(max_abs, std::fabs(value)); } } float scale = max_abs > 0.0f ? max_abs / 127.0f : 0.0f; scales[row] = scale; const float inv_scale = scale > 0.0f ? 1.0f / scale : 0.0f; for (size_t block = 0; block < k_blocks; ++block) { const block_q8_0 & blk = row_blocks[block]; const float d = GGML_FP16_TO_FP32(blk.d); for (size_t l = 0; l < QK8_0; ++l) { const size_t linear_idx = block * QK8_0 + l; if (linear_idx >= k) { break; } const float value = d * blk.qs[l]; int32_t q = scale > 0.0f ? static_cast(std::lround(value * inv_scale)) : 0; q = std::clamp(q, -127, 127); qdata[row * k + linear_idx] = static_cast(q); } } } size_t nr = ctx.kernels_q8->gemm.get_nr(); size_t kr = ctx.kernels_q8->gemm.get_kr(); size_t sr = ctx.kernels_q8->gemm.get_sr(); struct kai_rhs_pack_qsi8cx_params params; params.lhs_zero_point = 1; params.scale_multiplier = 1.0f; ctx.kernels_q8->rhs_info.pack_func_ex(1, n, k, nr, kr, sr, 0, 0, qdata.data(), nullptr, scales.data(), tensor->data, 0, ¶ms); GGML_UNUSED(data_size); return 0; } GGML_UNUSED(data_size); return -1; } }; static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) { static tensor_traits traits; return &traits; } } // namespace ggml::cpu::kleidiai static enum ggml_status ggml_backend_cpu_kleidiai_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { tensor->extra = (void *) ggml::cpu::kleidiai::get_tensor_traits(buffer, tensor); return GGML_STATUS_SUCCESS; GGML_UNUSED(buffer); } static void ggml_backend_cpu_kleidiai_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); auto tensor_traits = (ggml::cpu::kleidiai::tensor_traits *) tensor->extra; auto OK = tensor_traits->repack(tensor, data, size); GGML_ASSERT(OK == 0); GGML_UNUSED(buffer); } static const char * ggml_backend_cpu_kleidiai_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "CPU_KLEIDIAI"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_cpu_kleidiai_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); if (buffer == 
nullptr) { return nullptr; } buffer->buft = buft; buffer->iface.init_tensor = ggml_backend_cpu_kleidiai_buffer_init_tensor; buffer->iface.set_tensor = ggml_backend_cpu_kleidiai_buffer_set_tensor; buffer->iface.get_tensor = nullptr; buffer->iface.cpy_tensor = nullptr; return buffer; } static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return TENSOR_ALIGNMENT; GGML_UNUSED(buft); } static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) { GGML_UNUSED(buft); const size_t n = tensor->ne[1]; const size_t k = tensor->ne[0]; ggml_kleidiai_kernels * kernels = nullptr; size_t block_len = 0; if (tensor->type == GGML_TYPE_Q4_0) { GGML_ASSERT(ctx.kernels_q4); kernels = ctx.kernels_q4; block_len = QK4_0; } else if (tensor->type == GGML_TYPE_Q8_0) { GGML_ASSERT(ctx.kernels_q8); kernels = ctx.kernels_q8; block_len = QK8_0; } else { return 0; } const size_t nr = kernels->gemm.get_nr(); const size_t kr = kernels->gemm.get_kr(); const size_t packed = kernels->rhs_info.packed_size_ex(n, k, nr, kr, block_len); const size_t raw = ggml_nbytes(tensor); return packed > raw ? packed : raw; } namespace ggml::cpu::kleidiai { class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { if ((op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_GET_ROWS) && (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_Q8_0) && op->src[0]->buffer && (ggml_n_dims(op->src[0]) == 2) && op->src[0]->buffer->buft == ggml_backend_cpu_kleidiai_buffer_type()) { if (((op->src[0]->type == GGML_TYPE_Q4_0) ? ctx.kernels_q4 : ctx.kernels_q8) == nullptr) { return false; } if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { return false; } if ((op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_I32) && ggml_ne(op->src[1], 2) == 1 && ggml_ne(op->src[1], 3) == 1) { return true; } } return false; } ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_GET_ROWS) { if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_kleidiai_buffer_type()) { return (ggml::cpu::tensor_traits *) op->src[0]->extra; } else if (ggml_kleidiai_select_kernels(ctx.features, op) && op->src[1]->ne[1] > 1) { if ((op->src[0]->nb[1] * op->src[0]->ne[1] != op->src[0]->nb[2]) || (op->src[1]->nb[1] * op->src[1]->ne[1] != op->src[1]->nb[2])) { return nullptr; } return ggml::cpu::kleidiai::get_tensor_traits(NULL, NULL); } } return nullptr; } }; } // namespace ggml::cpu::kleidiai ggml_backend_buffer_type_t ggml_backend_cpu_kleidiai_buffer_type(void) { static ggml::cpu::kleidiai::extra_buffer_type ctx; static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_kleidiai = { /* .iface = */ { /* .get_name = */ ggml_backend_cpu_kleidiai_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_kleidiai_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_kleidiai_buffer_type_get_alignment, /* .get_max_size = */ nullptr, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size, /* .is_host = */ nullptr, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ &ctx, }; init_kleidiai_context(); return &ggml_backend_cpu_buffer_type_kleidiai; } 
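// Usage sketch (illustrative, assuming the public ggml-alloc/ggml-backend API and
// hypothetical handles ctx_weights / w_q4_0 / data): weights meant to be repacked for
// KleidiAI are allocated from this buffer type, so that set_tensor() routes through
// tensor_traits::repack() above.
//
//     ggml_backend_buffer_type_t buft = ggml_backend_cpu_kleidiai_buffer_type();
//     ggml_backend_buffer_t      buf  = ggml_backend_alloc_ctx_tensors_from_buft(ctx_weights, buft);
//     ggml_backend_tensor_set(w_q4_0, data, 0, ggml_nbytes(w_q4_0)); // triggers repack()
//
// MUL_MAT / GET_ROWS nodes whose src[0] lives in this buffer are then dispatched to the
// KleidiAI kernels selected in init_kleidiai_context().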
ggml-org-ggml-3678254/src/ggml-cpu/kleidiai/kleidiai.h000066400000000000000000000005031512524704700223630ustar00rootroot00000000000000// SPDX-FileCopyrightText: Copyright 2025 Arm Limited and/or its affiliates // SPDX-License-Identifier: MIT // #pragma once #include "ggml-alloc.h" #ifdef __cplusplus extern "C" { #endif ggml_backend_buffer_type_t ggml_backend_cpu_kleidiai_buffer_type(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/llamafile/000077500000000000000000000000001512524704700206145ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/llamafile/sgemm-ppc.h000066400000000000000000000355741512524704700226730ustar00rootroot00000000000000#pragma once typedef vector unsigned char vec_t; typedef __vector_quad acc_t; template class tinyBLAS_Q0_PPC { public: tinyBLAS_Q0_PPC(int64_t k, const TA *A, int64_t lda, const block_q8_0 *B, int64_t ldb, float *C, int64_t ldc, int ith, int nth); void matmul(int64_t m, int64_t n); void matmul_tiled_q0(int64_t m, int64_t n, int64_t mc, int64_t nc, int64_t kc) { vec_t A_pack[mc*kc*2]; vec_t B_pack[nc*kc*2]; int comparray[mc*kc]; constexpr bool is_Ablock_q4 = std::is_same_v; int64_t ytiles = m / mc; int64_t xtiles = n / nc; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) { end = tiles; } for (int64_t job = start; job < end; ++job) { int64_t ii = (job / xtiles) * mc; int64_t jj = (job % xtiles) * nc; for (int64_t kk = 0; kk < k; kk += kc) { if constexpr(is_Ablock_q4) { packNormalInt4_large(A + ii*lda + kk, lda, mc, 4, (int8_t*)A_pack, comparray); } else { packNormal_large(A + ii*lda + kk, lda, mc, 8, (int8_t*)A_pack, false, comparray); } packNormal_large(B + jj*ldb + kk, ldb, nc, 8, (uint8_t*)B_pack, true); KERNEL_Q0(ii, jj, mc, nc, kc, kk, A_pack, B_pack, comparray); } } } private: inline void save_res(int ii, int jj, int idx, vector float* fin_res, int RM=4, int RN=4) { for (int I = 0; I < RM; I++) { for (int J = 0; J < RN; J++) { *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&fin_res[idx+I]+J); } } } inline void add_save_res(int ii, int jj, int idx, vector float* fin_res, int RM=4, int RN=4) { for (int I = 0; I < RM; I++) { for (int J = 0; J < RN; J++) { float * c_ptr = (float *)(C+ii+((jj+J)*ldc)+I); *c_ptr += *((float*)&fin_res[idx+I]+J); } } } template inline void compute(acc_t* ACC, int c_idx, int s_idx, ArrayType& comparray, vector float* vs, vector float* fin_res) { vector signed int vec_C[4]; vector float CA[4] = {0}; vector float res[4] = {0}; __builtin_mma_disassemble_acc(vec_C, ACC); for (int i = 0; i < 4; i++) { CA[i] = vec_splats((float)(((double)comparray[c_idx+i]) * -128.0)); res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]); fin_res[s_idx+i] = vec_madd(res[i], vs[s_idx+i], fin_res[s_idx+i]); } } inline void process_q4_elements(vector signed char (&c)[2], int* ca) { const vector signed char lowMask = vec_splats((signed char)0xF); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector signed char v8 = vec_splats((signed char)0x8); vector signed int vsum = {0}; vector signed int vsum2 = {0}; c[0] = vec_and(c[1], lowMask); c[1] = vec_sr(c[1], v4); c[0] = vec_sub(c[0], v8); c[1] = vec_sub(c[1], v8); vsum = vec_sum4s(c[0], vsum); vsum2 = vec_sum4s(c[1], vsum2); vsum = vec_add(vsum, vsum2); *(ca) = vsum[0] + vsum[1] + vsum[2] + vsum[3]; } template inline void vector_permute_store(V2 &s1, V2 &s2, V2 &s3, V2 &s4, V1 *vecOffset, bool flip) { vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 
18, 19, 20, 21, 22, 23}; vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31}; vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}; vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}; V2 t1, t2, t3, t4, t5, t6, t7, t8; vector unsigned char xor_vector; uint8_t flip_vec = 0x80; xor_vector = vec_splats(flip_vec); t1 = vec_perm(s1, s2, swiz1); t2 = vec_perm(s1, s2, swiz2); t3 = vec_perm(s3, s4, swiz1); t4 = vec_perm(s3, s4, swiz2); t5 = vec_perm(t1, t3, swiz3); t6 = vec_perm(t1, t3, swiz4); t7 = vec_perm(t2, t4, swiz3); t8 = vec_perm(t2, t4, swiz4); if (flip == true) { t5 = vec_xor(t5, xor_vector); t6 = vec_xor(t6, xor_vector); t7 = vec_xor(t7, xor_vector); t8 = vec_xor(t8, xor_vector); } vec_xst(t5, 0, vecOffset); vec_xst(t6, 0, vecOffset+16); vec_xst(t7, 0, vecOffset+32); vec_xst(t8, 0, vecOffset+48); } template inline void kernel(int64_t ii, int64_t jj) { if constexpr(RM == 4 && RN == 8) { KERNEL_4x8(ii,jj); } else if constexpr(RM == 8 && RN == 4) { KERNEL_8x4(ii,jj); } else if constexpr(RM == 8 && RN == 8) { KERNEL_8x8(ii,jj); } else { assert(false && "RN/RM values not supported"); } } template void packNormalInt4(const TA* a, int64_t lda, int rows, int cols, int8_t* vec, std::array& comparray); template void packNormal(const block_q8_0* a, int64_t lda, int rows, int cols, VA* vec, bool flip); void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n); void KERNEL_4x8(int64_t ii, int64_t jj); void KERNEL_8x4(int64_t ii, int64_t jj); void KERNEL_8x8(int64_t ii, int64_t jj); void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n, int RM, int RN); template void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n); void compute_scale(int64_t ii, int64_t jj, int blk, vector float* vs){ for (int I = 0; I<8; I++) { float a_scale = unhalf((A+((ii+I)*lda)+blk)->d); for (int J = 0; J<4; J++) { *((float*)&vs[I]+J) = (a_scale * unhalf((B+((jj+J)*ldb)+blk)->d)); *((float*)&vs[I+8]+J) = (a_scale * unhalf((B+((jj+J+4)*ldb)+blk)->d)); } } } inline void process_q8_elements(const int8_t *qs, int *ca) { vector signed char c1 = vec_xl(0, qs); vector signed char c2 = vec_xl(16, qs); vector signed int vsum1 = {0}; vector signed int vsum2 = {0}; vsum1 = vec_sum4s(c1, vsum1); vsum2 = vec_sum4s(c2, vsum2); vector signed int vsum = vec_add(vsum1, vsum2); *ca = vsum[0] + vsum[1] + vsum[2] + vsum[3]; } template void packNormal_large(const block_q8_0* a, int64_t lda, int rows, int cols, VA* vec, bool flip, int* comparray=nullptr) { int64_t i, j; block_q8_0 *aoffset = NULL; VA *vecOffset = NULL; block_q8_0* aoffsets[8]; __vector_pair arr[8]; VB c[8][2] = {0}; VB c1[8] = {0}; VB c2[8] = {0}; aoffset = const_cast(a); vecOffset = vec; j = (rows >> 3); int index = 0; if (j > 0) { do { for (int it = 0; it < 8; it++) aoffsets[it] = aoffset + it*lda; aoffset += 8 * lda; for (int blk = 0; blk < kc; blk++) { for (int it = 0; it < 8; it++) { arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)(aoffsets[it]+blk)->qs); __builtin_vsx_disassemble_pair(c[it], &arr[it]); c1[it] = c[it][0]; c2[it] = c[it][1]; if (comparray){ process_q8_elements((aoffsets[it]+ blk)->qs, &comparray[index + 8*blk + it]); } } vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); vector_permute_store(c1[4], c1[5], c1[6], c1[7], vecOffset+128, flip); vector_permute_store(c2[4], c2[5], c2[6], c2[7], vecOffset+192, flip); vecOffset += 256; } j--; index += 8*kc; } 
while(j > 0); } } void packNormalInt4_large(const TA* a, int64_t lda, int rows, int cols, int8_t* vec, int*comparray) { int64_t i, j; TA *aoffset = NULL; int8_t *vecOffset = NULL; TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL; TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL; vector signed char c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0}; vector signed char c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0}; aoffset = const_cast(a); vecOffset = vec; int index = 0; j = (rows >> 3); if (j > 0) { do { aoffset1 = aoffset; aoffset2 = aoffset1 + lda; aoffset3 = aoffset2 + lda; aoffset4 = aoffset3 + lda; aoffset5 = aoffset4 + lda; aoffset6 = aoffset5 + lda; aoffset7 = aoffset6 + lda; aoffset8 = aoffset7 + lda; aoffset += 8 * lda; for (int blk = 0; blk < kc; blk++) { c1[1] = reinterpret_cast(vec_xl(0, (aoffset1+blk)->qs)); c2[1] = reinterpret_cast(vec_xl(0, (aoffset2+blk)->qs)); c3[1] = reinterpret_cast(vec_xl(0, (aoffset3+blk)->qs)); c4[1] = reinterpret_cast(vec_xl(0, (aoffset4+blk)->qs)); c5[1] = reinterpret_cast(vec_xl(0, (aoffset5+blk)->qs)); c6[1] = reinterpret_cast(vec_xl(0, (aoffset6+blk)->qs)); c7[1] = reinterpret_cast(vec_xl(0, (aoffset7+blk)->qs)); c8[1] = reinterpret_cast(vec_xl(0, (aoffset8+blk)->qs)); process_q4_elements(c1, &comparray[index + 8*blk+0]); process_q4_elements(c2, &comparray[index + 8*blk+1]); process_q4_elements(c3, &comparray[index + 8*blk+2]); process_q4_elements(c4, &comparray[index + 8*blk+3]); process_q4_elements(c5, &comparray[index + 8*blk+4]); process_q4_elements(c6, &comparray[index + 8*blk+5]); process_q4_elements(c7, &comparray[index + 8*blk+6]); process_q4_elements(c8, &comparray[index + 8*blk+7]); vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); vector_permute_store(c5[0], c6[0], c7[0], c8[0], vecOffset+128, false); vector_permute_store(c5[1], c6[1], c7[1], c8[1], vecOffset+192, false); vecOffset += 256; } j--; index += 8*kc; } while (j > 0); } } void KERNEL_Q0(int64_t ii, int64_t jj, int64_t mc, int64_t nc, int64_t kc, int64_t l, vec_t *vec_A, vec_t *vec_B, int *comparray) { acc_t acc[8]; for (int i = 0; i < mc ; i += 8) { for (int j = 0; j < nc; j += 8) { vector float fin_res[16] = {0}; vector float vs[16] = {0}; for (int64_t kk = 0; kk < kc; kk+=2) { for (int x = 0; x < 8; x++) { __builtin_mma_xxsetaccz(&acc[x]); } int A_block_idx = (i/8)*(16*kc) + kk*16; int B_block_idx = (j/8)*(16*kc)+ kk*16; vec_t *A_block = &vec_A[A_block_idx]; vec_t *B_block = &vec_B[B_block_idx]; for (int x = 0; x < 8; x++) { __builtin_mma_xvi8ger4pp(&acc[0], A_block[x], B_block[x]); __builtin_mma_xvi8ger4pp(&acc[1], A_block[x + 8], B_block[x]); __builtin_mma_xvi8ger4pp(&acc[2], A_block[x], B_block[x+8]); __builtin_mma_xvi8ger4pp(&acc[3], A_block[x+8], B_block[x+8]); } compute_scale(ii+i, jj+j, l+kk, vs); int c_index = (i/8)*(8*kc)+ kk*8; int* c_block = &comparray[c_index]; compute(&acc[0], 0, 0, c_block, vs, fin_res); compute(&acc[1], 4, 4, c_block, vs, fin_res); compute(&acc[2], 0, 8, c_block, vs, fin_res); compute(&acc[3], 4, 12, c_block, vs, fin_res); A_block_idx = (i/8)*(16*kc) + (kk+1)*16; B_block_idx = (j/8)*(16*kc)+ (kk+1)*16; A_block = &vec_A[A_block_idx]; B_block = &vec_B[B_block_idx]; for (int x = 0; x < 8; x++) { __builtin_mma_xvi8ger4pp(&acc[4], A_block[x], B_block[x]); __builtin_mma_xvi8ger4pp(&acc[5], A_block[x + 8], B_block[x]); __builtin_mma_xvi8ger4pp(&acc[6], A_block[x], B_block[x+8]); 
__builtin_mma_xvi8ger4pp(&acc[7], A_block[x+8], B_block[x+8]); } compute_scale(ii+i, jj+j, l+kk+1, vs); c_index = (i/8)*(8*kc)+ (kk+1)*8; c_block = &comparray[c_index]; compute(&acc[4], 0, 0, c_block, vs, fin_res); compute(&acc[5], 4, 4, c_block, vs, fin_res); compute(&acc[6], 0, 8, c_block, vs, fin_res); compute(&acc[7], 4, 12, c_block, vs, fin_res); } if (l == 0) { save_res(ii+i, jj+j, 0, fin_res); save_res(ii+i+4, jj+j, 4, fin_res); save_res(ii+i, jj+j+4, 8, fin_res); save_res(ii+i+4, jj+j+4, 12, fin_res); } else { add_save_res(ii+i, jj+j, 0, fin_res); add_save_res(ii+i+4, jj+j, 4, fin_res); add_save_res(ii+i, jj+j+4, 8, fin_res); add_save_res(ii+i+4, jj+j+4, 12, fin_res); } } } } const TA *const A; const block_q8_0 *const B; float *C; const int64_t k; int64_t kc; const int64_t lda; const int64_t ldb; const int64_t ldc; const int ith; const int nth; }; ggml-org-ggml-3678254/src/ggml-cpu/llamafile/sgemm.cpp000066400000000000000000004172421512524704700224420ustar00rootroot00000000000000// Copyright 2024 Mozilla Foundation // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // _ _ ___ _ _ ___ // | |_(_)_ _ _ _| _ ) | /_\ / __| // | _| | ' \ || | _ \ |__ / _ \\__ \. // \__|_|_||_\_, |___/____/_/ \_\___/ // |__/ // // BASIC LINEAR ALGEBRA SUBPROGRAMS // // // This file implements multithreaded CPU matrix multiplication for the // common contiguous use case C = Aᵀ * B. These kernels are designed to // have excellent performance[1] for matrices that fit in the CPU cache // without imposing any overhead such as cache filling or malloc calls. // // This implementation does not guarantee any upper bound with rounding // errors, which grow along with k. Our goal's to maximally exploit the // hardware for performance, and then use whatever resources remain for // improving numerical accuracy. // // [1] J. Tunney, ‘LLaMA Now Goes Faster on CPUs’, Mar. 2024. [Online]. // Available: https://justine.lol/matmul/. [Accessed: 29-Mar-2024]. 
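// Reference semantics of the kernels below (a naive scalar sketch; the real code adds
// threading, register tiling, quantized formats and mixed-precision types): each output
// element is the dot product of row i of A and row j of B over the shared dimension k,
// stored transposed into C:
//
//     for (int64_t i = 0; i < m; ++i)
//         for (int64_t j = 0; j < n; ++j) {
//             float sum = 0;
//             for (int64_t l = 0; l < k; ++l)
//                 sum += A[lda*i + l] * B[ldb*j + l];
//             C[ldc*j + i] = sum;
//         }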
#if defined(__GNUC__) #pragma GCC diagnostic ignored "-Wpedantic" #pragma GCC diagnostic ignored "-Wignored-attributes" #endif #include "sgemm.h" #include "ggml-impl.h" #include "ggml-cpu-impl.h" #include "ggml-quants.h" #include "simd-mappings.h" #include #include #ifdef _MSC_VER #define NOINLINE __declspec(noinline) #else #define NOINLINE __attribute__((__noinline__)) #endif #if defined(__ARM_NEON) || defined(__AVX512F__) || defined(__VXE__) || defined(__VXE2__) #define VECTOR_REGISTERS 32 #else #define VECTOR_REGISTERS 16 #endif #if defined(__riscv_v_intrinsic) #define LMUL 4 #endif #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) namespace { inline float unhalf(ggml_fp16_t d) { return GGML_CPU_FP16_TO_FP32(d); } //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED ARITHMETIC OPERATIONS #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) inline __m128 add(__m128 x, __m128 y) { return _mm_add_ps(x, y); } inline __m128 sub(__m128 x, __m128 y) { return _mm_sub_ps(x, y); } inline __m128 mul(__m128 x, __m128 y) { return _mm_mul_ps(x, y); } #endif // __SSE__ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) inline __m256 add(__m256 x, __m256 y) { return _mm256_add_ps(x, y); } inline __m256 sub(__m256 x, __m256 y) { return _mm256_sub_ps(x, y); } inline __m256 mul(__m256 x, __m256 y) { return _mm256_mul_ps(x, y); } #endif // __AVX__ #if defined(__AVX512F__) inline __m512 add(__m512 x, __m512 y) { return _mm512_add_ps(x, y); } inline __m512 sub(__m512 x, __m512 y) { return _mm512_sub_ps(x, y); } inline __m512 mul(__m512 x, __m512 y) { return _mm512_mul_ps(x, y); } #endif // __AVX512F__ #if defined(__ARM_NEON) inline float32x4_t add(float32x4_t x, float32x4_t y) { return vaddq_f32(x, y); } inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vsubq_f32(x, y); } inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vmulq_f32(x, y); } #endif // __ARM_NEON #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) inline float16x8_t add(float16x8_t x, float16x8_t y) { return vaddq_f16(x, y); } inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); } inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC #if defined(__VXE__) || defined(__VXE2__) inline float32x4_t add(float32x4_t x, float32x4_t y) { return vec_add(x, y); } inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vec_sub(x, y); } inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vec_mul(x, y); } #endif #if defined(__MMA__) #include "sgemm-ppc.h" #endif //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED FUSED MULTIPLY ADD /** * Computes a * b + c. 
*/ template inline U madd(T a, T b, U c) { return add(mul(a, b), c); } #if defined(__FMA__) #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) template <> inline __m256 madd(__m256 a, __m256 b, __m256 c) { return _mm256_fmadd_ps(a, b, c); } #endif #if defined(__AVX512F__) template <> inline __m512 madd(__m512 a, __m512 b, __m512 c) { return _mm512_fmadd_ps(a, b, c); } #endif #if defined(__AVX512BF16__) template <> inline __m512 madd(__m512bh a, __m512bh b, __m512 c) { return _mm512_dpbf16_ps(c, a, b); } template <> inline __m256 madd(__m256bh a, __m256bh b, __m256 c) { return _mm256_dpbf16_ps(c, a, b); } #endif #endif #if defined(__ARM_FEATURE_FMA) template <> inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) { return vfmaq_f32(c, b, a); } #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER) template <> inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) { return vfmaq_f16(c, b, a); } #endif #endif #if defined(__VXE__) || defined(__VXE2__) template <> inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) { return vec_madd(a, b, c); } #endif #if defined(__riscv_zvfh) template <> inline vfloat32m1_t madd(vfloat16mf2_t a, vfloat16mf2_t b, vfloat32m1_t c) { return __riscv_vfwmacc_vv_f32m1(c, a, b, __riscv_vsetvlmax_e32m1()); } inline vfloat32m2_t madd(vfloat16m1_t a, vfloat16m1_t b, vfloat32m2_t c) { return __riscv_vfwmacc_vv_f32m2(c, a, b, __riscv_vsetvlmax_e32m2()); } inline vfloat32m4_t madd(vfloat16m2_t a, vfloat16m2_t b, vfloat32m4_t c) { return __riscv_vfwmacc_vv_f32m4(c, a, b, __riscv_vsetvlmax_e32m4()); } inline vfloat32m8_t madd(vfloat16m4_t a, vfloat16m4_t b, vfloat32m8_t c) { return __riscv_vfwmacc_vv_f32m8(c, a, b, __riscv_vsetvlmax_e32m8()); } inline vfloat32m1_t madd(vfloat32m1_t a, vfloat32m1_t b, vfloat32m1_t c) { return __riscv_vfmacc_vv_f32m1(c, a, b, __riscv_vsetvlmax_e32m1()); } inline vfloat32m2_t madd(vfloat32m2_t a, vfloat32m2_t b, vfloat32m2_t c) { return __riscv_vfmacc_vv_f32m2(c, a, b, __riscv_vsetvlmax_e32m2()); } inline vfloat32m4_t madd(vfloat32m4_t a, vfloat32m4_t b, vfloat32m4_t c) { return __riscv_vfmacc_vv_f32m4(c, a, b, __riscv_vsetvlmax_e32m4()); } inline vfloat32m8_t madd(vfloat32m8_t a, vfloat32m8_t b, vfloat32m8_t c) { return __riscv_vfmacc_vv_f32m8(c, a, b, __riscv_vsetvlmax_e32m8()); } #endif #if defined(__riscv_zvfbfwma) inline vfloat32m1_t madd(vbfloat16mf2_t a, vbfloat16mf2_t b, vfloat32m1_t c) { return __riscv_vfwmaccbf16_vv_f32m1(c, a, b, __riscv_vsetvlmax_e32m1()); } inline vfloat32m2_t madd(vbfloat16m1_t a, vbfloat16m1_t b, vfloat32m2_t c) { return __riscv_vfwmaccbf16_vv_f32m2(c, a, b, __riscv_vsetvlmax_e32m2()); } inline vfloat32m4_t madd(vbfloat16m2_t a, vbfloat16m2_t b, vfloat32m4_t c) { return __riscv_vfwmaccbf16_vv_f32m4(c, a, b, __riscv_vsetvlmax_e32m4()); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED HORIZONTAL SUM #if defined(__ARM_NEON) inline float hsum(float32x4_t x) { return vaddvq_f32(x); } #endif // __ARM_NEON #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER) inline float hsum(float16x8_t x) { return vaddvq_f32(vaddq_f32(vcvt_f32_f16(vget_low_f16(x)), vcvt_f32_f16(vget_high_f16(x)))); } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC #if defined(__VXE__) || defined(__VXE2__) inline float hsum(float32x4_t x) { float32x4_t tmp = x + vec_reve(x); return tmp[0] + tmp[1]; } #endif #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || 
defined(__AVX512F__) inline float hsum(__m128 x) { #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) x = _mm_add_ps(x, _mm_movehl_ps(x, x)); x = _mm_add_ss(x, _mm_movehdup_ps(x)); #else __m128 t; t = _mm_shuffle_ps(x, x, _MM_SHUFFLE(2, 3, 0, 1)); x = _mm_add_ps(x, t); t = _mm_movehl_ps(t, x); x = _mm_add_ss(x, t); #endif return _mm_cvtss_f32(x); } #endif #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) inline float hsum(__m256 x) { return hsum(_mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x))); } #endif // __AVX__ #if defined(__AVX512F__) inline float hsum(__m512 x) { return _mm512_reduce_add_ps(x); } #endif // __AVX512F__ #if defined(__riscv_zvfh) inline float hsum(vfloat32m1_t x) { return __riscv_vfmv_f_s_f32m1_f32( __riscv_vfredusum_vs_f32m1_f32m1(x, __riscv_vfmv_v_f_f32m1(0, 1), __riscv_vsetvlmax_e32m1())); } inline float hsum(vfloat32m2_t x) { return __riscv_vfmv_f_s_f32m1_f32( __riscv_vfredusum_vs_f32m2_f32m1(x, __riscv_vfmv_v_f_f32m1(0, 1), __riscv_vsetvlmax_e32m2())); } inline float hsum(vfloat32m4_t x) { return __riscv_vfmv_f_s_f32m1_f32( __riscv_vfredusum_vs_f32m4_f32m1(x, __riscv_vfmv_v_f_f32m1(0, 1), __riscv_vsetvlmax_e32m4())); } inline float hsum(vfloat32m8_t x) { return __riscv_vfmv_f_s_f32m1_f32( __riscv_vfredusum_vs_f32m8_f32m1(x, __riscv_vfmv_v_f_f32m1(0, 1), __riscv_vsetvlmax_e32m8())); } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED MEMORY LOADING template T load(const U *); #if defined(__ARM_NEON) template <> inline float32x4_t load(const float *p) { return vld1q_f32(p); } #if !defined(_MSC_VER) // FIXME: this should check for __ARM_FEATURE_FP16_VECTOR_ARITHMETIC template <> inline float16x8_t load(const ggml_fp16_t *p) { return vld1q_f16((const float16_t *)p); } template <> inline float32x4_t load(const ggml_fp16_t *p) { return vcvt_f32_f16(vld1_f16((const float16_t *)p)); } #endif // _MSC_VER #endif // __ARM_NEON #if defined(__VXE__) || defined(__VXE2__) template <> inline float32x4_t load(const ggml_fp16_t * p) { float tmp[4]; for (int i = 0; i < 4; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(p[i]); } return vec_xl(0, (const float *)(tmp)); } template <> inline float32x4_t load(const float * p) { return vec_xl(0, p); } #endif #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) template <> inline __m128 load(const float *p) { return _mm_loadu_ps(p); } #endif // __SSE__ #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) template <> inline __m256 load(const float *p) { return _mm256_loadu_ps(p); } #endif // __AVX__ #if defined(__AVX2__) || defined(__AVX512F__) template <> inline __m256 load(const ggml_bf16_t *p) { return _mm256_castsi256_ps( _mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)p)), 16)); } #endif // __AVX2__ #if defined(__F16C__) template <> inline __m256 load(const ggml_fp16_t *p) { return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p)); } #endif // __F16C__ #if defined(__AVX512F__) template <> inline __m512 load(const float *p) { return _mm512_loadu_ps(p); } template <> inline __m512 load(const ggml_fp16_t *p) { return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)p)); } template <> inline __m512 load(const ggml_bf16_t *p) { return _mm512_castsi512_ps( _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)p)), 16)); } #endif // __AVX512F__ #if defined(__AVX512BF16__) template <> inline __m512bh load(const ggml_bf16_t *p) { return 
(__m512bh)_mm512_loadu_ps((const float *)p); } template <> inline __m256bh load(const ggml_bf16_t *p) { return (__m256bh)_mm256_loadu_ps((const float *)p); } template <> inline __m512bh load(const float *p) { return _mm512_cvtne2ps_pbh(_mm512_loadu_ps(p + 16), _mm512_loadu_ps(p)); } template <> inline __m256bh load(const float *p) { return _mm512_cvtneps_pbh(_mm512_loadu_ps(p)); } #endif #if defined(__riscv_zvfh) template <> inline vfloat16mf2_t load(const ggml_fp16_t *p) { return __riscv_vle16_v_f16mf2(reinterpret_cast(p), __riscv_vsetvlmax_e16mf2()); } template <> inline vfloat16m1_t load(const ggml_fp16_t *p) { return __riscv_vle16_v_f16m1(reinterpret_cast(p), __riscv_vsetvlmax_e16m1()); } template <> inline vfloat16m2_t load(const ggml_fp16_t *p) { return __riscv_vle16_v_f16m2(reinterpret_cast(p), __riscv_vsetvlmax_e16m2()); } template <> inline vfloat16m4_t load(const ggml_fp16_t *p) { return __riscv_vle16_v_f16m4(reinterpret_cast(p), __riscv_vsetvlmax_e16m4()); } template <> inline vfloat32m1_t load(const float *p) { return __riscv_vle32_v_f32m1(p, __riscv_vsetvlmax_e32m1()); } template <> inline vfloat32m2_t load(const float *p) { return __riscv_vle32_v_f32m2(p, __riscv_vsetvlmax_e32m2()); } template <> inline vfloat32m4_t load(const float *p) { return __riscv_vle32_v_f32m4(p, __riscv_vsetvlmax_e32m4()); } template <> inline vfloat32m8_t load(const float *p) { return __riscv_vle32_v_f32m8(p, __riscv_vsetvlmax_e32m8()); } #endif #if defined(__riscv_zvfbfwma) template <> inline vbfloat16mf2_t load(const ggml_bf16_t *p) { return __riscv_vle16_v_bf16mf2(reinterpret_cast(p), __riscv_vsetvlmax_e16mf2()); } template <> inline vbfloat16m1_t load(const ggml_bf16_t *p) { return __riscv_vle16_v_bf16m1(reinterpret_cast(p), __riscv_vsetvlmax_e16m1()); } template <> inline vbfloat16m2_t load(const ggml_bf16_t *p) { return __riscv_vle16_v_bf16m2(reinterpret_cast(p), __riscv_vsetvlmax_e16m2()); } #endif #if defined(__riscv_zvfh) template T set_zero(); template <> inline vfloat16mf2_t set_zero() { return __riscv_vfmv_v_f_f16mf2(0, __riscv_vsetvlmax_e16mf2()); } template <> inline vfloat16m1_t set_zero() { return __riscv_vfmv_v_f_f16m1(0, __riscv_vsetvlmax_e16m1()); } template <> inline vfloat16m2_t set_zero() { return __riscv_vfmv_v_f_f16m2(0, __riscv_vsetvlmax_e16m2()); } template <> inline vfloat16m4_t set_zero() { return __riscv_vfmv_v_f_f16m4(0, __riscv_vsetvlmax_e16m4()); } template <> inline vfloat32m1_t set_zero() { return __riscv_vfmv_v_f_f32m1(0.0f, __riscv_vsetvlmax_e32m1()); } template <> inline vfloat32m2_t set_zero() { return __riscv_vfmv_v_f_f32m2(0, __riscv_vsetvlmax_e32m2()); } template <> inline vfloat32m4_t set_zero() { return __riscv_vfmv_v_f_f32m4(0, __riscv_vsetvlmax_e32m4()); } template <> inline vfloat32m8_t set_zero() { return __riscv_vfmv_v_f_f32m8(0, __riscv_vsetvlmax_e32m8()); } #endif #if defined(__riscv_v_intrinsic) template size_t vlmax() { if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e16mf2(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e16m1(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e16m2(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e16m4(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e32m1(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e32m2(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e32m4(); } else if constexpr (std::is_same_v) { return __riscv_vsetvlmax_e32m8(); } return 0; } #endif 
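// Note (added commentary): the matrix kernels below are written purely in
// terms of the load/madd/hsum primitives above (plus set_zero/vlmax on
// RISC-V), so one accumulator's K-loop looks the same on every ISA.
// Illustrative sketch with placeholder names, mirroring the generic tinyBLAS
// path and assuming k is a multiple of the vector step KN:
//
//     V acc = {};                                        // one C element's accumulator
//     for (int64_t l = 0; l < k; l += KN)
//         acc = madd(load<V>(a + l), load<V>(b + l), acc);
//     float c = hsum(acc);                               // horizontal reduction to a scalar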
//////////////////////////////////////////////////////////////////////////////////////////////////// // FLOATING POINT MATRIX MULTIPLICATION template static inline int64_t BLOCK_SIZE(size_t m) { const int64_t NB_BLOC_M = (m + M - 1) / M; return (m % NB_BLOC_M == 0) ? m / NB_BLOC_M : (m / NB_BLOC_M) + 1; } static constexpr inline int64_t BLOC_POS(int64_t ib, int64_t ibN, int64_t bloc_size) { return ib < ibN ? ib * bloc_size : ibN * bloc_size + (ib - ibN) * (bloc_size - 1); } template class tinyBLAS { public: tinyBLAS(const ggml_compute_params * params, int64_t k, const TA *A, int64_t lda, const TB *B, int64_t ldb, TC *C, int64_t ldc) : params(params), A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc) { } bool matmul(int64_t m, int64_t n) { if (k % KN != 0) return false; // compute RM for only need tile with size RM&RM-1 #if VECTOR_REGISTERS == 32 if (m % 16 == 0 && (m/16 >= params->nth)) { const int64_t SIZE_N = BLOCK_SIZE<6>(n); mnpack<4, 6, 4>(m, n, SIZE_N, 12); return true; } if (m % 8 == 0 ) { const int64_t SIZE_N = BLOCK_SIZE<6>(n); mnpack<4, 6, 2>(m, n, SIZE_N, 12); return true; } if (m % 4 == 0) { const int64_t SIZE_N = BLOCK_SIZE<6>(n); mnpack<4, 6, 1>(m, n, SIZE_N, 12); return true; } #else // VECTOR_REGISTERS == 16 if (m % 16 == 0 && (m/16 >= params->nth)) { const int64_t SIZE_N = BLOCK_SIZE<3>(n); mnpack<4, 3, 4>(m, n, SIZE_N, 24); return true; } if (m % 8 == 0 ) { const int64_t SIZE_N = BLOCK_SIZE<3>(n); mnpack<4, 3, 2>(m, n, SIZE_N, 24); return true; } if (m % 4 == 0) { const int64_t SIZE_N = BLOCK_SIZE<3>(n); mnpack<4, 3, 1>(m, n, SIZE_N, 24); return true; } #endif return false; } private: template inline void mnpack(int64_t m, int64_t n, int64_t SIZE_N, int64_t BN) { if (SIZE_N == RN) { return gemm(m, n, BN); } if constexpr (RN > 1) { return mnpack(m, n, SIZE_N, BN); } else { GGML_LOG_ERROR("mnpack<%d, %d> bloc size not supported\n", RM, (int)SIZE_N); GGML_ASSERT(false); // we have miss something. } } template inline void gemm_bloc(int64_t ii, int64_t jj) { D Cv[RN][RM] = {}; for (int64_t l = 0; l < k; l += KN) { // help compiler for op order. if constexpr (RM <= RN) { V Av[RM]; for (int64_t i = 0; i < RM; ++i) { Av[i] = load(A + lda * (ii + i) + l); } for (int64_t j = 0; j < RN; ++j) { V Bv = load(B + ldb * (jj + j) + l); for (int64_t i = 0; i < RM; ++i) { Cv[j][i] = madd(Av[i], Bv, Cv[j][i]); } } } else { V Bv[RN]; for (int64_t j = 0; j < RN; ++j) { Bv[j] = load(B + ldb * (jj + j) + l); } for (int64_t i = 0; i < RM; ++i) { V Av = load(A + lda * (ii + i) + l); for (int64_t j = 0; j < RN; ++j) { Cv[j][i] = madd(Av, Bv[j], Cv[j][i]); } } } } for (int64_t j = 0; j < RN; ++j) for (int64_t i = 0; i < RM; ++i) C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } template NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) { GGML_ASSERT(m % (RM * BM) == 0); const int64_t ytiles = m / (RM * BM); const int64_t xtiles = (n + RN -1) / RN; const int64_t jj_RN = (xtiles - (xtiles * RN - n)); // "round" bloc_size to "nearest" BN const int64_t NB_BN = xtiles < BN ? 1 : (xtiles + BN / 2) / BN; const int64_t SIZE_BN = xtiles % NB_BN == 0 ? xtiles / NB_BN : xtiles / NB_BN + 1; const int64_t jj_BN = (NB_BN - (NB_BN * SIZE_BN - xtiles)); const int64_t nb_job = ytiles * NB_BN; if (params->ith == 0) { GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles); // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. 
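            // Work distribution (descriptive note): nb_job = ytiles * NB_BN jobs are
            // handed out through a shared chunk counter. Thread ith starts on job ith,
            // and after finishing a job claims the next unprocessed one via
            // ggml_threadpool_chunk_add(), so the counter only needs to be seeded with
            // nth here before the barrier.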
ggml_threadpool_chunk_set(params->threadpool, params->nth); } ggml_barrier(params->threadpool); int64_t job = params->ith; while (job < nb_job) { const int64_t ii = (job % ytiles) * RM * BM; const int64_t jb = job / ytiles; const int64_t jr0 = BLOC_POS(jb , jj_BN, SIZE_BN); const int64_t jrN = BLOC_POS(jb+1, jj_BN, SIZE_BN); const int64_t jj0 = BLOC_POS(jr0, jj_RN, RN); const int64_t jj2 = BLOC_POS(jrN, jj_RN, RN); const int64_t jj1 = jj2 < jj_RN * RN ? jj2 : jj_RN * RN; for (int64_t bi = 0; bi < BM * RM; bi += RM) { int64_t jj = jj0; for (; jj < jj1; jj += RN) { gemm_bloc(ii + bi, jj); } if constexpr (RN > 1) { for (; jj < jj2; jj += RN - 1) { gemm_bloc(ii + bi, jj); } } GGML_ASSERT(jj == jj2); } job = ggml_threadpool_chunk_add(params->threadpool, 1); } ggml_barrier(params->threadpool); return; } const ggml_compute_params * params; const TA *const A; const TB *const B; TC *const C; const int64_t k; const int64_t lda; const int64_t ldb; const int64_t ldc; }; #if defined(__riscv_v_intrinsic) template class tinyBLAS_RVV { public: tinyBLAS_RVV(const ggml_compute_params * params, int64_t k, const TA *A, int64_t lda, const TB *B, int64_t ldb, TC *C, int64_t ldc) : params(params), A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc) { } bool matmul(int64_t m, int64_t n) { if (k % vlmax() != 0) { return false; } #if LMUL == 1 if (m % 16 == 0 && (m/16 >= params->nth)) { const int64_t SIZE_N = BLOCK_SIZE<6>(n); mnpack<4, 6, 4>(m, n, SIZE_N, 12); return true; } if (m % 8 == 0 ) { const int64_t SIZE_N = BLOCK_SIZE<6>(n); mnpack<4, 6, 2>(m, n, SIZE_N, 12); return true; } if (m % 4 == 0) { const int64_t SIZE_N = BLOCK_SIZE<6>(n); mnpack<4, 6, 1>(m, n, SIZE_N, 12); return true; } #elif LMUL == 2 if (m % 16 == 0 && (m/16 >= params->nth)) { const int64_t SIZE_N = BLOCK_SIZE<3>(n); mnpack<4, 3, 4>(m, n, SIZE_N, 24); return true; } if (m % 8 == 0 ) { const int64_t SIZE_N = BLOCK_SIZE<3>(n); mnpack<4, 3, 2>(m, n, SIZE_N, 24); return true; } if (m % 4 == 0) { const int64_t SIZE_N = BLOCK_SIZE<3>(n); mnpack<4, 3, 1>(m, n, SIZE_N, 24); return true; } #else // LMUL = 4 if (m % 16 == 0 && (m/16 >= params->nth)) { const int64_t SIZE_N = BLOCK_SIZE<2>(n); mnpack<2, 2, 8>(m, n, SIZE_N, 36); return true; } if (m % 8 == 0 ) { const int64_t SIZE_N = BLOCK_SIZE<2>(n); mnpack<2, 2, 4>(m, n, SIZE_N, 36); return true; } if (m % 4 == 0) { const int64_t SIZE_N = BLOCK_SIZE<2>(n); mnpack<2, 2, 2>(m, n, SIZE_N, 36); return true; } #endif return false; } private: template inline void mnpack(int64_t m, int64_t n, int64_t SIZE_N, int64_t BN) { if (SIZE_N == RN) { return gemm(m, n, BN); } if constexpr (RN > 1) { return mnpack(m, n, SIZE_N, BN); } else { GGML_LOG_ERROR("mnpack<%d, %d> bloc size not supported\n", RM, (int)SIZE_N); GGML_ASSERT(false); // we have miss something. 
} } inline void gemm_bloc_4x6(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv02 = set_zero(); D Cv03 = set_zero(); D Cv10 = set_zero(); D Cv11 = set_zero(); D Cv12 = set_zero(); D Cv13 = set_zero(); D Cv20 = set_zero(); D Cv21 = set_zero(); D Cv22 = set_zero(); D Cv23 = set_zero(); D Cv30 = set_zero(); D Cv31 = set_zero(); D Cv32 = set_zero(); D Cv33 = set_zero(); D Cv40 = set_zero(); D Cv41 = set_zero(); D Cv42 = set_zero(); D Cv43 = set_zero(); D Cv50 = set_zero(); D Cv51 = set_zero(); D Cv52 = set_zero(); D Cv53 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Bv0 = load(B + ldb * (jj + 0) + l); V Bv1 = load(B + ldb * (jj + 1) + l); V Bv2 = load(B + ldb * (jj + 2) + l); V Bv3 = load(B + ldb * (jj + 3) + l); V Bv4 = load(B + ldb * (jj + 4) + l); V Bv5 = load(B + ldb * (jj + 5) + l); V Av0 = load(A + lda * (ii + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv10 = madd(Av0, Bv1, Cv10); Cv20 = madd(Av0, Bv2, Cv20); Cv30 = madd(Av0, Bv3, Cv30); Cv40 = madd(Av0, Bv4, Cv40); Cv50 = madd(Av0, Bv5, Cv50); V Av1 = load(A + lda * (ii + 1) + l); Cv01 = madd(Av1, Bv0, Cv01); Cv11 = madd(Av1, Bv1, Cv11); Cv21 = madd(Av1, Bv2, Cv21); Cv31 = madd(Av1, Bv3, Cv31); Cv41 = madd(Av1, Bv4, Cv41); Cv51 = madd(Av1, Bv5, Cv51); V Av2 = load(A + lda * (ii + 2) + l); Cv02 = madd(Av2, Bv0, Cv02); Cv12 = madd(Av2, Bv1, Cv12); Cv22 = madd(Av2, Bv2, Cv22); Cv32 = madd(Av2, Bv3, Cv32); Cv42 = madd(Av2, Bv4, Cv42); Cv52 = madd(Av2, Bv5, Cv52); V Av3 = load(A + lda * (ii + 3) + l); Cv03 = madd(Av3, Bv0, Cv03); Cv13 = madd(Av3, Bv1, Cv13); Cv23 = madd(Av3, Bv2, Cv23); Cv33 = madd(Av3, Bv3, Cv33); Cv43 = madd(Av3, Bv4, Cv43); Cv53 = madd(Av3, Bv5, Cv53); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 0) + (ii + 2)] = hsum(Cv02); C[ldc * (jj + 0) + (ii + 3)] = hsum(Cv03); C[ldc * (jj + 1) + (ii + 0)] = hsum(Cv10); C[ldc * (jj + 1) + (ii + 1)] = hsum(Cv11); C[ldc * (jj + 1) + (ii + 2)] = hsum(Cv12); C[ldc * (jj + 1) + (ii + 3)] = hsum(Cv13); C[ldc * (jj + 2) + (ii + 0)] = hsum(Cv20); C[ldc * (jj + 2) + (ii + 1)] = hsum(Cv21); C[ldc * (jj + 2) + (ii + 2)] = hsum(Cv22); C[ldc * (jj + 2) + (ii + 3)] = hsum(Cv23); C[ldc * (jj + 3) + (ii + 0)] = hsum(Cv30); C[ldc * (jj + 3) + (ii + 1)] = hsum(Cv31); C[ldc * (jj + 3) + (ii + 2)] = hsum(Cv32); C[ldc * (jj + 3) + (ii + 3)] = hsum(Cv33); C[ldc * (jj + 4) + (ii + 0)] = hsum(Cv40); C[ldc * (jj + 4) + (ii + 1)] = hsum(Cv41); C[ldc * (jj + 4) + (ii + 2)] = hsum(Cv42); C[ldc * (jj + 4) + (ii + 3)] = hsum(Cv43); C[ldc * (jj + 5) + (ii + 0)] = hsum(Cv50); C[ldc * (jj + 5) + (ii + 1)] = hsum(Cv51); C[ldc * (jj + 5) + (ii + 2)] = hsum(Cv52); C[ldc * (jj + 5) + (ii + 3)] = hsum(Cv53); } inline void gemm_bloc_4x5(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv02 = set_zero(); D Cv03 = set_zero(); D Cv10 = set_zero(); D Cv11 = set_zero(); D Cv12 = set_zero(); D Cv13 = set_zero(); D Cv20 = set_zero(); D Cv21 = set_zero(); D Cv22 = set_zero(); D Cv23 = set_zero(); D Cv30 = set_zero(); D Cv31 = set_zero(); D Cv32 = set_zero(); D Cv33 = set_zero(); D Cv40 = set_zero(); D Cv41 = set_zero(); D Cv42 = set_zero(); D Cv43 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Bv0 = load(B + ldb * (jj + 0) + l); V Bv1 = load(B + ldb * (jj + 1) + l); V Bv2 = load(B + ldb * (jj + 2) + l); V Bv3 = load(B + ldb * (jj + 3) + l); V Bv4 = load(B + ldb * (jj + 4) + l); V Av0 = load(A + lda * (ii + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv10 = madd(Av0, Bv1, Cv10); Cv20 = 
madd(Av0, Bv2, Cv20); Cv30 = madd(Av0, Bv3, Cv30); Cv40 = madd(Av0, Bv4, Cv40); V Av1 = load(A + lda * (ii + 1) + l); Cv01 = madd(Av1, Bv0, Cv01); Cv11 = madd(Av1, Bv1, Cv11); Cv21 = madd(Av1, Bv2, Cv21); Cv31 = madd(Av1, Bv3, Cv31); Cv41 = madd(Av1, Bv4, Cv41); V Av2 = load(A + lda * (ii + 2) + l); Cv02 = madd(Av2, Bv0, Cv02); Cv12 = madd(Av2, Bv1, Cv12); Cv22 = madd(Av2, Bv2, Cv22); Cv32 = madd(Av2, Bv3, Cv32); Cv42 = madd(Av2, Bv4, Cv42); V Av3 = load(A + lda * (ii + 3) + l); Cv03 = madd(Av3, Bv0, Cv03); Cv13 = madd(Av3, Bv1, Cv13); Cv23 = madd(Av3, Bv2, Cv23); Cv33 = madd(Av3, Bv3, Cv33); Cv43 = madd(Av3, Bv4, Cv43); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 0) + (ii + 2)] = hsum(Cv02); C[ldc * (jj + 0) + (ii + 3)] = hsum(Cv03); C[ldc * (jj + 1) + (ii + 0)] = hsum(Cv10); C[ldc * (jj + 1) + (ii + 1)] = hsum(Cv11); C[ldc * (jj + 1) + (ii + 2)] = hsum(Cv12); C[ldc * (jj + 1) + (ii + 3)] = hsum(Cv13); C[ldc * (jj + 2) + (ii + 0)] = hsum(Cv20); C[ldc * (jj + 2) + (ii + 1)] = hsum(Cv21); C[ldc * (jj + 2) + (ii + 2)] = hsum(Cv22); C[ldc * (jj + 2) + (ii + 3)] = hsum(Cv23); C[ldc * (jj + 3) + (ii + 0)] = hsum(Cv30); C[ldc * (jj + 3) + (ii + 1)] = hsum(Cv31); C[ldc * (jj + 3) + (ii + 2)] = hsum(Cv32); C[ldc * (jj + 3) + (ii + 3)] = hsum(Cv33); C[ldc * (jj + 4) + (ii + 0)] = hsum(Cv40); C[ldc * (jj + 4) + (ii + 1)] = hsum(Cv41); C[ldc * (jj + 4) + (ii + 2)] = hsum(Cv42); C[ldc * (jj + 4) + (ii + 3)] = hsum(Cv43); } inline void gemm_bloc_4x4(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv02 = set_zero(); D Cv03 = set_zero(); D Cv10 = set_zero(); D Cv11 = set_zero(); D Cv12 = set_zero(); D Cv13 = set_zero(); D Cv20 = set_zero(); D Cv21 = set_zero(); D Cv22 = set_zero(); D Cv23 = set_zero(); D Cv30 = set_zero(); D Cv31 = set_zero(); D Cv32 = set_zero(); D Cv33 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Av0 = load(A + lda * (ii + 0) + l); V Av1 = load(A + lda * (ii + 1) + l); V Av2 = load(A + lda * (ii + 2) + l); V Av3 = load(A + lda * (ii + 3) + l); V Bv0 = load(B + ldb * (jj + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv01 = madd(Av1, Bv0, Cv01); Cv02 = madd(Av2, Bv0, Cv02); Cv03 = madd(Av3, Bv0, Cv03); V Bv1 = load(B + ldb * (jj + 1) + l); Cv10 = madd(Av0, Bv1, Cv10); Cv11 = madd(Av1, Bv1, Cv11); Cv12 = madd(Av2, Bv1, Cv12); Cv13 = madd(Av3, Bv1, Cv13); V Bv2 = load(B + ldb * (jj + 2) + l); Cv20 = madd(Av0, Bv2, Cv20); Cv21 = madd(Av1, Bv2, Cv21); Cv22 = madd(Av2, Bv2, Cv22); Cv23 = madd(Av3, Bv2, Cv23); V Bv3 = load(B + ldb * (jj + 3) + l); Cv30 = madd(Av0, Bv3, Cv30); Cv31 = madd(Av1, Bv3, Cv31); Cv32 = madd(Av2, Bv3, Cv32); Cv33 = madd(Av3, Bv3, Cv33); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 0) + (ii + 2)] = hsum(Cv02); C[ldc * (jj + 0) + (ii + 3)] = hsum(Cv03); C[ldc * (jj + 1) + (ii + 0)] = hsum(Cv10); C[ldc * (jj + 1) + (ii + 1)] = hsum(Cv11); C[ldc * (jj + 1) + (ii + 2)] = hsum(Cv12); C[ldc * (jj + 1) + (ii + 3)] = hsum(Cv13); C[ldc * (jj + 2) + (ii + 0)] = hsum(Cv20); C[ldc * (jj + 2) + (ii + 1)] = hsum(Cv21); C[ldc * (jj + 2) + (ii + 2)] = hsum(Cv22); C[ldc * (jj + 2) + (ii + 3)] = hsum(Cv23); C[ldc * (jj + 3) + (ii + 0)] = hsum(Cv30); C[ldc * (jj + 3) + (ii + 1)] = hsum(Cv31); C[ldc * (jj + 3) + (ii + 2)] = hsum(Cv32); C[ldc * (jj + 3) + (ii + 3)] = hsum(Cv33); } inline void gemm_bloc_4x3(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv02 = set_zero(); D Cv03 = 
set_zero(); D Cv10 = set_zero(); D Cv11 = set_zero(); D Cv12 = set_zero(); D Cv13 = set_zero(); D Cv20 = set_zero(); D Cv21 = set_zero(); D Cv22 = set_zero(); D Cv23 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Av0 = load(A + lda * (ii + 0) + l); V Av1 = load(A + lda * (ii + 1) + l); V Av2 = load(A + lda * (ii + 2) + l); V Av3 = load(A + lda * (ii + 3) + l); V Bv0 = load(B + ldb * (jj + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv01 = madd(Av1, Bv0, Cv01); Cv02 = madd(Av2, Bv0, Cv02); Cv03 = madd(Av3, Bv0, Cv03); V Bv1 = load(B + ldb * (jj + 1) + l); Cv10 = madd(Av0, Bv1, Cv10); Cv11 = madd(Av1, Bv1, Cv11); Cv12 = madd(Av2, Bv1, Cv12); Cv13 = madd(Av3, Bv1, Cv13); V Bv2 = load(B + ldb * (jj + 2) + l); Cv20 = madd(Av0, Bv2, Cv20); Cv21 = madd(Av1, Bv2, Cv21); Cv22 = madd(Av2, Bv2, Cv22); Cv23 = madd(Av3, Bv2, Cv23); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 0) + (ii + 2)] = hsum(Cv02); C[ldc * (jj + 0) + (ii + 3)] = hsum(Cv03); C[ldc * (jj + 1) + (ii + 0)] = hsum(Cv10); C[ldc * (jj + 1) + (ii + 1)] = hsum(Cv11); C[ldc * (jj + 1) + (ii + 2)] = hsum(Cv12); C[ldc * (jj + 1) + (ii + 3)] = hsum(Cv13); C[ldc * (jj + 2) + (ii + 0)] = hsum(Cv20); C[ldc * (jj + 2) + (ii + 1)] = hsum(Cv21); C[ldc * (jj + 2) + (ii + 2)] = hsum(Cv22); C[ldc * (jj + 2) + (ii + 3)] = hsum(Cv23); } inline void gemm_bloc_4x2(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv02 = set_zero(); D Cv03 = set_zero(); D Cv10 = set_zero(); D Cv11 = set_zero(); D Cv12 = set_zero(); D Cv13 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Av0 = load(A + lda * (ii + 0) + l); V Av1 = load(A + lda * (ii + 1) + l); V Av2 = load(A + lda * (ii + 2) + l); V Av3 = load(A + lda * (ii + 3) + l); V Bv0 = load(B + ldb * (jj + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv01 = madd(Av1, Bv0, Cv01); Cv02 = madd(Av2, Bv0, Cv02); Cv03 = madd(Av3, Bv0, Cv03); V Bv1 = load(B + ldb * (jj + 1) + l); Cv10 = madd(Av0, Bv1, Cv10); Cv11 = madd(Av1, Bv1, Cv11); Cv12 = madd(Av2, Bv1, Cv12); Cv13 = madd(Av3, Bv1, Cv13); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 0) + (ii + 2)] = hsum(Cv02); C[ldc * (jj + 0) + (ii + 3)] = hsum(Cv03); C[ldc * (jj + 1) + (ii + 0)] = hsum(Cv10); C[ldc * (jj + 1) + (ii + 1)] = hsum(Cv11); C[ldc * (jj + 1) + (ii + 2)] = hsum(Cv12); C[ldc * (jj + 1) + (ii + 3)] = hsum(Cv13); } inline void gemm_bloc_4x1(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv02 = set_zero(); D Cv03 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Av0 = load(A + lda * (ii + 0) + l); V Av1 = load(A + lda * (ii + 1) + l); V Av2 = load(A + lda * (ii + 2) + l); V Av3 = load(A + lda * (ii + 3) + l); V Bv0 = load(B + ldb * (jj + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv01 = madd(Av1, Bv0, Cv01); Cv02 = madd(Av2, Bv0, Cv02); Cv03 = madd(Av3, Bv0, Cv03); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 0) + (ii + 2)] = hsum(Cv02); C[ldc * (jj + 0) + (ii + 3)] = hsum(Cv03); } inline void gemm_bloc_2x2(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); D Cv10 = set_zero(); D Cv11 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Av0 = load(A + lda * (ii + 0) + l); V Av1 = load(A + lda * (ii + 1) + l); V Bv0 = load(B + ldb * (jj + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv01 = madd(Av1, Bv0, Cv01); V Bv1 = load(B + ldb * (jj + 1) + l); Cv10 = madd(Av0, Bv1, Cv10); Cv11 
= madd(Av1, Bv1, Cv11); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); C[ldc * (jj + 1) + (ii + 0)] = hsum(Cv10); C[ldc * (jj + 1) + (ii + 1)] = hsum(Cv11); } inline void gemm_bloc_2x1(int64_t ii, int64_t jj) { size_t vl = vlmax(); D Cv00 = set_zero(); D Cv01 = set_zero(); for (int64_t l = 0; l < k; l += vl) { V Av0 = load(A + lda * (ii + 0) + l); V Av1 = load(A + lda * (ii + 1) + l); V Bv0 = load(B + ldb * (jj + 0) + l); Cv00 = madd(Av0, Bv0, Cv00); Cv01 = madd(Av1, Bv0, Cv01); } C[ldc * (jj + 0) + (ii + 0)] = hsum(Cv00); C[ldc * (jj + 0) + (ii + 1)] = hsum(Cv01); } template inline void gemm_bloc(int64_t ii, int64_t jj) { if constexpr (RM == 4) { if constexpr (RN == 6) { return gemm_bloc_4x6(ii, jj); } if constexpr (RN == 5) { return gemm_bloc_4x5(ii, jj); } if constexpr (RN == 4) { return gemm_bloc_4x4(ii, jj); } if constexpr (RN == 3) { return gemm_bloc_4x3(ii, jj); } if constexpr (RN == 2) { return gemm_bloc_4x2(ii, jj); } if constexpr (RN == 1) { return gemm_bloc_4x1(ii, jj); } } else if constexpr (RM == 2) { if constexpr (RN == 2) { return gemm_bloc_2x2(ii, jj); } if constexpr (RN == 1) { return gemm_bloc_2x1(ii, jj); } } } template NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) { GGML_ASSERT(m % (RM * BM) == 0); const int64_t ytiles = m / (RM * BM); const int64_t xtiles = (n + RN -1) / RN; const int64_t jj_RN = (xtiles - (xtiles * RN - n)); // "round" bloc_size to "nearest" BN const int64_t NB_BN = xtiles < BN ? 1 : (xtiles + BN / 2) / BN; const int64_t SIZE_BN = xtiles % NB_BN == 0 ? xtiles / NB_BN : xtiles / NB_BN + 1; const int64_t jj_BN = (NB_BN - (NB_BN * SIZE_BN - xtiles)); const int64_t nb_job = ytiles * NB_BN; if (params->ith == 0) { GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles); // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. ggml_threadpool_chunk_set(params->threadpool, params->nth); } ggml_barrier(params->threadpool); int64_t job = params->ith; while (job < nb_job) { const int64_t ii = (job % ytiles) * RM * BM; const int64_t jb = job / ytiles; const int64_t jr0 = BLOC_POS(jb , jj_BN, SIZE_BN); const int64_t jrN = BLOC_POS(jb+1, jj_BN, SIZE_BN); const int64_t jj0 = BLOC_POS(jr0, jj_RN, RN); const int64_t jj2 = BLOC_POS(jrN, jj_RN, RN); const int64_t jj1 = jj2 < jj_RN * RN ? 
jj2 : jj_RN * RN; for (int64_t bi = 0; bi < BM * RM; bi += RM) { int64_t jj = jj0; for (; jj < jj1; jj += RN) { gemm_bloc(ii + bi, jj); } if constexpr (RN > 1) { for (; jj < jj2; jj += RN - 1) { gemm_bloc(ii + bi, jj); } } GGML_ASSERT(jj == jj2); } job = ggml_threadpool_chunk_add(params->threadpool, 1); } ggml_barrier(params->threadpool); return; } const ggml_compute_params * params; const TA *const A; const TB *const B; TC *const C; const int64_t k; const int64_t lda; const int64_t ldb; const int64_t ldc; }; #endif ////////////////////////////////////////////////////////////////////////////////////////// // QUANT ZERO MATRIX MULTIPLICATION #if defined(__ARM_FEATURE_DOTPROD) template class tinyBLAS_Q0_ARM { public: tinyBLAS_Q0_ARM(int64_t k, const TA *A, int64_t lda, const block_q8_0 *B, int64_t ldb, float *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } void matmul(int64_t m, int64_t n) { mnpack(0, m, 0, n); } private: NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t mc, nc, mp, np; switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3ll)) { case 0x33: mc = 3; nc = 3; gemm<3, 3>(m0, m, n0, n); break; case 0x32: mc = 3; nc = 2; gemm<3, 2>(m0, m, n0, n); break; case 0x23: mc = 2; nc = 3; gemm<2, 3>(m0, m, n0, n); break; case 0x22: mc = 2; nc = 2; gemm<2, 2>(m0, m, n0, n); break; case 0x31: mc = 3; nc = 1; gemm<3, 1>(m0, m, n0, n); break; case 0x13: mc = 1; nc = 3; gemm<1, 3>(m0, m, n0, n); break; case 0x21: mc = 2; nc = 1; gemm<2, 1>(m0, m, n0, n); break; case 0x12: mc = 1; nc = 2; gemm<1, 2>(m0, m, n0, n); break; case 0x11: mc = 1; nc = 1; gemm<1, 1>(m0, m, n0, n); break; default: return; } mp = m0 + (m - m0) / mc * mc; np = n0 + (n - n0) / nc * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); } template NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; float32x4_t Cv[RN][RM] = {}; for (int64_t l = 0; l < k; ++l) for (int64_t j = 0; j < RN; ++j) for (int64_t i = 0; i < RM; ++i) Cv[j][i] = vmlaq_n_f32(Cv[j][i], vcvtq_f32_s32(vdotq_s32( vdotq_s32(vdupq_n_s32(0), load_lo(A + lda * (ii + i) + l), load_lo(B + ldb * (jj + j) + l)), load_hi(A + lda * (ii + i) + l), load_hi(B + ldb * (jj + j) + l))), unhalf(A[lda * (ii + i) + l].d) * unhalf(B[ldb * (jj + j) + l].d)); for (int64_t j = 0; j < RN; ++j) for (int64_t i = 0; i < RM; ++i) C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } } inline int8x16_t load_lo(const block_q8_0 *b) { return vld1q_s8(b->qs); } inline int8x16_t load_hi(const block_q8_0 *b) { return vld1q_s8(b->qs + 16); } inline int8x16_t load_lo(const block_q4_0 *b) { return vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vld1q_u8(b->qs), vdupq_n_u8(0x0f))), vdupq_n_s8(0x8)); } inline int8x16_t load_hi(const block_q4_0 *b) { return vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(vld1q_u8(b->qs), 4)), vdupq_n_s8(0x8)); } const TA *const A; const block_q8_0 *const B; float *const C; const int64_t k; const int64_t lda; const int64_t ldb; const int64_t ldc; const int ith; const int nth; }; #endif // __ARM_FEATURE_DOTPROD #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) template class tinyBLAS_Q0_AVX { public: tinyBLAS_Q0_AVX(int64_t k, const TA *A, 
int64_t lda, const TB *B, int64_t ldb, TC *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { const int8_t kvalues_iq4nl[16] = { -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113 }; iq4nlt = _mm_loadu_si128((const __m128i *)kvalues_iq4nl); } void matmul(int64_t m, int64_t n) { mnpack(0, m, 0, n); } private: void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t mc, nc, mp, np; switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 4)) { #if VECTOR_REGISTERS == 32 case 0x44: mc = 4; nc = 4; #if defined(__AVX2__) && defined(__F16C__) gemm4xN<4>(m0, m, n0, n); #else gemm<4, 4>(m0, m, n0, n); #endif break; case 0x43: mc = 4; nc = 3; #if defined(__AVX2__) && defined(__F16C__) gemm4xN<3>(m0, m, n0, n); #else gemm<4, 3>(m0, m, n0, n); #endif break; case 0x34: mc = 3; nc = 4; #if defined(__AVX2__) && defined(__F16C__) gemmMx4<3>(m0, m, n0, n); #else gemm<3, 4>(m0, m, n0, n); #endif break; case 0x33: mc = 3; nc = 3; gemm<3, 3>(m0, m, n0, n); break; case 0x42: mc = 4; nc = 2; #if defined(__AVX2__) && defined(__F16C__) gemm4xN<2>(m0, m, n0, n); #else gemm<4, 2>(m0, m, n0, n); #endif break; case 0x24: mc = 2; nc = 4; #if defined(__AVX2__) && defined(__F16C__) gemmMx4<2>(m0, m, n0, n); #else gemm<2, 4>(m0, m, n0, n); #endif break; #else case 0x44: case 0x43: case 0x42: mc = 4; nc = 2; #if defined(__AVX2__) && defined(__F16C__) gemm4xN<2>(m0, m, n0, n); #else gemm<4, 2>(m0, m, n0, n); #endif break; case 0x34: case 0x24: mc = 2; nc = 4; #if defined(__AVX2__) && defined(__F16C__) gemmMx4<2>(m0, m, n0, n); #else gemm<2, 4>(m0, m, n0, n); #endif break; case 0x33: #endif case 0x32: mc = 3; nc = 2; gemm<3, 2>(m0, m, n0, n); break; case 0x23: mc = 2; nc = 3; gemm<2, 3>(m0, m, n0, n); break; case 0x41: mc = 4; nc = 1; #if defined(__AVX2__) && defined(__F16C__) gemm4xN<1>(m0, m, n0, n); #else gemm<4, 1>(m0, m, n0, n); #endif break; case 0x22: mc = 2; nc = 2; gemm<2, 2>(m0, m, n0, n); break; case 0x14: mc = 1; nc = 4; #if defined(__AVX2__) && defined(__F16C__) gemmMx4<1>(m0, m, n0, n); #else gemm<1, 4>(m0, m, n0, n); #endif break; case 0x31: mc = 3; nc = 1; gemm<3, 1>(m0, m, n0, n); break; case 0x13: mc = 1; nc = 3; gemm<1, 3>(m0, m, n0, n); break; case 0x21: mc = 2; nc = 1; gemm<2, 1>(m0, m, n0, n); break; case 0x12: mc = 1; nc = 2; gemm<1, 2>(m0, m, n0, n); break; case 0x11: mc = 1; nc = 1; gemm<1, 1>(m0, m, n0, n); break; default: return; } mp = m0 + (m - m0) / mc * mc; np = n0 + (n - n0) / nc * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); } #if defined(__AVX2__) && defined(__F16C__) // Templated functions for gemm of dimensions 4xN template NOINLINE void gemm4xN(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / 4; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * 4; int64_t jj = n0 + job % xtiles * RN; __m256 Cv[RN][4] = {}; for (int64_t l = 0; l < k; ++l) { uint64_t a_delta = ((uint64_t)A[lda * (ii + 3) + l].d << 48) | ((uint64_t)A[lda * (ii + 2) + l].d << 32) | ((uint64_t)A[lda * (ii + 1) + l].d << 16) | (A[lda * (ii + 0) + l].d); // Convert delta values for four blocks to float values __m128 da = _mm_cvtph_ps(_mm_set_epi64x(0, a_delta)); __m256i avec0 = load(A + lda * (ii + 0) + l); __m256i avec1 = load(A + lda * (ii + 1) + l); __m256i avec2 = load(A + lda * (ii + 2) + l); 
__m256i avec3 = load(A + lda * (ii + 3) + l); for (int64_t j = 0; j < RN; ++j) { __m128 db = _mm_set1_ps(unhalf(B[ldb * (jj + j) + l].d)); // Computation of product of delta values for four blocks and replicate it across 256 bit lane __m256 dvec = _mm256_castps128_ps256(_mm_mul_ps(da, db)); dvec = _mm256_permute2f128_ps(dvec ,dvec, 0); // Computation of dot product and multiplication with appropriate delta value products Cv[j][0] = madd(_mm256_shuffle_ps(dvec, dvec, 0), updot(_mm256_sign_epi8(avec0, avec0), _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec0)), Cv[j][0]); Cv[j][1] = madd(_mm256_shuffle_ps(dvec, dvec, 85), updot(_mm256_sign_epi8(avec1, avec1), _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec1)), Cv[j][1]); Cv[j][2] = madd(_mm256_shuffle_ps(dvec, dvec, 170), updot(_mm256_sign_epi8(avec2, avec2), _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec2)), Cv[j][2]); Cv[j][3] = madd(_mm256_shuffle_ps(dvec, dvec, 255), updot(_mm256_sign_epi8(avec3, avec3), _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec3)), Cv[j][3]); } } for (int64_t j = 0; j < RN; ++j) for (int64_t i = 0; i < 4; ++i) C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } } // Templated functions for gemm of dimensions Mx4 template NOINLINE void gemmMx4(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / 4; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * 4; __m256 Cv[4][RM] = {}; for (int64_t l = 0; l < k; ++l) { uint64_t b_delta = ((uint64_t)B[ldb * (jj + 3) + l].d << 48) | ((uint64_t)B[ldb * (jj + 2) + l].d << 32) | ((uint64_t)B[ldb * (jj + 1) + l].d << 16) | (B[ldb * (jj + 0) + l].d); // Convert delta values for four blocks to float values __m128 db = _mm_cvtph_ps(_mm_set_epi64x(0, b_delta)); __m256i bvec0 = load(B + ldb * (jj + 0) + l); __m256i bvec1 = load(B + ldb * (jj + 1) + l); __m256i bvec2 = load(B + ldb * (jj + 2) + l); __m256i bvec3 = load(B + ldb * (jj + 3) + l); for (int64_t i = 0; i < RM; ++i) { __m128 da = _mm_set1_ps(unhalf((A[lda * (ii + i) + l].d))); // Computation of product of delta values for four blocks and replicate it across 256 bit lane __m256 dvec = _mm256_castps128_ps256(_mm_mul_ps(da, db)); dvec = _mm256_permute2f128_ps(dvec ,dvec, 0); // Computation of dot product and multiplication with appropriate delta value products Cv[0][i] = madd(_mm256_shuffle_ps(dvec, dvec, 0), updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), load(A + lda * (ii + i) + l)), _mm256_sign_epi8(bvec0, load(A + lda * (ii + i) + l))), Cv[0][i]); Cv[1][i] = madd(_mm256_shuffle_ps(dvec, dvec, 85), updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), load(A + lda * (ii + i) + l)), _mm256_sign_epi8(bvec1, load(A + lda * (ii + i) + l))), Cv[1][i]); Cv[2][i] = madd(_mm256_shuffle_ps(dvec, dvec, 170), updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), load(A + lda * (ii + i) + l)), _mm256_sign_epi8(bvec2, load(A + lda * (ii + i) + l))), Cv[2][i]); Cv[3][i] = madd(_mm256_shuffle_ps(dvec, dvec, 255), updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), load(A + lda * (ii + i) + l)), _mm256_sign_epi8(bvec3, load(A + lda * (ii + i) + l))), Cv[3][i]); } } for (int64_t j = 0; j < 4; ++j) for (int64_t i = 0; i < RM; ++i) C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } } #endif template NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, 
int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; __m256 Cv[RN][RM] = {}; for (int64_t l = 0; l < k; ++l) for (int64_t j = 0; j < RN; ++j) for (int64_t i = 0; i < RM; ++i) { #if defined(__AVX2__) __m256 udTmp = updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), load(A + lda * (ii + i) + l)), _mm256_sign_epi8(load(B + ldb * (jj + j) + l), load(A + lda * (ii + i) + l))); #else __m128i ali0 = load0(A + lda * (ii + i) + l); __m128i ali1 = load1(A + lda * (ii + i) + l); __m128i blj0 = load0(B + ldb * (jj + j) + l); __m128i blj1 = load1(B + ldb * (jj + j) + l); __m128i sepAA0 = _mm_sign_epi8(ali0, ali0); __m128i sepAA1 = _mm_sign_epi8(ali1, ali1); __m128i sepBA0 = _mm_sign_epi8(blj0, ali0); __m128i sepBA1 = _mm_sign_epi8(blj1, ali1); // updot const __m128i oneFill = _mm_set1_epi16(1); __m128i mad0 = _mm_maddubs_epi16(sepAA0, sepBA0); __m128i mad1 = _mm_maddubs_epi16(sepAA1, sepBA1); __m256 udTmp = _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_madd_epi16(oneFill, mad1), _mm_madd_epi16(oneFill, mad0))); #endif Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) * unhalf(B[ldb * (jj + j) + l].d)), udTmp, Cv[j][i]); } for (int64_t j = 0; j < RN; ++j) for (int64_t i = 0; i < RM; ++i) C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); } } inline __m256i load(const block_q8_0 *b) { return _mm256_loadu_si256((const __m256i *)b->qs); } inline __m128i load0(const block_q8_0 *b) { return _mm_loadu_si128((const __m128i *)b->qs); } inline __m128i load1(const block_q8_0 *b) { return _mm_loadu_si128(((const __m128i *)b->qs) + 1); } inline __m256i load(const block_q4_0 *b) { return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8)); } inline __m128i load0(const block_q4_0 *b) { const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), x), _mm_set1_epi8(8)); } inline __m128i load1(const block_q4_0 *b) { const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)), _mm_set1_epi8(8)); } inline __m256i load(const block_q5_0 *b) { return _mm256_or_si256(denibble(b->qs), bittobyte(b->qh)); } inline __m128i load0(const block_q5_0* b) { const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); uint32_t x32; memcpy(&x32, b->qh, sizeof(uint32_t)); __m128i qxl = _mm_and_si128(_mm_set1_epi8(15), x); __m128i bytesl = _mm_cmpeq_epi8(_mm_set1_epi64x(-1), _mm_or_si128(_mm_set1_epi64x(0x7fbfdfeff7fbfdfe), _mm_shuffle_epi8(_mm_set1_epi32(x32), _mm_set_epi64x(0x0101010101010101, 0x0000000000000000)))); bytesl = _mm_andnot_si128(bytesl, _mm_set1_epi8((char)0xF0)); return _mm_or_si128(qxl, bytesl); } inline __m128i load1(const block_q5_0* b) { const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); uint32_t x32; memcpy(&x32, b->qh, sizeof(uint32_t)); __m128i qxh = _mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)); __m128i bytesh = _mm_cmpeq_epi8(_mm_set1_epi64x(-1), _mm_or_si128(_mm_set1_epi64x(0x7fbfdfeff7fbfdfe), _mm_shuffle_epi8(_mm_set1_epi32(x32), _mm_set_epi64x(0x0303030303030303, 0x0202020202020202)))); bytesh = _mm_andnot_si128(bytesh, _mm_set1_epi8((char)0xF0)); return _mm_or_si128(qxh, bytesh); } inline __m256i load(const block_iq4_nl *b) { return MM256_SET_M128I(load1(b), 
load0(b)); } inline __m128i load0(const block_iq4_nl *b) { const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); return _mm_shuffle_epi8(iq4nlt, _mm_and_si128(_mm_set1_epi8(15), x)); } inline __m128i load1(const block_iq4_nl *b) { const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs)); return _mm_shuffle_epi8(iq4nlt, _mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4))); } inline __m256 updot(__m256i u, __m256i s) { __m256i res; #if defined(__AVX512VNNI__) && defined(__AVX512VL__) res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s); #elif defined(__AVXVNNI__) res = _mm256_dpbusd_avx_epi32(_mm256_setzero_si256(), u, s); #else res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s)); #endif return _mm256_cvtepi32_ps(res); } static inline __m256i denibble(const uint8_t *p) { __m128i x = _mm_loadu_si128((const __m128i *)p); return _mm256_and_si256(_mm256_set1_epi8(15), _mm256_insertf128_si256(_mm256_castsi128_si256(x), _mm_srli_epi16(x, 4), 1)); } static inline __m256i bittobyte(const uint8_t *p) { uint32_t x32; memcpy(&x32, p, sizeof(uint32_t)); __m256i bytes = _mm256_cmpeq_epi8(_mm256_set1_epi64x(-1), _mm256_or_si256(_mm256_set1_epi64x(0x7fbfdfeff7fbfdfe), _mm256_shuffle_epi8(_mm256_set1_epi32(x32), _mm256_set_epi64x(0x0303030303030303, 0x0202020202020202, 0x0101010101010101, 0x0000000000000000)))); return _mm256_andnot_si256(bytes, _mm256_set1_epi8((char)0xF0)); } const TA *const A; const TB *const B; TC *const C; const int64_t k; const int64_t lda; const int64_t ldb; const int64_t ldc; const int ith; const int nth; __m128i iq4nlt; }; #endif // __AVX__ //PPC Implementation #if defined(__MMA__) #define SAVE_ACC(ACC, ii, jj) \ __builtin_mma_disassemble_acc(vec_C, ACC); \ for (int I = 0; I < 4; I++) { \ for (int J = 0; J < 4; J++) { \ *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J); \ } \ } \ template class tinyBLAS_BF16_PPC { public: tinyBLAS_BF16_PPC(int64_t k, const TA *A, int64_t lda, const TB *B, int64_t ldb, TC *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } void matmul(int64_t m, int64_t n) { mnpack(0, m, 0, n); } private: void vector_permute_store(vec_t *c, int numVec, unsigned char *vecOffset) { vec_t t[8], s[8]; vec_t swiz1 = {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}; vec_t swiz2 = {8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}; vec_t swiz3 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23}; vec_t swiz4 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31}; if (numVec == 2) { t[0] = vec_perm(c[0], c[1], swiz1); t[1] = vec_perm(c[2], c[3], swiz1); s[0] = vec_perm(t[0], t[1], swiz3); s[1] = vec_perm(t[0], t[1], swiz4); vec_xst(s[0], 0, (vec_t*)vecOffset); vec_xst(s[1], 0, (vec_t*)(vecOffset + 16)); } else if (numVec == 4) { t[0] = vec_perm(c[0], c[1], swiz1); t[1] = vec_perm(c[0], c[1], swiz2); t[2] = vec_perm(c[2], c[3], swiz1); t[3] = vec_perm(c[2], c[3], swiz2); s[0] = vec_perm(t[0], t[2], swiz3); s[1] = vec_perm(t[0], t[2], swiz4); s[2] = vec_perm(t[1], t[3], swiz3); s[3] = vec_perm(t[1], t[3], swiz4); for (int i = 0; i < 4; ++i) vec_xst(s[i], 0, (vec_t*)(vecOffset + i * 16)); } else if (numVec == 8) { for (int i = 0; i < 4; i += 2) { t[i+0] = vec_perm(c[i+0], c[i+1], swiz1); t[i+1] = vec_perm(c[i+0], c[i+1], swiz2); } for (int i = 4; i < 8; i += 2) { t[i+0] = vec_perm(c[i+0], c[i+1], swiz1); t[i+1] = vec_perm(c[i+0], c[i+1], swiz2); } s[0] = vec_perm(t[0], t[2], swiz3); s[1] = vec_perm(t[0], t[2], swiz4); s[2] = 
vec_perm(t[1], t[3], swiz3); s[3] = vec_perm(t[1], t[3], swiz4); s[4] = vec_perm(t[4], t[6], swiz3); s[5] = vec_perm(t[4], t[6], swiz4); s[6] = vec_perm(t[5], t[7], swiz3); s[7] = vec_perm(t[5], t[7], swiz4); for (int i = 0; i < 8; ++i) vec_xst(s[i], 0, (vec_t*)(vecOffset + i * 16)); } } void packNormal(const TA* a, int64_t lda, int rows, int cols, unsigned char* vec) { int64_t i, j; TA *aoffset = NULL; unsigned char *vecOffset = NULL; TA * aoffsets[8]; vector unsigned char c_arr[8]; aoffset = const_cast(a); vecOffset = vec; j = (rows >> 3); if (j > 0) { do { if (cols == 4) { aoffsets[0] = aoffset; for (int it = 1; it < 4; ++it) aoffsets[it] = aoffsets[it-1] + lda; aoffset += 4 * lda; for (int i = 0; i < 4; ++i) c_arr[i] = vec_xl(0, (vector unsigned char*)aoffsets[i]); vector_permute_store(c_arr, 4, vecOffset); for (int i = 0; i<4; i++) aoffsets[i] = aoffsets[i]+lda; vecOffset +=64; } i = (cols >> 3); if (i > 0) { aoffsets[0] = aoffset; for (int it = 1; it < 8; ++it) { aoffsets[it] = aoffsets[it-1] + lda; } aoffset += 8 * lda; do { for (int it = 0; it < 8; ++it) c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]); vector_permute_store(c_arr, 8, vecOffset); for (int it = 0; it < 8; ++it) aoffsets[it] = aoffsets[it] + 8*lda; vecOffset += 128; i--; } while(i > 0); } j--; } while(j > 0); } if (rows & 4) { aoffsets[0] = aoffset; for (int it = 1; it < 4; ++it) aoffsets[it] = aoffsets[it-1] + lda; aoffset += 4 * lda; if (cols == 4) { for (int it = 0; it < 4; ++it) c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]); vector_permute_store(c_arr, 2, vecOffset); for (int it = 0; it< 4; it++) aoffsets[it] = aoffsets[it] + lda; vecOffset += 32; } i = (cols >> 3); if (i > 0) { do { for (int it = 0; it < 4; ++it) c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]); vector_permute_store(c_arr, 4, vecOffset); for (int it = 0; it< 4; it++) aoffsets[it] = aoffsets[it] + 8*lda; vecOffset += 64; i--; } while(i > 0); } } if (rows & 3) { aoffsets[0] = aoffset; for (int it = 1; it < 4; ++it) aoffsets[it] = aoffsets[it-1] + lda; if (cols == 4) { switch(rows) { case 3: c_arr[2] = vec_xl(0, (vector unsigned char*)aoffsets[2]); case 2: c_arr[1] = vec_xl(0, (vector unsigned char*)aoffsets[1]); case 1: c_arr[0] = vec_xl(0, (vector unsigned char*)aoffsets[0]); break; } vector_permute_store(c_arr, 2, vecOffset); for (int it = 0; it< 4; it++) aoffsets[it] = aoffsets[it] + lda; vecOffset += 32; } i = (cols >> 3); if (i > 0) { do { switch(rows) { case 3: c_arr[2] = vec_xl(0, (vector unsigned char*)aoffsets[2]); case 2: c_arr[1] = vec_xl(0, (vector unsigned char*)aoffsets[1]); case 1: c_arr[0] = vec_xl(0, (vector unsigned char*)aoffsets[0]); break; } vector_permute_store(c_arr, 4, vecOffset); for (int it = 0; it <4; it++) aoffsets[it] = aoffsets[it] + 8* lda; vecOffset += 64; i--; } while(i > 0); } } } void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t mc, nc, mp, np; int m_rem = MIN(m - m0, 8); int n_rem = MIN(n - n0, 8); if (m_rem >= 8 && n_rem >= 8) { mc = 8; nc = 8; gemm<8,8>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 8) { mc = 4; nc = 8; gemm<4,8>(m0, m, n0, n); } else if (m_rem >=8 && n_rem >=4){ mc = 8; nc = 4; gemm<8,4>(m0, m, n0, n); } else if ((m_rem < 4) && (n_rem >= 8)) { nc = 8; switch(m_rem) { case 1: mc = 1; gemm_Mx8<1>(m0, m, n0, n); break; case 2: mc = 2; gemm_Mx8<2>(m0, m, n0, n); break; case 3: mc = 3; gemm_Mx8<3>(m0, m, n0, n); break; default: return; } } else if (m_rem >= 4 && n_rem >= 4) { mc = 4; nc = 4; gemm_small<4, 4>(m0, m, n0, n); } else if ((m_rem > 4) 
&& (n_rem < 4)) { mc = 4; switch(n_rem) { case 1: nc = 1; gemm_small<4, 1>(m0, m, n0, n); break; case 2: nc = 2; gemm_small<4, 2>(m0, m, n0, n); break; case 3: nc = 3; gemm_small<4, 3>(m0, m, n0, n); break; default: return; } } else { switch((m_rem << 4) | n_rem) { case 0x43: mc = 4; nc = 3; gemm_small<4, 3>(m0, m, n0, n); break; case 0x42: mc = 4; nc = 2; gemm_small<4, 2>(m0, m, n0, n); break; case 0x41: mc = 4; nc = 1; gemm_small<4, 1>(m0, m, n0, n); break; case 0x34: mc = 3; nc = 4; gemm_small<3, 4>(m0, m, n0, n); break; case 0x33: mc = 3; nc = 3; gemm_small<3, 3>(m0, m, n0, n); break; case 0x32: mc = 3; nc = 2; gemm_small<3, 2>(m0, m, n0, n); break; case 0x31: mc = 3; nc = 1; gemm_small<3, 1>(m0, m, n0, n); break; case 0x24: mc = 2; nc = 4; gemm_small<2,4>(m0, m, n0, n); break; case 0x23: mc = 2; nc = 3; gemm_small<2, 3>(m0, m, n0, n); break; case 0x22: mc = 2; nc = 2; gemm_small<2, 2>(m0, m, n0, n); break; case 0x21: mc = 2; nc = 1; gemm_small<2, 1>(m0, m, n0, n); break; case 0x14: mc = 1; nc = 4; gemm_small<1, 4>(m0, m, n0, n); break; case 0x13: mc = 1; nc = 3; gemm_small<1, 3>(m0, m, n0, n); break; case 0x12: mc = 1; nc = 2; gemm_small<1, 2>(m0, m, n0, n); break; case 0x11: mc = 1; nc = 1; gemm_small<1, 1>(m0, m, n0, n); break; default: return; } } mp = m0 + (m - m0) / mc * mc; np = n0 + (n - n0) / nc * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); } void KERNEL_4x8(int64_t ii, int64_t jj) { vec_t vec_A[4], vec_B[8] , vec_C[4]; acc_t acc_0, acc_1; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); for (int l = 0; l < k; l+=8) { packNormal((A+(ii*lda)+l), lda, 4, 8, (uint8_t*)vec_A); packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B); for (int x = 0; x < 4; x++) { __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x], vec_B[x+4]); } } SAVE_ACC(&acc_0, ii, jj); SAVE_ACC(&acc_1, ii, jj+4); } void KERNEL_8x4(int64_t ii, int64_t jj) { vec_t vec_A[8], vec_B[4] , vec_C[4]; acc_t acc_0, acc_1; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); for (int l = 0; l < k; l+=8) { packNormal((A+(ii*lda)+l), lda, 8, 8, (uint8_t*)vec_A); packNormal((B+(jj*ldb)+l), ldb, 8, 4, (uint8_t*)vec_B); for (int x = 0; x < 4; x++) { __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x+4], vec_B[x]); } } SAVE_ACC(&acc_0, ii, jj); SAVE_ACC(&acc_1, ii+4, jj); } void KERNEL_8x8(int64_t ii, int64_t jj) { vec_t vec_A[8], vec_B[8], vec_C[4]; acc_t acc_0, acc_1, acc_2, acc_3; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); __builtin_mma_xxsetaccz(&acc_2); __builtin_mma_xxsetaccz(&acc_3); for (int l = 0; l < k; l+=8) { packNormal(A+(ii*lda)+l, lda, 8, 8, (uint8_t*)vec_A); packNormal(B+(jj*ldb)+l, ldb, 8, 8, (uint8_t*)vec_B); for (int x = 0; x < 4; x++) { __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvbf16ger2pp(&acc_1, (vec_t)vec_A[x], (vec_t)vec_B[x+4]); __builtin_mma_xvbf16ger2pp(&acc_2, (vec_t)vec_A[x+4], (vec_t)vec_B[x]); __builtin_mma_xvbf16ger2pp(&acc_3, (vec_t)vec_A[x+4], (vec_t)vec_B[x+4]); } } SAVE_ACC(&acc_0, ii, jj); SAVE_ACC(&acc_1, ii, jj+4); SAVE_ACC(&acc_2, ii+4, jj); SAVE_ACC(&acc_3, ii+4, jj+4); } template void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { 
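            // Static partitioning (descriptive note): the RM x RN tile grid is split into
            // `duty`-sized contiguous job ranges, one per thread; each job index is decoded
            // below into the row/column origin (ii, jj) of the tile it owns.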
int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; vec_t vec_C[4]; acc_t acc_0; __builtin_mma_xxsetaccz(&acc_0); vec_t vec_A[2], vec_B[2]; for (int l=0; l void gemm_Mx8(int64_t m0, int64_t m, int64_t n0, int64_t n) { int RN = 8; int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; vec_t vec_C[4]; acc_t acc_0, acc_1; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); vec_t vec_A[4], vec_B[8]; for (int l=0; l inline void kernel(int64_t ii, int64_t jj) { if constexpr(RM == 4 && RN == 8) { KERNEL_4x8(ii,jj); } else if constexpr(RM == 8 && RN == 8) { KERNEL_8x8(ii,jj); } else if constexpr(RM == 8 && RN == 4) { KERNEL_8x4(ii,jj); } else { assert(false && "RN/RM values not supported"); } } template NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; kernel(ii, jj); } } const TA *const A; const TB *const B; TC *C; const int64_t k; const int64_t lda; const int64_t ldb; const int64_t ldc; const int ith; const int nth; }; template tinyBLAS_Q0_PPC::tinyBLAS_Q0_PPC(int64_t k, const TA *A, int64_t lda, const block_q8_0 *B, int64_t ldb, float *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { kc = 64; } template void tinyBLAS_Q0_PPC::matmul(int64_t m, int64_t n) { int mc = 64; int nc = 64; if (n % 8 == 0 && n < nc) { nc = n; mc = 32 ; kc = 32; } const bool is_aligned = ((m & (mc - 1)) == 0) & ((n & (nc - 1)) == 0) & ((k & (kc - 1)) == 0); if (is_aligned) { this->matmul_tiled_q0(m, n, mc, nc, kc); } else { mnpack(0, m, 0, n); } } template template void tinyBLAS_Q0_PPC::packNormalInt4(const TA* a, int64_t lda, int rows, int cols, int8_t* vec, std::array& comparray) { int64_t i, j; TA *aoffset = NULL; int8_t *vecOffset = NULL; TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL; TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL; vector signed char c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0}; vector signed char c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0}; aoffset = const_cast(a); vecOffset = vec; j = (rows >> 3); if (j > 0) { do { aoffset1 = aoffset; aoffset2 = aoffset1 + lda; aoffset3 = aoffset2 + lda; aoffset4 = aoffset3 + lda; aoffset5 = aoffset4 + lda; aoffset6 = aoffset5 + lda; aoffset7 = aoffset6 + lda; aoffset8 = aoffset7 + lda; aoffset += 8 * lda; i = (cols >> 2); if (i > 0) { do { c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); c4[1] = reinterpret_cast(vec_xl(0, aoffset4->qs)); c5[1] = reinterpret_cast(vec_xl(0, aoffset5->qs)); c6[1] = reinterpret_cast(vec_xl(0, aoffset6->qs)); c7[1] = reinterpret_cast(vec_xl(0, aoffset7->qs)); c8[1] = reinterpret_cast(vec_xl(0, aoffset8->qs)); process_q4_elements(c1, &comparray[0]); process_q4_elements(c2, &comparray[1]); process_q4_elements(c3, &comparray[2]); 
process_q4_elements(c4, &comparray[3]); process_q4_elements(c5, &comparray[4]); process_q4_elements(c6, &comparray[5]); process_q4_elements(c7, &comparray[6]); process_q4_elements(c8, &comparray[7]); vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); vector_permute_store(c5[0], c6[0], c7[0], c8[0], vecOffset+128, false); vector_permute_store(c5[1], c6[1], c7[1], c8[1], vecOffset+192, false); aoffset1 += lda; aoffset2 += lda; aoffset3 += lda; aoffset4 += lda; aoffset5 += lda; aoffset6 += lda; aoffset7 += lda; aoffset8 += lda; vecOffset += 256; i--; } while (i > 0); } j--; } while (j > 0); } if (rows & 4) { aoffset1 = aoffset; aoffset2 = aoffset1 + lda; aoffset3 = aoffset2 + lda; aoffset4 = aoffset3 + lda; aoffset += 4 * lda; i = (cols >> 2); if (i > 0) { do { c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); c4[1] = reinterpret_cast(vec_xl(0, aoffset4->qs)); process_q4_elements(c1, &comparray[0]); process_q4_elements(c2, &comparray[1]); process_q4_elements(c3, &comparray[2]); process_q4_elements(c4, &comparray[3]); vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); aoffset1 += lda; aoffset2 += lda; aoffset3 += lda; aoffset4 += lda; vecOffset += 128; i--; } while (i > 0); } } if (rows & 3) { aoffset1 = aoffset; aoffset2 = aoffset1 + lda; aoffset3 = aoffset2 + lda; i = (cols >> 2); if (i > 0) { do { switch(rows) { case 3: c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); case 2: c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); case 1: c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); break; } process_q4_elements(c1, &comparray[0]); process_q4_elements(c2, &comparray[1]); process_q4_elements(c3, &comparray[2]); process_q4_elements(c4, &comparray[3]); vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); aoffset1 += lda; aoffset2 += lda; aoffset3 += lda; vecOffset += 128; i--; } while(i > 0); } } } template template void tinyBLAS_Q0_PPC::packNormal(const block_q8_0* a, int64_t lda, int rows, int cols, VA* vec, bool flip) { int64_t i, j; block_q8_0 *aoffset = NULL; VA *vecOffset = NULL; block_q8_0* aoffsets[8]; __vector_pair arr[8]; VB c[8][2] = {0}; VB c1[8] = {0}; VB c2[8] = {0}; aoffset = const_cast(a); vecOffset = vec; j = (rows >> 3); if (j > 0) { do { aoffsets[0] = aoffset; for (int it = 1; it < 8; it++) aoffsets[it] = aoffsets[it-1] + lda; aoffset += 8 * lda; i = (cols >> 3); if (i > 0) { do { for (int it = 0; it < 8; it++) { arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]->qs); __builtin_vsx_disassemble_pair(c[it], &arr[it]); c1[it] = c[it][0]; c2[it] = c[it][1]; } vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); vector_permute_store(c1[4], c1[5], c1[6], c1[7], vecOffset+128, flip); vector_permute_store(c2[4], c2[5], c2[6], c2[7], vecOffset+192, flip); for (int it = 0; it < 8; it++) aoffsets[it] += lda; vecOffset += 256; i--; } while(i > 0); } j--; } while(j > 0); } if (rows & 4) { aoffsets[0] = aoffset; for (int it = 1; it < 4; it++ ) aoffsets[it] = aoffsets[it-1] + lda; aoffset += 4 * lda; i = (cols >> 3); if (i > 0) { do { for (int it = 0; it < 4; it++) { arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]->qs); 
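                        // lxvp loads a 32-byte vector pair in a single instruction -- exactly
                        // the 32 int8 quants of one q8_0 block -- and disassemble_pair splits
                        // it into two 16-byte VSX registers (c1/c2, the two halves of the block)
                        // so they can be transposed and stored by vector_permute_store below.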
__builtin_vsx_disassemble_pair(c[it], &arr[it]); c1[it] = c[it][0]; c2[it] = c[it][1]; } vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); for (int it = 0; it < 4; it++) { aoffsets[it] += lda; } vecOffset += 128; i--; } while(i > 0); } } if (rows & 3) { aoffsets[0] = aoffset; for (int it = 1; it < 3; it++ ) aoffsets[it] = aoffsets[it-1] + lda; i = (cols >> 3); if (i > 0) { do { switch(rows) { case 3: arr[2] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[2]->qs); __builtin_vsx_disassemble_pair(c[2], &arr[2]); c1[2] = c[2][0]; c2[2] = c[2][1]; case 2: arr[1] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[1]->qs); __builtin_vsx_disassemble_pair(c[1], &arr[1]); c1[1] = c[1][0]; c2[1] = c[1][1]; case 1: arr[0] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[0]->qs); __builtin_vsx_disassemble_pair(c[0], &arr[0]); c1[0] = c[0][0]; c2[0] = c[0][1]; break; } vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); for (int it = 0; it < 3; it++) aoffsets[it] += lda; vecOffset += 128; i--; } while(i > 0); } } } template void tinyBLAS_Q0_PPC::mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { int m_rem = MIN(m - m0, 16); int n_rem = MIN(n - n0, 16); int mc = 0, nc = 0; if (m_rem >= 8 && n_rem >= 8) { mc = 8; nc = 8; gemm<8, 8>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 8) { mc = 4; nc = 8; gemm<4, 8>(m0, m, n0, n); } else if (m_rem >= 8 && n_rem >= 4) { mc = 8; nc = 4; gemm<8, 4>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 4) { mc = 4; nc = 4; gemm_small(m0, m, n0, n, mc, nc); } else { mc = (m_rem >= 4) ? 4 : m_rem; nc = (n_rem >= 4) ? 4 : n_rem; if (mc == 0 || nc == 0) return; gemm_small(m0, m, n0, n, mc, nc); } int64_t mp = m0 + ((m - m0) / mc) * mc; int64_t np = n0 + ((n - n0) / nc) * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); } template void tinyBLAS_Q0_PPC::KERNEL_4x8(int64_t ii, int64_t jj) { vec_t vec_A[8], vec_B[16] = {0}; acc_t acc_0, acc_1; std::array comparray {}; vector float fin_res[8] = {0}; vector float vs[8] = {0}; bool isAblock_q4 = std::is_same_v; for (int l = 0; l < k; l++) { __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); if (std::is_same_v) { packNormalInt4<4>((A+(ii*lda)+l), lda, 4, 4, (int8_t*)vec_A, comparray); } else { packNormal((const block_q8_0*)(A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false); } packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x++) { __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x], vec_B[x+8]); } for (int I = 0; I<4; I++) { for (int J = 0; J<4; J++) { *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d)); *((float*)&vs[I+4]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d)); } } if (!isAblock_q4) { auto aoffset = A+(ii*lda)+l; for (int i = 0; i < 4; i++) { comparray[i] = 0; int ca = 0; auto *at = aoffset->qs; for (int j = 0; j < 32; j++) ca += (int)*at++; comparray[i] = ca; aoffset += lda; } } compute(&acc_0, 0, 0, comparray, vs, fin_res); compute(&acc_1, 0, 4, comparray, vs, fin_res); } save_res(ii, jj, 0, fin_res); save_res(ii, jj+4, 4, fin_res); } template void tinyBLAS_Q0_PPC::KERNEL_8x4(int64_t ii, int64_t jj) { vec_t vec_A[16], vec_B[8] = {0}; acc_t acc_0, acc_1; std::array comparray {}; vector float fin_res[8] = {0}; vector float vs[8] = {0}; bool isAblock_q4 = std::is_same_v; for (int l = 0; l < k; l++) { 
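            // Each iteration of this loop consumes one quantized block (32 values) of the
            // K dimension. The integer accumulators are re-zeroed per block because every
            // block carries its own fp16 scale: the raw int32 dot products must be scaled
            // by d_A * d_B (the vs[] values computed below) before being accumulated into
            // the running float result fin_res via compute().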
__builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); if (std::is_same_v) { packNormalInt4<8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray); } else { packNormal((const block_q8_0*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false); } packNormal((B+(jj*ldb)+l), ldb, 4, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x++) { __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]); } for (int I = 0; I<8; I++) { for (int J = 0; J<4; J++) { *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d)); } } if (!isAblock_q4) { auto aoffset = A+(ii*lda)+l; for (int i = 0; i < 8; i++) { comparray[i] = 0; int ca = 0; auto *at = aoffset->qs; for (int j = 0; j < 32; j++) ca += (int)*at++; comparray[i] = ca; aoffset += lda; } } compute(&acc_0, 0, 0, comparray, vs, fin_res); compute(&acc_1, 4, 4, comparray, vs, fin_res); } save_res(ii, jj, 0, fin_res); save_res(ii+4, jj, 4, fin_res); } template void tinyBLAS_Q0_PPC::KERNEL_8x8(int64_t ii, int64_t jj) { vec_t vec_A[16], vec_B[16] = {0}; acc_t acc_0, acc_1, acc_2, acc_3; acc_t acc_4, acc_5, acc_6, acc_7; std::array comparray {}; vector float fin_res[16] = {0}; vector float vs[16] = {0}; bool isAblock_q4 = std::is_same_v; for (int l = 0; l < k; l++) { __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); __builtin_mma_xxsetaccz(&acc_2); __builtin_mma_xxsetaccz(&acc_3); if (std::is_same_v) { packNormalInt4<8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray); } else { packNormal((const block_q8_0*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false); } packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x++) { __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]); __builtin_mma_xvi8ger4pp(&acc_2, vec_A[x], vec_B[x+8]); __builtin_mma_xvi8ger4pp(&acc_3, vec_A[x+8], vec_B[x+8]); } for (int I = 0; I<8; I++) { for (int J = 0; J<4; J++) { *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d)); *((float*)&vs[I+8]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d)); } } if (!isAblock_q4) { auto aoffset = A+(ii*lda)+l; for (int i = 0; i < 8; i++) { comparray[i] = 0; int ca = 0; auto *at = aoffset->qs; for (int j = 0; j < 32; j++) ca += (int)*at++; comparray[i] = ca; aoffset += lda; } } compute(&acc_0, 0, 0, comparray, vs, fin_res); compute(&acc_1, 4, 4, comparray, vs, fin_res); compute(&acc_2, 0, 8, comparray, vs, fin_res); compute(&acc_3, 4, 12, comparray, vs, fin_res); } save_res(ii, jj, 0, fin_res); save_res(ii+4, jj, 4, fin_res); save_res(ii, jj+4, 8, fin_res); save_res(ii+4, jj+4, 12, fin_res); } template void tinyBLAS_Q0_PPC::gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n, int RM, int RN) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; vec_t vec_A[8] = {0}, vec_B[8] = {0}; vector signed int vec_C[4]; acc_t acc_0; bool isAblock_q4 = std::is_same_v; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; std::array comparray{}; vector float res[4] = {0}; vector float fin_res[4] = {0}; vector float vs[4] = {0}; vector float CA[4] = {0}; __builtin_prefetch((A+(ii*lda)+0)->qs, 0, 1); // prefetch first value __builtin_prefetch((B+(jj*ldb)+0)->qs, 0, 1); // prefetch first value 
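        // Per-block inner loop below: pack RM rows of A and RN columns of B into the MMA
        // layout, accumulate integer dot products with xvi8ger4pp, then convert and scale.
        // B is packed with flip=true, which stores its signed int8 quants offset by +128
        // (see the flip handling in vector_permute_store earlier in this file), so
        //   sum_j a_j * (b_j + 128) = sum_j a_j * b_j + 128 * sum_j a_j.
        // comparray[] holds the per-row sums of A, and CA[i] = -128 * comparray[i] is added
        // to the converted accumulator to recover dot(a, b) before multiplying by d_A * d_B.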
for (int l = 0; l < k; l++) { __builtin_prefetch((A+(ii*lda)+(l+1))->qs, 0, 1); // prefetch one loop ahead __builtin_prefetch((B+(jj*ldb)+(l+1))->qs, 0, 1); // prefetch one loop ahead __builtin_mma_xxsetaccz(&acc_0); if (isAblock_q4) { packNormalInt4<4>((A+(ii*lda)+l), lda, RM, 4, (int8_t*)vec_A, comparray); } else { packNormal((const block_q8_0*)(A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false); } packNormal((B+(jj*ldb)+l), ldb, RN, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x+=4) { __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]); __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+1], vec_B[x+1]); __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+2], vec_B[x+2]); __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+3], vec_B[x+3]); } for (int I = 0; Id) * unhalf((B+((jj+J)*ldb)+l)->d)); } } __builtin_mma_disassemble_acc(vec_C, &acc_0); if (!isAblock_q4) { auto aoffset = A+(ii*lda)+l; for (int i = 0; i < RM; i++) { comparray[i] = 0; int ca = 0; auto *at = aoffset->qs; for (int j = 0; j < 32; j++) ca += (int)*at++; comparray[i] = ca; aoffset += lda; } } for (int i = 0; i < RM; i++) { CA[i] = vec_splats((float)(((double)comparray[i]) * -128.0)); res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]); fin_res[i] = vec_madd(res[i], vs[i], fin_res[i]); } } save_res(ii, jj, 0, fin_res, RM, RN); } } template template NOINLINE void tinyBLAS_Q0_PPC::gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; this->kernel(ii, jj); } } template class tinyBLAS_Q0_PPC; template class tinyBLAS_Q0_PPC; class tinyBLAS_PPC { public: tinyBLAS_PPC(int64_t k, const float * A, int64_t lda, const float * B, int64_t ldb, float * C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } void matmul(int64_t m, int64_t n) { int64_t mc = 256; int64_t nc = 256; int64_t kc = 256; if (m % mc == 0 && n % nc == 0 && k % kc == 0) { matmul_tiled(m, n, mc, nc, kc); } else { mnpack(0, m, 0, n); } } private: inline void save_acc(acc_t * ACC, int64_t ii, int64_t jj) { vec_t vec_C[4]; __builtin_mma_disassemble_acc(vec_C, ACC); for (int I = 0; I < 4; I++) { for (int J = 0; J < 4; J++) { *((float *)(C+ii+((jj+J)*ldc)+I)) = *((float *)&vec_C[I]+J); } } } inline void add_save_acc(acc_t * ACC, int64_t ii, int64_t jj) { vec_t vec_C[4]; __builtin_mma_disassemble_acc(vec_C, ACC); for (int I = 0; I < 4; I++) { for (int J = 0; J < 4; J++) { float * c_ptr = (float *)(C+ii+((jj+J)*ldc)+I); *c_ptr += *((float *)&vec_C[I]+J); } } } inline void vector_permute_store_4(vector float * src, float * vecOffset) { vector float t1, t2, t3, t4, t5, t6, t7, t8; t1 = vec_mergeh(src[0], src[1]); t2 = vec_mergeh(src[2], src[3]); t3 = vec_mergel(src[0], src[1]); t4 = vec_mergel(src[2], src[3]); t5 = vec_xxpermdi(t1, t2, 0); t6 = vec_xxpermdi(t1, t2, 3); t7 = vec_xxpermdi(t3, t4, 0); t8 = vec_xxpermdi(t3, t4, 3); vec_xst(t5, 0, vecOffset); vec_xst(t6, 0, vecOffset + 4); vec_xst(t7, 0, vecOffset + 8); vec_xst(t8, 0, vecOffset + 12); } inline void vector_permute_store_8(vector float * src, float * vecOffset) { vector float t1, t2, t3, t4, t5, t6, t7, t8; t1 = vec_mergeh(src[0], src[1]); t2 = vec_mergeh(src[2], src[3]); t3 = vec_mergeh(src[4], src[5]); t4 = vec_mergeh(src[6], src[7]); t5 = 
vec_xxpermdi(t1, t2, 0); t6 = vec_xxpermdi(t3, t4, 0); t7 = vec_xxpermdi(t1, t2, 3); t8 = vec_xxpermdi(t3, t4, 3); vec_xst(t5, 0, vecOffset); vec_xst(t6, 0, vecOffset + 4); vec_xst(t7, 0, vecOffset + 8); vec_xst(t8, 0, vecOffset + 12); t1 = vec_mergel(src[0], src[1]); t2 = vec_mergel(src[2], src[3]); t3 = vec_mergel(src[4], src[5]); t4 = vec_mergel(src[6], src[7]); t5 = vec_xxpermdi(t1, t2, 0); t6 = vec_xxpermdi(t3, t4, 0); t7 = vec_xxpermdi(t1, t2, 3); t8 = vec_xxpermdi(t3, t4, 3); vec_xst(t5, 0, vecOffset + 16); vec_xst(t6, 0, vecOffset + 20); vec_xst(t7, 0, vecOffset + 24); vec_xst(t8, 0, vecOffset + 28); } void packTranspose(const float * a, int64_t lda, int rows, int cols, float * vec) { int64_t i, j; float * aoffsets[8]; float * aoffset = NULL, * boffset = NULL; __vector_pair arr[8]; vector float c[8][2] = {0}; vector float c1[8] = {0}; vector float c2[8] = {0}; aoffset = const_cast(a); boffset = vec; j = (rows >> 3); if (j > 0) { do { aoffsets[0] = aoffset; for (int it = 1; it < 8; it++) aoffsets[it] = aoffsets[it-1] + lda; aoffset += 8 * lda; i = (cols >> 3); if (i > 0) { do { for (int it = 0; it < 8; it++) { arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]); __builtin_vsx_disassemble_pair(c[it], &arr[it]); c1[it] = c[it][0]; c2[it] = c[it][1]; } vector_permute_store_8(c1, boffset); vector_permute_store_8(c2, boffset + 32); boffset += 64; i--; if (i > 0) { for (int it = 0; it < 8; it++) { aoffsets[it] = aoffsets[it] + 8; } } } while(i > 0); } if (cols & 4) { for (int it = 0; it < 8 ; it++) c1[it] = vec_xl(0, aoffsets[it]); vector_permute_store_8(c1, boffset); } j--; } while(j > 0); } if (rows & 4) { aoffsets[0] = aoffset; for (int it = 1; it < 4; it++) aoffsets[it] = aoffsets[it-1] + lda; aoffset += 4 * lda; i = (cols >> 3); if (i > 0) { do { for (int it = 0; it < 4; it++) { arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]); __builtin_vsx_disassemble_pair(c[it], &arr[it]); c1[it] = c[it][0]; c2[it] = c[it][1]; } vector_permute_store_4(c1, boffset); vector_permute_store_4(c2, boffset + 16); for (int it = 0; it < 4; it++) aoffsets[it] += 8 * lda; boffset += 32; i--; } while(i > 0); } if (cols & 4) { for (int it = 0; it < 4; it++) c1[it] = vec_xl(0, aoffsets[it]); vector_permute_store_4(c1, boffset); } } if (rows & 3) { aoffsets[0] = aoffset; for (int it = 1; it < 3; it++) aoffsets[it] = aoffsets[it-1] + lda; if (cols & 4) { for (int it = 0; it < 3; it++) c1[it] = vec_xl(0, aoffsets[it]); vector_permute_store_4(c1, boffset); } } } void KERNEL_4x4(int64_t ii, int64_t jj) { vec_t vec_A[4], vec_B[4], vec_C[4]; acc_t acc_0; __builtin_mma_xxsetaccz(&acc_0); for (int l = 0; l < k; l += 4) { packTranspose(A + (ii * lda) + l, lda, 4, 4, (float *)vec_A); packTranspose(B + (jj * ldb) + l, ldb, 4, 4, (float *)vec_B); __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[3], vec_B[3]); } save_acc(&acc_0, ii, jj); } void KERNEL_4x8(int64_t ii, int64_t jj) { vec_t vec_A[4], vec_B[8], vec_C[4]; acc_t acc_0, acc_1; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); for (int64_t l = 0; l < k; l += 4) { packTranspose(A + (ii * lda) + l, lda, 4, 4, (float *)vec_A); packTranspose(B + (jj * ldb) + l, ldb, 8, 4, (float *)vec_B); __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], (vec_t)vec_B[0]); __builtin_mma_xvf32gerpp(&acc_1, vec_A[0], (vec_t)vec_B[1]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], (vec_t)vec_B[2]); 
__builtin_mma_xvf32gerpp(&acc_1, vec_A[1], (vec_t)vec_B[3]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], (vec_t)vec_B[4]); __builtin_mma_xvf32gerpp(&acc_1, vec_A[2], (vec_t)vec_B[5]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[3], (vec_t)vec_B[6]); __builtin_mma_xvf32gerpp(&acc_1, vec_A[3], (vec_t)vec_B[7]); } save_acc(&acc_0, ii, jj); save_acc(&acc_1, ii, jj + 4); } void KERNEL_8x4(int64_t ii, int64_t jj) { vec_t vec_A[8], vec_B[4], vec_C[4]; acc_t acc_0, acc_1; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); for (int64_t l = 0; l < k; l += 4) { packTranspose(A + (ii * lda) + l, lda, 8, 4, (float *)vec_A); packTranspose(B + (jj * ldb) + l, ldb, 4, 4, (float *)vec_B); __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[0], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[1], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[2], vec_B[1]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[3], vec_B[1]); __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[4], vec_B[2]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[5], vec_B[2]); __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[6], vec_B[3]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[7], vec_B[3]); } save_acc(&acc_0, ii, jj); save_acc(&acc_1, ii + 4, jj); } void KERNEL_8x8(int64_t ii, int64_t jj) { vec_t vec_A[16], vec_B[16], vec_C[4]; acc_t acc_0, acc_1, acc_2, acc_3; __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); __builtin_mma_xxsetaccz(&acc_2); __builtin_mma_xxsetaccz(&acc_3); for (int l = 0; l < k; l+=8) { packTranspose(A + (ii * lda) + l, lda, 8, 8, (float *)vec_A); packTranspose(B + (jj * ldb) + l, ldb, 8, 8, (float *)vec_B); for(int x = 0; x < 16; x+=2) { __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[x], vec_B[x]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[x], vec_B[x + 1]); __builtin_mma_xvf32gerpp(&acc_2, (vec_t)vec_A[x + 1], vec_B[x]); __builtin_mma_xvf32gerpp(&acc_3, (vec_t)vec_A[x + 1], vec_B[x + 1]); } } save_acc(&acc_0, ii, jj); save_acc(&acc_1, ii, jj + 4); save_acc(&acc_2, ii + 4, jj); save_acc(&acc_3, ii + 4, jj + 4); } inline void MMA_16x8(vec_t * vec_A0, vec_t * vec_A1, vec_t * vec_B, acc_t * acc) { for (int x = 0; x < 16; x += 2) { __builtin_mma_xvf32gerpp(&acc[0], vec_A0[x + 0], vec_B[x]); __builtin_mma_xvf32gerpp(&acc[1], vec_A0[x + 0], vec_B[x + 1]); __builtin_mma_xvf32gerpp(&acc[2], vec_A0[x + 1], vec_B[x]); __builtin_mma_xvf32gerpp(&acc[3], vec_A0[x + 1], vec_B[x + 1]); __builtin_mma_xvf32gerpp(&acc[4], vec_A1[x + 0], vec_B[x]); __builtin_mma_xvf32gerpp(&acc[5], vec_A1[x + 0], vec_B[x + 1]); __builtin_mma_xvf32gerpp(&acc[6], vec_A1[x + 1], vec_B[x]); __builtin_mma_xvf32gerpp(&acc[7], vec_A1[x + 1], vec_B[x + 1]); } } void KERNEL(int64_t ii, int64_t jj, int64_t mc, int64_t nc, int64_t kc, vec_t * vec_A, vec_t * vec_B, int64_t kk) { for (int64_t i = 0; i < mc; i += 16) { int A_base_addr = (mc / 8) * (i / 8) * 16; for (int64_t j = 0; j < nc; j += 8) { int B_base_addr = (nc / 8) * (j / 8) * 16; acc_t acc[8]; vec_t A0_block[16]; vec_t A1_block[16]; for (int x = 0; x < 8; x++) __builtin_mma_xxsetaccz(&acc[x]); for (int64_t l = 0; l < kc; l += 8) { int A0_block_idx = A_base_addr + (l / 8) * 16; int A1_block_idx = A0_block_idx + (mc / 8) * 16; int B_block_idx = B_base_addr + (l / 8) * 16; vec_t* A0_block = &vec_A[A0_block_idx]; vec_t* A1_block = &vec_A[A1_block_idx]; vec_t* B_block = &vec_B[B_block_idx]; MMA_16x8(A0_block, A1_block, B_block, acc); } if (kk == 0) { save_acc(&acc[0], ii + i, jj + j); save_acc(&acc[1], ii + i, jj + j + 4); save_acc(&acc[2], ii + i + 4, jj + j); 
save_acc(&acc[3], ii + i + 4, jj + j + 4); save_acc(&acc[4], ii + i + 8, jj + j); save_acc(&acc[5], ii + i + 8, jj + j + 4); save_acc(&acc[6], ii + i + 12, jj + j); save_acc(&acc[7], ii + i + 12, jj + j + 4); } else { add_save_acc(&acc[0], ii + i, jj + j); add_save_acc(&acc[1], ii + i, jj + j + 4); add_save_acc(&acc[2], ii + i + 4, jj + j); add_save_acc(&acc[3], ii + i + 4, jj + j + 4); add_save_acc(&acc[4], ii + i + 8, jj + j); add_save_acc(&acc[5], ii + i + 8, jj + j + 4); add_save_acc(&acc[6], ii + i + 12, jj + j); add_save_acc(&acc[7], ii + i + 12, jj + j + 4); } } } } void matmul_tiled(int64_t m , int64_t n, int64_t mc, int64_t nc, int64_t kc) { int64_t ytiles = m / mc; int64_t xtiles = n / nc; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) { end = tiles; } for (int64_t job = start; job < end; ++job) { int64_t ii = (job / xtiles) * mc; int64_t jj = (job % xtiles) * nc; for (int64_t kk = 0; kk < k; kk += kc) { vec_t A_pack[kc * mc / 4]; vec_t B_pack[kc * nc / 4]; packTranspose(A + (ii * lda) + kk, lda, kc, mc, (float *)A_pack); packTranspose(B + (jj * ldb) + kk, ldb, kc, nc, (float *)B_pack); KERNEL(ii, jj, mc, nc, kc, A_pack, B_pack, kk); } } } void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { int m_rem = MIN(m - m0, 8); int n_rem = MIN(n - n0, 8); int mc = 0, nc = 0; if (m_rem >= 8 && n_rem >= 8) { mc = 8; nc = 8; gemm<8, 8>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 8) { mc = 4; nc = 8; gemm<4, 8>(m0, m, n0, n); } else if (m_rem >= 8 && n_rem >= 4) { mc = 8; nc = 4; gemm<8, 4>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 4) { mc = 4; nc = 4; gemm<4, 4>(m0, m, n0, n); } else { mc = (m_rem >= 4) ? 4 : m_rem; nc = (n_rem >= 4) ? 4 : n_rem; if (mc == 0 || nc == 0) return; gemm_small(m0, m, n0, n, mc, nc); } int64_t mp = m0 + ((m - m0) / mc) * mc; int64_t np = n0 + ((n - n0) / nc) * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); } void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n, int RM, int RN) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; vec_t vec_C[4]; acc_t acc_0; __builtin_mma_xxsetaccz(&acc_0); vec_t vec_A[4] = {0}, vec_B[4] = {0}; for (int l = 0; l < k; l += 4) { /* 'GEMV Forwarding' concept is used in first two conditional loops. * when one of the matrix has a single row/column, the elements are * broadcasted, instead of using packing routine to prepack the * matrix elements. 
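             * (e.g. when RM == 1, vec_xl loads four consecutive values of the single A row
             * and vec_splats broadcasts them into vec_A[1..3]; the RN == 1 branch mirrors
             * this for B)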
*/ if (RM == 1) { float * a = const_cast(A + (ii) * lda + l); packTranspose(B + (jj * ldb) + l, ldb, RN, 4, (float *)vec_B); vec_A[0] = (vec_t)vec_xl(0,a); vec_A[1] = (vec_t)vec_splats(*((float *)&vec_A+1)); vec_A[2] = (vec_t)vec_splats(*((float *)&vec_A+2)); vec_A[3] = (vec_t)vec_splats(*((float *)&vec_A+3)); } else if (RN == 1) { packTranspose(A + (ii * lda) + l, lda, RM, 4, (float *)vec_A); float * b = const_cast(B + (jj) * ldb + l); vec_B[0] = (vec_t)vec_xl(0,b); vec_B[1] = (vec_t)vec_splats(*((float *)&vec_B+1)); vec_B[2] = (vec_t)vec_splats(*((float *)&vec_B+2)); vec_B[3] = (vec_t)vec_splats(*((float *)&vec_B+3)); } else { packTranspose(A + (ii * lda) + l, lda, RM, 4, (float *)vec_A); packTranspose(B + (jj * ldb) + l, ldb, RN, 4, (float *)vec_B); } __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[3], vec_B[3]); } __builtin_mma_disassemble_acc(vec_C, &acc_0); for (int I = 0; I < RM; I++) { for (int J = 0; J < RN; J++) { *((float *)(C+ii+((jj+J)*ldc)+I)) = *((float *)&vec_C[I]+J); } } } } template inline void kernel(int64_t ii, int64_t jj) { if constexpr(RM == 4 && RN == 4) { KERNEL_4x4(ii, jj); } else if constexpr(RM == 4 && RN == 8) { KERNEL_4x8(ii, jj); } else if constexpr(RM == 8 && RN == 4) { KERNEL_8x4(ii, jj); } else if constexpr(RM == 8 && RN == 8) { KERNEL_8x8(ii, jj); } else { static_assert(false, "RN/RM values not supported"); } } template NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; int64_t duty = (tiles + nth - 1) / nth; int64_t start = duty * ith; int64_t end = start + duty; if (end > tiles) end = tiles; for (int64_t job = start; job < end; ++job) { int64_t ii = m0 + job / xtiles * RM; int64_t jj = n0 + job % xtiles * RN; kernel(ii, jj); } } const float * const A; const float * const B; float * C; const int64_t k; const int64_t lda; const int64_t ldb; const int64_t ldc; const int ith; const int nth; }; #endif } // namespace /** * Performs optimized matrix multiplication on CPU. * * This subroutine may compute C = Aᵀ * B with column major ordering. * Despite its name, this isn't a generalized implementation. Work is * only performed when a handwritten kernel is written and available. * Otherwise the caller should fall back to a general matmul routine. 
* * For example, for single-threaded single-precision GEMM you can say * * llamafile_sgemm(m, n, k, A, lda, B, ldb, C, ldc, * 0, 1, * GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32); * * @param m is rows in `A` and `C` * @param n is cols in `B` and `C` * @param k is cols in `A` and rows in `B` * @param A is first input matrix (always transposed) * @param lda is row stride of `A` * @param B is second input matrix (never transposed) * @param ldb is row stride of `B` * @param C is input/output array of output matrices * @param ldc is row stride of `C` * @param ith is thread id (must be less than `nth`) * @param nth is number of threads (must be greater than zero) * @param Atype is GGML data type of `A` * @param Btype is GGML data type of `B` * @param Ctype is GGML data type of `C` * @return true if this function was able to service the matmul request */ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C, int64_t ldc, int Atype, int Btype, int Ctype) { assert(m >= 0); assert(n >= 0); assert(k >= 0); assert(lda >= k); assert(ldb >= k); assert(ldc >= m); assert(params->nth > 0); assert(params->ith < params->nth); // only enable sgemm for prompt processing #if !defined(__MMA__) if (n < 2) return false; #endif if (Ctype != GGML_TYPE_F32) return false; switch (Atype) { case GGML_TYPE_F32: { if (Btype != GGML_TYPE_F32) return false; #if defined(__AVX512F__) tinyBLAS<16, __m512, __m512, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); #elif defined(__AVX__) || defined(__AVX2__) tinyBLAS<8, __m256, __m256, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); #elif defined(__ARM_NEON) if (n < 4) return false; tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); #elif defined(__VXE__) || defined(__VXE2__) if (n < 4) return false; tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); #elif defined(__MMA__) if (k % 8) return false; tinyBLAS_PPC tb{ k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__riscv_zvfh) #if LMUL == 1 tinyBLAS_RVV tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; #elif LMUL == 2 tinyBLAS_RVV tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; #else // LMUL = 4 tinyBLAS_RVV tb{ params, k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc}; #endif return tb.matmul(m, n); #else return false; #endif } case GGML_TYPE_BF16: { #if defined(__AVX512BF16__) if (Btype == GGML_TYPE_BF16) { tinyBLAS<32, __m512, __m512bh, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__AVX512F__) if (Btype == GGML_TYPE_BF16) { tinyBLAS<16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__AVX2__) if (Btype == GGML_TYPE_BF16) { tinyBLAS<8, __m256, __m256, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, 
ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__MMA__) if ((k % 8)) return false; if(Btype == GGML_TYPE_BF16) { tinyBLAS_BF16_PPC tb{ k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; } #elif defined(__riscv_zvfbfwma) #if LMUL == 1 tinyBLAS_RVV tb{ params, k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, ldb, (float *)C, ldc}; #elif LMUL == 2 tinyBLAS_RVV tb{ params, k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, ldb, (float *)C, ldc}; #else // LMUL = 4 tinyBLAS_RVV tb{ params, k, (const ggml_bf16_t *)A, lda, (const ggml_bf16_t *)B, ldb, (float *)C, ldc}; #endif return tb.matmul(m, n); #endif return false; } case GGML_TYPE_F16: { #if defined(__AVX512F__) if (Btype == GGML_TYPE_F16) { tinyBLAS<16, __m512, __m512, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__) if (Btype == GGML_TYPE_F16) { tinyBLAS<8, __m256, __m256, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER) if (n < 8) return false; if (Btype == GGML_TYPE_F16) { tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__ARM_NEON) && !defined(_MSC_VER) if (Btype == GGML_TYPE_F32) { tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{ params, k, (const ggml_fp16_t *)A, lda, (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__VXE__) || defined(__VXE2__) if (n < 4) return false; if (Btype == GGML_TYPE_F16) { tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); } #elif defined(__riscv_zvfh) if (Btype == GGML_TYPE_F16) { #if LMUL == 1 tinyBLAS_RVV tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; #elif LMUL == 2 tinyBLAS_RVV tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; #else // LMUL = 4 tinyBLAS_RVV tb{ params, k, (const ggml_fp16_t *)A, lda, (const ggml_fp16_t *)B, ldb, (float *)C, ldc}; #endif return tb.matmul(m, n); } #endif return false; } case GGML_TYPE_Q8_0: { if (Btype != GGML_TYPE_Q8_0) return false; #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) tinyBLAS_Q0_AVX tb{ k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__ARM_FEATURE_DOTPROD) tinyBLAS_Q0_ARM tb{ k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__MMA__) //TO-DO: Remove this condition once gemv forwarding is enabled. 
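            // The PPC MMA path has no GEMV forwarding yet (see the TO-DO above), so very
            // skinny shapes -- m or n smaller than 8 and not exactly 4 -- are rejected here;
            // llamafile_sgemm then returns false and the caller falls back to the generic
            // ggml matmul routine. Other remainders are handled inside mnpack()/gemm_small().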
if (n < 8 && n != 4) return false; if (m < 8 && m != 4) return false; tinyBLAS_Q0_PPC tb{ k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #else return false; #endif } case GGML_TYPE_Q4_0: { if (Btype != GGML_TYPE_Q8_0) return false; #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) tinyBLAS_Q0_AVX tb{ k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__ARM_FEATURE_DOTPROD) tinyBLAS_Q0_ARM tb{ k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #elif defined(__MMA__) //TO-DO: Remove this condition once gemv forwarding is enabled. if (n < 8 && n != 4) return false; if (m < 8 && m != 4) return false; tinyBLAS_Q0_PPC tb{ k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #else return false; #endif } case GGML_TYPE_Q5_0: { if (Btype != GGML_TYPE_Q8_0) return false; #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) tinyBLAS_Q0_AVX tb{ k, (const block_q5_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #else return false; #endif } case GGML_TYPE_IQ4_NL: { if (Btype != GGML_TYPE_Q8_0) return false; #if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__) tinyBLAS_Q0_AVX tb{ k, (const block_iq4_nl *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, params->ith, params->nth}; tb.matmul(m, n); return true; #else return false; #endif } default: return false; } (void)params; (void)m; (void)n; (void)k; (void)A; (void)lda; (void)B; (void)ldb; (void)C; (void)ldc; (void)Atype; (void)Btype; (void)Ctype; } ggml-org-ggml-3678254/src/ggml-cpu/llamafile/sgemm.h000066400000000000000000000010141512524704700220710ustar00rootroot00000000000000#pragma once #include #include #if defined(__VXE__) || defined(__VXE2__) #include #endif #ifdef _MSC_VER #define NOINLINE __declspec(noinline) #else #define NOINLINE __attribute__((__noinline__)) #endif #ifdef __cplusplus extern "C" { #endif bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t, int64_t, int64_t, const void *, int64_t, const void *, int64_t, void *, int64_t, int, int, int); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/ops.cpp000066400000000000000000012623271512524704700202100ustar00rootroot00000000000000#include "ops.h" #include "ggml-cpu.h" #include "ggml-impl.h" #include "binary-ops.h" #include "ggml.h" #include "unary-ops.h" #include "vec.h" #include #include #include #include // ggml_compute_forward_dup static void ggml_compute_forward_dup_same_cont( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); GGML_ASSERT(src0->type == dst->type); const size_t nb0 = ggml_type_size(src0->type); const int ith = params->ith; // thread index const int nth = params->nth; // number of threads // parallelize by blocks const int nk = ggml_nelements(src0)/ggml_blck_size(src0->type); const int dr = (nk + nth - 1) / nth; const int k0 = dr * ith; const int k1 = MIN(k0 + dr, nk); if (k0 < k1) { memcpy( ((char *) dst->data + k0*nb0), ((char *) src0->data + k0*nb0), (k1 - k0) * nb0); } } template static void 
ggml_compute_forward_dup_flt( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); GGML_TENSOR_UNARY_OP_LOCALS const int ith = params->ith; // thread index const int nth = params->nth; // number of threads // parallelize by rows const int nr = ne01; // number of rows per thread const int dr = (nr + nth - 1) / nth; // row range for this thread const int ir0 = dr * ith; const int ir1 = MIN(ir0 + dr, nr); // case: type & row size equal if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) { // copy by rows const size_t rs = ne00*nb00; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ir0; i01 < ir1; i01++) { memcpy( ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03), rs); } } } return; } // case: dst tensor is contiguous if (ggml_is_contiguous(dst)) { if (nb00 == sizeof(src_t)) { if constexpr (std::is_same_v) { // same type size_t id = 0; const size_t rs = ne00 * nb00; char * dst_ptr = (char *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { id += rs * ir0; for (int i01 = ir0; i01 < ir1; i01++) { const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; memcpy(dst_ptr + id, src0_ptr, rs); id += rs; } id += rs * (ne01 - ir1); } } } else { // casting between non-quantized types size_t id = 0; dst_t * dst_ptr = (dst_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { id += ne00 * ir0; for (int i01 = ir0; i01 < ir1; i01++) { const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { float tmp = type_conversion_table::to_f32(src0_ptr[i00]); dst_ptr[id] = type_conversion_table::from_f32(tmp); id++; } } id += ne00 * (ne01 - ir1); } } } } else { //printf("%s: this is not optimal - fix me\n", __func__); size_t id = 0; dst_t * dst_ptr = (dst_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { id += ne00 * ir0; for (int i01 = ir0; i01 < ir1; i01++) { for (int i00 = 0; i00 < ne00; i00++) { const src_t * src0_ptr = (src_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); float tmp = type_conversion_table::to_f32(*src0_ptr); dst_ptr[id] = type_conversion_table::from_f32(tmp); id++; } } id += ne00 * (ne01 - ir1); } } } return; } // dst counters int64_t i10 = 0; int64_t i11 = 0; int64_t i12 = 0; int64_t i13 = 0; if constexpr (std::is_same_v) { for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { i10 += ne00 * ir0; while (i10 >= ne0) { i10 -= ne0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } for (int64_t i01 = ir0; i01 < ir1; i01++) { for (int64_t i00 = 0; i00 < ne00; i00++) { const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); if (++i10 == ne00) { i10 = 0; if (++i11 == ne01) { i11 = 0; if (++i12 == ne02) { i12 = 0; if (++i13 == ne03) { i13 = 0; } } } } } } i10 += ne00 * (ne01 - ir1); while (i10 >= ne0) { i10 -= ne0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) 
{ i13 = 0; } } } } } } } else { for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { i10 += ne00 * ir0; while (i10 >= ne0) { i10 -= ne0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } for (int64_t i01 = ir0; i01 < ir1; i01++) { for (int64_t i00 = 0; i00 < ne00; i00++) { const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); float tmp = type_conversion_table::to_f32(*(const src_t *) src0_ptr); *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); if (++i10 == ne0) { i10 = 0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } } } i10 += ne00 * (ne01 - ir1); while (i10 >= ne0) { i10 -= ne0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } } } } } template static void ggml_compute_forward_dup_to_q( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); GGML_ASSERT(!ggml_is_quantized(src0->type)); GGML_TENSOR_UNARY_OP_LOCALS const int ith = params->ith; // thread index const int nth = params->nth; // number of threads // parallelize by rows const int nr = ne01; // number of rows per thread const int dr = (nr + nth - 1) / nth; // row range for this thread const int ir0 = dr * ith; const int ir1 = MIN(ir0 + dr, nr); if (ggml_is_contiguous(dst) && nb00 == sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { // casting non-quantized types --> intermediate f32 --> quantized ggml_from_float_t const quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; size_t id = 0; size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); char * dst_ptr = (char *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { id += rs * ir0; for (int i01 = ir0; i01 < ir1; i01++) { const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); } quantize_row_q(src0_f32, dst_ptr + id, ne00); id += rs; } id += rs * (ne01 - ir1); } } } else { // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); GGML_ABORT("not implemented"); } } // A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. 
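// It is used whenever src0 and dst already share the same type: depending on the strides
// it either memcpy's whole rows (contiguous case) or copies one element at a time --
// type_size bytes per copy, which for quantized types means one whole block -- while
// walking the destination indices.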
static void ggml_compute_forward_dup_bytes( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); GGML_ASSERT(src0->type == dst->type); GGML_TENSOR_UNARY_OP_LOCALS; if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { ggml_compute_forward_dup_same_cont(params, dst); return; } const size_t type_size = ggml_type_size(src0->type); const int ith = params->ith; // thread index const int nth = params->nth; // number of threads // parallelize by rows const int nr = ne01; // number of rows per thread const int dr = (nr + nth - 1) / nth; // row range for this thread const int ir0 = dr * ith; const int ir1 = MIN(ir0 + dr, nr); if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { // copy by rows const size_t rs = ggml_row_size(src0->type, ne00); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ir0; i01 < ir1; i01++) { memcpy( ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03), rs); } } } return; } if (ggml_is_contiguous(dst)) { size_t id = 0; char * dst_ptr = (char *) dst->data; const size_t rs = ne00 * type_size; if (nb00 == type_size) { // src0 is contigous on first dimension, copy by rows for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { id += rs * ir0; for (int64_t i01 = ir0; i01 < ir1; i01++) { const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; memcpy(dst_ptr + id, src0_ptr, rs); id += rs; } id += rs * (ne01 - ir1); } } } else { //printf("%s: this is not optimal - fix me\n", __func__); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { id += rs * ir0; for (int64_t i01 = ir0; i01 < ir1; i01++) { for (int64_t i00 = 0; i00 < ne00; i00++) { const char * src0_ptr = (char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03; memcpy(dst_ptr + id, src0_ptr, type_size); id += type_size; } } id += rs * (ne01 - ir1); } } } return; } // dst counters int64_t k10 = 0; int64_t i11 = 0; int64_t i12 = 0; int64_t i13 = 0; // number of blocks in a row const int64_t nk00 = ne00 / ggml_blck_size(src0->type); const int64_t nk0 = ne0 / ggml_blck_size(dst->type); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { k10 += nk00 * ir0; while (k10 >= nk0) { k10 -= nk0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } for (int64_t i01 = ir0; i01 < ir1; i01++) { for (int64_t k00 = 0; k00 < nk00; k00++) { const char * src0_ptr = ((char *) src0->data + k00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + k10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); memcpy(dst_ptr, src0_ptr, type_size); if (++k10 == nk0) { k10 = 0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } } } k10 += nk00 * (ne01 - ir1); while (k10 >= nk0) { k10 -= nk0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { i12 = 0; if (++i13 == ne3) { i13 = 0; } } } } } } } static void ggml_compute_forward_dup_from_q( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const ggml_type type = src0->type; ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float; size_t qk = ggml_blck_size(type); const int64_t nr = 
ggml_nelements(src1) / qk; // destination must be contiguous in the first dimension GGML_ASSERT(nb10 == ggml_type_size(dst->type)); // must either have first dimension large enough to hold a row, or fully contiguous GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); const int ith = params->ith; const int nth = params->nth; const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int64_t ir = ir0; ir < ir1; ++ir) { uint32_t i = ir * qk; const int64_t i03 = i/(ne00 * ne01 * ne02); const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; const int64_t x_offset = (i00/qk)*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; const int64_t i13 = i/(ne10 * ne11 * ne12); const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13; dequantize_row_q( (const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), qk); } } void ggml_compute_forward_dup( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (src0->type == dst->type) { ggml_compute_forward_dup_bytes(params, dst); return; } switch (src0->type) { case GGML_TYPE_F16: { /**/ if (dst->type == GGML_TYPE_F16) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_BF16) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_F32) ggml_compute_forward_dup_flt(params, dst); else ggml_compute_forward_dup_to_q(params, dst); } break; case GGML_TYPE_BF16: { /**/ if (dst->type == GGML_TYPE_F16) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_BF16) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_F32) ggml_compute_forward_dup_flt(params, dst); else ggml_compute_forward_dup_to_q(params, dst); } break; case GGML_TYPE_F32: { /**/ if (dst->type == GGML_TYPE_F16) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_BF16) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_F32) ggml_compute_forward_dup_flt(params, dst); else if (dst->type == GGML_TYPE_I32) ggml_compute_forward_dup_flt(params, dst); else ggml_compute_forward_dup_to_q(params, dst); } break; case GGML_TYPE_I32: { if (dst->type == GGML_TYPE_F32) ggml_compute_forward_dup_flt(params, dst); else GGML_ABORT("not implemented"); } break; default: { if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { ggml_compute_forward_dup_from_q(params, dst); break; } GGML_ABORT("fatal error"); } } } // ggml_compute_forward_add static void ggml_compute_forward_add_q_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); const int nr = ggml_nrows(src0); GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const ggml_type type = src0->type; const ggml_type dtype = dst->type; ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float; ggml_from_float_t const quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; // we don't support permuted src0 or src1 
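    // (the asserts below enforce that; both operands must be row-contiguous).
    // Flow of this kernel: for each row assigned to this thread, dequantize the src0 row
    // into a per-thread f32 scratch buffer in params->wdata, add the matching f32 row of
    // src1 with ggml_vec_acc_f32, then re-quantize the result into dst (or memcpy it when
    // dst has no from_float converter).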
GGML_ASSERT(nb00 == ggml_type_size(type)); GGML_ASSERT(nb10 == sizeof(float)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); GGML_ASSERT(ggml_is_quantized(src0->type)); GGML_ASSERT(src1->type == GGML_TYPE_F32); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; for (int ir = ir0; ir < ir1; ++ir) { // src0 indices const int i03 = ir/(ne02*ne01); const int i02 = (ir - i03*ne02*ne01)/ne01; const int i01 = (ir - i03*ne02*ne01 - i02*ne01); // src1 and dst are same shape as src0 => same indices const int i13 = i03; const int i12 = i02; const int i11 = i01; const int i3 = i03; const int i2 = i02; const int i1 = i01; void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)); void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); assert(ne00 % 32 == 0); // unquantize row from src0 to temp buffer dequantize_row_q(src0_row, wdata, ne00); // add src1 ggml_vec_acc_f32(ne00, wdata, src1_row); // quantize row to dst if (quantize_row_q != NULL) { quantize_row_q(wdata, dst_row, ne00); } else { memcpy(dst_row, wdata, ne0*nb0); } } } void ggml_compute_forward_add( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_BF16: { ggml_compute_forward_add_non_quantized(params, dst); } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: { ggml_compute_forward_add_q_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_add_id static void ggml_compute_forward_add_id_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(src2->type == GGML_TYPE_I32); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_TERNARY_OP_LOCALS GGML_ASSERT( nb0 == sizeof(float)); GGML_ASSERT(nb10 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); // src1 indices const int i11 = *(int32_t *) ((char *) src2->data + i1*nb20 + i2*nb21); GGML_ASSERT(i11 >= 0 && i11 < ne11); ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), (float *) ((char *) 
src1->data + i11*nb11)); } } void ggml_compute_forward_add_id( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_add_id_f32(params, dst); } break; default: { GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); } } } // ggml_compute_forward_add1 static void ggml_compute_forward_add1_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_scalar(src1)); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT( nb0 == sizeof(float)); GGML_ASSERT(nb00 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); #ifdef GGML_USE_ACCELERATE GGML_UNUSED(ggml_vec_add1_f32); vDSP_vadd( (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1, (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1, ne0); #else ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), *(float *) src1->data); #endif } } static void ggml_compute_forward_add1_f16_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add const float v = *(float *) src1->data; const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F16); GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); } } } static void ggml_compute_forward_add1_f16_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F16); GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb00 == 
sizeof(ggml_fp16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); } } } static void ggml_compute_forward_add1_q_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add const float v = *(float *) src1->data; const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS const ggml_type type = src0->type; ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float; ggml_from_float_t const quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; // we don't support permuted src0 GGML_ASSERT(nb00 == ggml_type_size(type)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); GGML_ASSERT(ggml_is_quantized(src0->type)); GGML_ASSERT(dst->type == src0->type); GGML_ASSERT(src1->type == GGML_TYPE_F32); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03)); void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb0 )); assert(ne0 % 32 == 0); // unquantize row from src0 to temp buffer dequantize_row_q(src0_row, wdata, ne0); // add src1 ggml_vec_acc1_f32(ne0, wdata, v); // quantize row to dst quantize_row_q(wdata, dst_row, ne0); } } static void ggml_compute_forward_add1_bf16_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add const float v = *(float *) src1->data; const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(src0->type == GGML_TYPE_BF16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_BF16); GGML_ASSERT( nb0 == sizeof(ggml_bf16_t)); GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + 
i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); } } } static void ggml_compute_forward_add1_bf16_bf16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(src0->type == GGML_TYPE_BF16); GGML_ASSERT(src1->type == GGML_TYPE_BF16); GGML_ASSERT(dst->type == GGML_TYPE_BF16); GGML_ASSERT( nb0 == sizeof(ggml_bf16_t)); GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); } } } void ggml_compute_forward_add1( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_add1_f32(params, dst); } break; case GGML_TYPE_F16: { if (src1->type == GGML_TYPE_F16) { ggml_compute_forward_add1_f16_f16(params, dst); } else if (src1->type == GGML_TYPE_F32) { ggml_compute_forward_add1_f16_f32(params, dst); } else { GGML_ABORT("fatal error"); } } break; case GGML_TYPE_BF16: { if (src1->type == GGML_TYPE_BF16) { ggml_compute_forward_add1_bf16_bf16(params, dst); } else if (src1->type == GGML_TYPE_F32) { ggml_compute_forward_add1_bf16_f32(params, dst); } else { GGML_ABORT("fatal error"); } } break; case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: { ggml_compute_forward_add1_q_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_acc static void ggml_compute_forward_acc_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); // view src0 and dst with these strides and data offset inbytes during acc // nb0 is implicitly element_size because src0 and dst are contiguous size_t nb1 = ((int32_t *) dst->op_params)[0]; size_t nb2 = ((int32_t *) dst->op_params)[1]; size_t nb3 = ((int32_t *) dst->op_params)[2]; size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; if (!inplace) { if (params->ith == 0) { // memcpy needs to be 
synchronized across threads to avoid race conditions. // => do it in INIT phase memcpy( ((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); } ggml_barrier(params->threadpool); } const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src1); const int nc = src1->ne[0]; GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) // src0 and dst as viewed during acc const size_t nb0 = ggml_element_size(src0); const size_t nb00 = nb0; const size_t nb01 = nb1; const size_t nb02 = nb2; const size_t nb03 = nb3; GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst)); GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0)); GGML_ASSERT(nb10 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are viewed with shape of src1 and offset // => same indices const int i3 = ir/(ne12*ne11); const int i2 = (ir - i3*ne12*ne11)/ne11; const int i1 = (ir - i3*ne12*ne11 - i2*ne11); #ifdef GGML_USE_ACCELERATE vDSP_vadd( (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1, (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc); #else ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); #endif } } void ggml_compute_forward_acc( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_acc_f32(params, dst); } break; case GGML_TYPE_F16: case GGML_TYPE_BF16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_sum static void ggml_compute_forward_sum_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } assert(ggml_is_scalar(dst)); assert(src0->nb[0] == sizeof(float)); GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) ggml_float sum = 0; ggml_float row_sum = 0; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { ggml_vec_sum_f32_ggf(ne00, &row_sum, (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); sum += row_sum; } } } ((float *) dst->data)[0] = sum; } static void ggml_compute_forward_sum_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } assert(ggml_is_scalar(dst)); assert(src0->nb[0] == 
sizeof(ggml_fp16_t)); GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) float sum = 0; float row_sum = 0; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { ggml_vec_sum_f16_ggf(ne00, &row_sum, (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); sum += row_sum; } } } ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); } static void ggml_compute_forward_sum_bf16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } assert(ggml_is_scalar(dst)); assert(src0->nb[0] == sizeof(ggml_bf16_t)); GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) float sum = 0; float row_sum = 0; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { ggml_vec_sum_bf16_ggf(ne00, &row_sum, (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); sum += row_sum; } } } ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); } void ggml_compute_forward_sum( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_sum_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_sum_f16(params, dst); } break; case GGML_TYPE_BF16: { ggml_compute_forward_sum_bf16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_cumsum static void ggml_compute_forward_cumsum_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(dst->nb[0] == sizeof(float)); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(ne0 == ne00); GGML_ASSERT(ne1 == ne01); GGML_ASSERT(ne2 == ne02); GGML_ASSERT(ne3 == ne03); const auto [ir0, ir1] = get_thread_range(params, src0); for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*ne01); const int64_t i02 = (ir - i03*ne02*ne01)/ne01; const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); float * src_row = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); float * dst_row = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); ggml_vec_cumsum_f32(ne00, dst_row, src_row); } } void ggml_compute_forward_cumsum( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_cumsum_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_sum_rows static void ggml_compute_forward_sum_rows_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(dst->nb[0] == sizeof(float)); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(ne0 == 1); GGML_ASSERT(ne1 == ne01); GGML_ASSERT(ne2 == ne02); GGML_ASSERT(ne3 == ne03); for (int64_t i3 = 0; i3 < ne03; i3++) { for (int64_t i2 = 0; i2 < ne02; i2++) { for (int64_t i1 = 0; i1 < ne01; i1++) { float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03); float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3); float row_sum = 0; ggml_vec_sum_f32(ne00, &row_sum, src_row); dst_row[0] = row_sum; } } } } void ggml_compute_forward_sum_rows( const ggml_compute_params * 
params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_sum_rows_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_mean static void ggml_compute_forward_mean_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } assert(src0->nb[0] == sizeof(float)); GGML_TENSOR_UNARY_OP_LOCALS assert(ne0 == 1); assert(ne1 == ne01); assert(ne2 == ne02); assert(ne3 == ne03); GGML_UNUSED(ne0); GGML_UNUSED(ne1); GGML_UNUSED(ne2); GGML_UNUSED(ne3); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00; } } } } void ggml_compute_forward_mean( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_mean_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_argmax static void ggml_compute_forward_argmax_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } assert(src0->nb[0] == sizeof(float)); assert(dst->nb[0] == sizeof(float)); const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const size_t nb01 = src0->nb[1]; const size_t nb0 = dst->nb[0]; for (int64_t i1 = 0; i1 < ne01; i1++) { float * src = (float *) ((char *) src0->data + i1*nb01); int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0); int v = 0; ggml_vec_argmax_f32(ne00, &v, src); dst_[0] = v; } } void ggml_compute_forward_argmax( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_argmax_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_count_equal static void ggml_compute_forward_count_equal_i32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS; GGML_ASSERT(src0->type == GGML_TYPE_I32); GGML_ASSERT(src1->type == GGML_TYPE_I32); GGML_ASSERT(ggml_are_same_shape(src0, src1)); GGML_ASSERT(ggml_is_scalar(dst)); GGML_ASSERT(dst->type == GGML_TYPE_I64); const int64_t nr = ggml_nrows(src0); const int ith = params->ith; const int nth = params->nth; int64_t * sums = (int64_t *) params->wdata; int64_t sum_thread = 0; // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this thread const int64_t ir0 = dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir / (ne02*ne01); const int64_t i02 = (ir - i03*ne03) / ne01; const int64_t i01 = ir - i03*ne03 - i02*ne02; const char * data0 = (const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01; const char * data1 = (const char *) src1->data + i03*nb13 + i02*nb12 + i01*nb11; for (int64_t i00 = 0; i00 < ne00; ++i00) { const int32_t val0 = *((const int32_t *) (data0 + i00*nb00)); const int32_t val1 = *((const int32_t *) (data1 + i00*nb10)); sum_thread += val0 == val1; } } if (ith != 0) { sums[ith] = sum_thread; } 
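// two-phase reduction: each non-main thread has just published its partial count to the wdata scratch buffer;
// after the barrier below, thread 0 adds the other threads' partials to its own sum_thread and writes the final scalar.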
ggml_barrier(params->threadpool); if (ith != 0) { return; } for (int ith_other = 1; ith_other < nth; ++ith_other) { sum_thread += sums[ith_other]; } *((int64_t *) dst->data) = sum_thread; } void ggml_compute_forward_count_equal( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_I32: { ggml_compute_forward_count_equal_i32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_repeat static void ggml_compute_forward_repeat_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } GGML_ASSERT(ggml_can_repeat(src0, dst)); GGML_TENSOR_UNARY_OP_LOCALS // guaranteed to be an integer due to the check in ggml_can_repeat const int nr0 = (int)(ne0/ne00); const int nr1 = (int)(ne1/ne01); const int nr2 = (int)(ne2/ne02); const int nr3 = (int)(ne3/ne03); // TODO: support for transposed / permuted tensors GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb00 == sizeof(float)); // TODO: maybe this is not optimal? for (int i3 = 0; i3 < nr3; i3++) { for (int k3 = 0; k3 < ne03; k3++) { for (int i2 = 0; i2 < nr2; i2++) { for (int k2 = 0; k2 < ne02; k2++) { for (int i1 = 0; i1 < nr1; i1++) { for (int k1 = 0; k1 < ne01; k1++) { for (int i0 = 0; i0 < nr0; i0++) { ggml_vec_cpy_f32(ne00, (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0), (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01)); } } } } } } } } static void ggml_compute_forward_repeat_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } GGML_ASSERT(ggml_can_repeat(src0, dst)); GGML_TENSOR_UNARY_OP_LOCALS // guaranteed to be an integer due to the check in ggml_can_repeat const int nr0 = (int)(ne0/ne00); const int nr1 = (int)(ne1/ne01); const int nr2 = (int)(ne2/ne02); const int nr3 = (int)(ne3/ne03); // TODO: support for transposed / permuted tensors GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); // TODO: maybe this is not optimal? 
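// i3/i2/i1/i0 run over the repeat counts (nr3..nr0) and k3/k2/k1 over the source rows;
// each source row of ne00 16-bit elements is copied element-wise (the ggml_vec_cpy_f16 call is left commented out below).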
for (int i3 = 0; i3 < nr3; i3++) { for (int k3 = 0; k3 < ne03; k3++) { for (int i2 = 0; i2 < nr2; i2++) { for (int k2 = 0; k2 < ne02; k2++) { for (int i1 = 0; i1 < nr1; i1++) { for (int k1 = 0; k1 < ne01; k1++) { for (int i0 = 0; i0 < nr0; i0++) { ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0); ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01); // ggml_vec_cpy_f16(ne00, y, x) for (int i = 0; i < ne00; ++i) { y[i] = x[i]; } } } } } } } } } void ggml_compute_forward_repeat( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: case GGML_TYPE_BF16: case GGML_TYPE_I16: { ggml_compute_forward_repeat_f16(params, dst); } break; case GGML_TYPE_F32: case GGML_TYPE_I32: { ggml_compute_forward_repeat_f32(params, dst); } break; // TODO: templateify the implemenation and support for I64 // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 //case GGML_TYPE_I64: // { // ggml_compute_forward_repeat_i64(params, dst); // } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_repeat_back static void ggml_compute_forward_repeat_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } GGML_ASSERT(ggml_can_repeat(dst, src0)); GGML_TENSOR_UNARY_OP_LOCALS // guaranteed to be an integer due to the check in ggml_can_repeat const int nr0 = (int)(ne00/ne0); const int nr1 = (int)(ne01/ne1); const int nr2 = (int)(ne02/ne2); const int nr3 = (int)(ne03/ne3); // TODO: support for transposed / permuted tensors GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb00 == sizeof(float)); if (ggml_is_contiguous(dst)) { ggml_vec_set_f32(ne0*ne1*ne2*ne3, (float *)dst->data, 0); } else { for (int k3 = 0; k3 < ne3; k3++) { for (int k2 = 0; k2 < ne2; k2++) { for (int k1 = 0; k1 < ne1; k1++) { ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3), 0); } } } } // TODO: maybe this is not optimal? 
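// repeat_back reduces a repeat: every repeated block of src0 is accumulated back into the
// corresponding row of the smaller dst, which was zeroed just above.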
for (int i3 = 0; i3 < nr3; i3++) { for (int k3 = 0; k3 < ne3; k3++) { for (int i2 = 0; i2 < nr2; i2++) { for (int k2 = 0; k2 < ne2; k2++) { for (int i1 = 0; i1 < nr1; i1++) { for (int k1 = 0; k1 < ne1; k1++) { for (int i0 = 0; i0 < nr0; i0++) { ggml_vec_acc_f32(ne0, (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1), (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00)); } } } } } } } } void ggml_compute_forward_repeat_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_repeat_back_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_concat static void ggml_compute_forward_concat_any( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const size_t len = ggml_type_size(src0->type); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_BINARY_OP_LOCALS const int32_t dim = ggml_get_op_params_i32(dst, 0); GGML_ASSERT(dim >= 0 && dim < 4); int64_t o[4] = {0, 0, 0, 0}; o[dim] = src0->ne[dim]; const char * x; // TODO: smarter multi-theading for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = ith; i2 < ne2; i2 += nth) { for (int i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < ne0; i0++) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { x = (const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03; } else { x = (const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13; } char * y = (char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3; memcpy(y, x, len); } } } } } static void ggml_compute_forward_concat_i8( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_BINARY_OP_LOCALS const int32_t dim = ggml_get_op_params_i32(dst, 0); GGML_ASSERT(dim >= 0 && dim < 4); int64_t o[4] = {0, 0, 0, 0}; o[dim] = src0->ne[dim]; const int8_t * x; // TODO: smarter multi-theading for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = ith; i2 < ne2; i2 += nth) { for (int i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < ne0; i0++) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { x = (const int8_t *) ((const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03); } else { x = (const int8_t *) ((const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13); } int8_t * y = (int8_t *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *y = *x; } } } } } static void ggml_compute_forward_concat_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_BINARY_OP_LOCALS const int32_t dim = ggml_get_op_params_i32(dst, 0); GGML_ASSERT(dim >= 0 && dim < 4); int64_t o[4] = {0, 0, 0, 0}; o[dim] = src0->ne[dim]; const ggml_fp16_t * x; // TODO: smarter multi-theading for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = ith; i2 < ne2; i2 += nth) { for (int i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < ne0; i0++) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 
< ne03) { x = (const ggml_fp16_t *) ((const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03); } else { x = (const ggml_fp16_t *) ((const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13); } ggml_fp16_t * y = (ggml_fp16_t *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *y = *x; } } } } } static void ggml_compute_forward_concat_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_BINARY_OP_LOCALS const int32_t dim = ggml_get_op_params_i32(dst, 0); GGML_ASSERT(dim >= 0 && dim < 4); int64_t o[4] = {0, 0, 0, 0}; o[dim] = src0->ne[dim]; const float * x; // TODO: smarter multi-theading for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = ith; i2 < ne2; i2 += nth) { for (int i1 = 0; i1 < ne1; i1++) { for (int i0 = 0; i0 < ne0; i0++) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { x = (const float *) ((const char *)src0->data + (i0 )*nb00 + (i1 )*nb01 + (i2 )*nb02 + (i3 )*nb03); } else { x = (const float *) ((const char *)src1->data + (i0 - o[0])*nb10 + (i1 - o[1])*nb11 + (i2 - o[2])*nb12 + (i3 - o[3])*nb13); } float * y = (float *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *y = *x; } } } } } void ggml_compute_forward_concat( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: case GGML_TYPE_BF16: case GGML_TYPE_I16: { ggml_compute_forward_concat_f16(params, dst); } break; case GGML_TYPE_I8: { ggml_compute_forward_concat_i8(params, dst); } break; case GGML_TYPE_F32: case GGML_TYPE_I32: { ggml_compute_forward_concat_f32(params, dst); } break; default: { ggml_compute_forward_concat_any(params, dst); } } } // ggml_compute_forward_gelu static void ggml_compute_forward_gelu_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_gelu_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])), (ggml_fp16_t *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const 
ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_gelu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_gelu_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_gelu_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_fill static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { const float c = ggml_get_op_params_f32(dst, 0); GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); GGML_TENSOR_LOCALS(size_t, nb, dst, nb); const auto [ir0, ir1] = get_thread_range(params, dst); for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne2*ne1); const int64_t i02 = (ir - i03*ne2*ne1)/ne1; const int64_t i01 = (ir - i03*ne2*ne1 - i02*ne1); float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1); ggml_vec_set_f32(ne0, dst_ptr, c); } } void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { ggml_compute_forward_fill_f32(params, dst); } // ggml_compute_tri static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_TENSOR_UNARY_OP_LOCALS const auto [ir0, ir1] = get_thread_range(params, src0); bool (*bipred)(int, int); switch (ttype) { case GGML_TRI_TYPE_LOWER: bipred = [](int i, int r) { return i < r; }; break; case GGML_TRI_TYPE_LOWER_DIAG: bipred = [](int i, int r) { return i <= r; }; break; case GGML_TRI_TYPE_UPPER: bipred = [](int i, int r) { return i > r; }; break; case GGML_TRI_TYPE_UPPER_DIAG: bipred = [](int i, int r) { return i >= r; }; break; default: GGML_ABORT("invalid tri type"); } for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*ne01); const int64_t i02 = (ir - i03*ne02*ne01)/ne01; const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); const float * src_ptr = (const float *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); float * dst_ptr = ( float *) (( char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1); for (int i0 = 0; i0 < ne0; ++i0) { dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; } } } void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_tri_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_gelu_erf static void ggml_compute_forward_gelu_erf_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_gelu_erf_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])), (ggml_fp16_t *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_gelu_erf( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_gelu_erf_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_gelu_erf_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_gelu_quick static void ggml_compute_forward_gelu_quick_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_gelu_quick_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; 
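// note: gelu_quick is the cheaper sigmoid-based GELU approximation (roughly x*sigmoid(1.702*x) in the f32 path);
// like the other unary ops here it is applied row by row, with rows split across threads.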
assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])), (ggml_fp16_t *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_gelu_quick( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_gelu_quick_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_gelu_quick_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_silu static void ggml_compute_forward_silu_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_silu_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])), (ggml_fp16_t *) ((char *) src0->data + i1*(src0->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])))[k]; const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_silu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_silu_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_silu_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_leaky_relu static void ggml_compute_forward_leaky_relu_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } 
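// leaky_relu runs on thread 0 only (the other threads return early above); the negative slope is read from op_params just below.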
assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int n = ggml_nrows(src0); const int nc = src0->ne[0]; float negative_slope; memcpy(&negative_slope, dst->op_params, sizeof(float)); assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope); } } static void ggml_compute_forward_leaky_relu_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } assert(ggml_is_contiguous_1(src0)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src0, dst)); const int n = ggml_nrows(src0); const int nc = src0->ne[0]; float negative_slope; memcpy(&negative_slope, dst->op_params, sizeof(float)); assert(dst->nb[0] == sizeof(ggml_fp16_t)); assert(src0->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i*( dst->nb[1])), (ggml_fp16_t *) ((char *) src0->data + i*(src0->nb[1])), negative_slope); } } void ggml_compute_forward_leaky_relu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_leaky_relu_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_leaky_relu_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_silu_back static void ggml_compute_forward_silu_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * grad = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; assert(ggml_is_contiguous_1(grad)); assert(ggml_is_contiguous_1(src1)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src1, dst)); assert(ggml_are_same_shape(src1, grad)); const int ith = params->ith; const int nth = params->nth; const int nc = src1->ne[0]; const int nr = ggml_nrows(src1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src1->data + i1*(src1->nb[1])), (float *) ((char *) grad->data + i1*(grad->nb[1]))); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_silu_back_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * grad = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; assert(ggml_is_contiguous_1(grad)); assert(ggml_is_contiguous_1(src1)); assert(ggml_is_contiguous_1(dst)); assert(ggml_are_same_shape(src1, dst)); assert(ggml_are_same_shape(src1, grad)); const int ith = params->ith; const int nth = params->nth; const int nc = src1->ne[0]; const int nr = ggml_nrows(src1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])), (ggml_fp16_t *) ((char *) src1->data + i1*(src1->nb[1])), (ggml_fp16_t *) ((char *) grad->data + i1*(grad->nb[1]))); #ifndef NDEBUG for (int k 
= 0; k < nc; k++) { const float x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } void ggml_compute_forward_silu_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_silu_back_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_silu_back_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_reglu static void ggml_compute_forward_reglu_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float * src0_p = (float *) (src0_d + i1*src0_o); float * src1_p = (float *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_reglu_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_reglu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_reglu_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_reglu_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_geglu static void ggml_compute_forward_geglu_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float * src0_p = (float *) (src0_d + i1*src0_o); float * src1_p = (float *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_geglu_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_geglu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_geglu_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_geglu_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_swiglu static void ggml_compute_forward_swiglu_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float * src0_p = (float *) (src0_d + i1*src0_o); float * src1_p = (float *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_swiglu_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_swiglu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_swiglu_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_swiglu_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_swiglu_oai static void ggml_compute_forward_swiglu_oai_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); const float alpha = ggml_get_op_params_f32(dst, 2); const float limit = ggml_get_op_params_f32(dst, 3); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float * src0_p = (float *) (src0_d + i1*src0_o); float * src1_p = (float *) (src1_d + i1*src1_o); float * dst_p = (float *) ((char *) dst->data + i1*(dst->nb[1])); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } for (int k = 0; k < nc; k++) { const float x = std::min(src0_p[k], limit); const float y = std::clamp(src1_p[k], -limit, limit); const float out_glu = x / (1.f + expf(alpha * (-x))); dst_p[k] = out_glu * (y + 1.f); } #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = dst_p[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_swiglu_oai( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_swiglu_oai_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_geglu_erf static void ggml_compute_forward_geglu_erf_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float * src0_p = (float *) (src0_d + i1*src0_o); float * src1_p = (float *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_geglu_erf_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_geglu_erf( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_geglu_erf_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_geglu_erf_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_geglu_quick static void ggml_compute_forward_geglu_quick_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float * src0_p = (float *) (src0_d + i1*src0_o); float * src1_p = (float *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; GGML_UNUSED(x); assert(!isnan(x)); assert(!isinf(x)); } #endif } } static void ggml_compute_forward_geglu_quick_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; char * src0_d = (char *) src0->data; char * src1_d = (char *) (src1 ? src1->data : src0->data); const size_t src0_o = src0->nb[1]; const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); } const int ith = params->ith; const int nth = params->nth; const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; const int nr = ggml_nrows(src0); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == nr); const int32_t swapped = ggml_get_op_params_i32(dst, 1); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; const float v = GGML_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); } #endif } } static void ggml_compute_forward_geglu_quick( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_geglu_quick_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_geglu_quick_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_norm static void ggml_compute_forward_norm_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); float sum = 0.0; ggml_vec_sum_f32(ne00, &sum, x); float mean = sum/ne00; float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); float variance = 0; #ifdef GGML_USE_ACCELERATE mean = -mean; vDSP_vsadd(x, 1, &mean, y, 1, ne00); vDSP_measqv(y, 1, &variance, ne00); #else variance = ggml_vec_cvar_f32(ne00, y, x, mean); #endif //GGML_USE_ACCELERATE const float scale = 1.0f/sqrtf(variance + eps); ggml_vec_scale_f32(ne00, y, scale); } } } } void ggml_compute_forward_norm( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_norm_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_group_rms_norm static void ggml_compute_forward_rms_norm_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); // TODO: optimize for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); ggml_float sum = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { sum += (ggml_float)(x[i00] * x[i00]); } const float mean = sum/ne00; float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); memcpy(y, x, ne00 * sizeof(float)); // for (int i00 = 0; i00 < ne00; i00++) { // y[i00] = x[i00]; // } const float scale = 1.0f/sqrtf(mean + eps); // if you hit this, likely you got an inf somewhere earlier assert(scale > 0.0f); ggml_vec_scale_f32(ne00, y, scale); } } } } void ggml_compute_forward_rms_norm( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_rms_norm_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } static void 
ggml_compute_forward_rms_norm_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_BINARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); // TODO: optimize for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ith; i01 < ne01; i01 += nth) { // src1 is same shape as src0 => same indices const int64_t i11 = i01; const int64_t i12 = i02; const int64_t i13 = i03; const float * dz = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); const float * x = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13); ggml_float sum_xx = 0.0; ggml_float sum_xdz = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { sum_xx += (ggml_float)(x[i00] * x[i00]); sum_xdz += (ggml_float)(x[i00] * dz[i00]); } //const float mean = (float)(sum_xx)/ne00; const float mean_eps = (float)(sum_xx)/ne00 + eps; const float sum_eps = (float)(sum_xx) + eps*ne00; //const float mean_xdz = (float)(sum_xdz)/ne00; // we could cache rms from forward pass to improve performance. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. //const float rms = sqrtf(mean_eps); const float rrms = 1.0f / sqrtf(mean_eps); //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) { // z = rms_norm(x) // // rms_norm(src1) = // scale( // src1, // div( // 1, // sqrt( // add( // scale( // sum( // sqr( // src1)), // (1.0/N)), // eps)))); // postorder: // ## op args grad // 00 param src1 grad[#00] // 01 const 1 // 02 sqr (#00) grad[#02] // 03 sum (#02) grad[#03] // 04 const 1/N // 05 scale (#03, #04) grad[#05] // 06 const eps // 07 add (#05, #06) grad[#07] // 08 sqrt (#07) grad[#08] // 09 div (#01,#08) grad[#09] // 10 scale (#00,#09) grad[#10] // // backward pass, given grad[#10] // #10: scale // grad[#00] += scale(grad[#10],#09) // grad[#09] += sum(mul(grad[#10],#00)) // #09: div // grad[#08] += neg(mul(grad[#09], div(#09,#08))) // #08: sqrt // grad[#07] += mul(grad[#08], div(0.5, #08)) // #07: add // grad[#05] += grad[#07] // #05: scale // grad[#03] += scale(grad[#05],#04) // #03: sum // grad[#02] += repeat(grad[#03], #02) // #02: // grad[#00] += scale(mul(#00, grad[#02]), 2.0) // // substitute and simplify: // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) // grad[#02] = repeat(grad[#03], #02) // grad[#02] = repeat(scale(grad[#05],#04), #02) // grad[#02] = repeat(scale(grad[#07],#04), #02) // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) // grad[#00] = 
scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) // a = b*c + d*e // a = b*c*f/f + d*e*f/f // a = (b*c*f + d*e*f)*(1/f) // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) // a = (b + d*e/c)*c // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms // a = (dz + x*div(-mean_xdz,mean_eps))*rrms // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) } // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) // post-order: // dx := x // dx := scale(dx,-mean_xdz/mean_eps) // dx := add(dx, dz) // dx := scale(dx, rrms) float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) ggml_vec_cpy_f32 (ne00, dx, x); // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps); ggml_vec_acc_f32 (ne00, dx, dz); ggml_vec_scale_f32(ne00, dx, rrms); } } } } void ggml_compute_forward_rms_norm_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_rms_norm_back_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_group_norm static void ggml_compute_forward_group_norm_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS // TODO: optimize float eps; memcpy(&eps, dst->op_params + 1, sizeof(float)); int n_channels = src0->ne[2]; int n_groups = dst->op_params[0]; int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; for (int i = ith; i < n_groups; i += nth) { int start = i * n_channels_per_group; int end = start + n_channels_per_group; if (end > n_channels) { end = n_channels; } int step = end - start; for (int64_t i03 = 0; i03 < ne03; i03++) { ggml_float sum = 0.0; for (int64_t i02 = start; i02 < end; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { 
const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); ggml_float sumr = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { sumr += (ggml_float)x[i00]; } sum += sumr; } } const float mean = sum / (ne00 * ne01 * step); ggml_float sum2 = 0.0; for (int64_t i02 = start; i02 < end; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); ggml_float sumr = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { float v = x[i00] - mean; y[i00] = v; sumr += (ggml_float)(v * v); } sum2 += sumr; } } const float variance = sum2 / (ne00 * ne01 * step); const float scale = 1.0f / sqrtf(variance + eps); for (int64_t i02 = start; i02 < end; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); ggml_vec_scale_f32(ne00, y, scale); } } } } } void ggml_compute_forward_group_norm( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_group_norm_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_l2_norm static void ggml_compute_forward_l2_norm_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); // TODO: optimize for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); ggml_float sum = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { sum += (ggml_float)(x[i00] * x[i00]); } float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); memcpy(y, x, ne00 * sizeof(float)); const float scale = 1.0f/fmaxf(sqrtf(sum), eps); ggml_vec_scale_f32(ne00, y, scale); } } } } void ggml_compute_forward_l2_norm( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_l2_norm_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_out_prod static void ggml_compute_forward_out_prod_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); const int ith = params->ith; const int nth = params->nth; GGML_ASSERT(ne0 == ne00); GGML_ASSERT(ne1 == ne10); GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); GGML_ASSERT(ne2 % ne02 == 0); GGML_ASSERT(ne3 % ne03 == 0); // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == sizeof(float)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); // GGML_ASSERT(nb0 <= nb1); // GGML_ASSERT(nb1 <= nb2); // GGML_ASSERT(nb2 <= nb3); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows if (ith == 0) { ggml_vec_set_f32(ne0*ne1*ne2*ne3, (float *)dst->data, 0); } ggml_barrier(params->threadpool); // dst[:,:,:,:] = 
0 // for i2,i3: // for i1: // for i01: // for i0: // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] // parallelize by last three dimensions // total rows in dst const int64_t nr = ne1*ne2*ne3; // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this thread const int64_t ir0 = dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); // block-tiling attempt const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); const int64_t blck_1 = 16; // dps == dst per src0, used for group query attention const int64_t dps2 = ne2 / ne02; const int64_t dps3 = ne3 / ne03; for (int64_t bir = ir0; bir < ir1; bir += blck_1) { const int64_t bir1 = MIN(bir + blck_1, ir1); for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { const int64_t bne01 = MIN(bi01 + blck_0, ne01); for (int64_t ir = bir; ir < bir1; ++ir) { // dst indices const int64_t i3 = ir/(ne2*ne1); const int64_t i2 = (ir - i3*ne2*ne1)/ne1; const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1); const int64_t i02 = i2 / dps2; const int64_t i03 = i3 / dps3; //const int64_t i10 = i1; const int64_t i12 = i2; const int64_t i13 = i3; #if GGML_VEC_MAD_UNROLL > 2 const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { const int64_t i11 = i01; float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); } for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { const int64_t i11 = i01; float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); ggml_vec_mad_f32(ne0, d, s0, *s1); } #else for (int64_t i01 = bi01; i01 < bne01; ++i01) { const int64_t i11 = i01; float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); ggml_vec_mad_f32(ne0, d, s0, *s1); } #endif } } } } static void ggml_compute_forward_out_prod_q_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS; const int ith = params->ith; const int nth = params->nth; const ggml_type type = src0->type; ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float; GGML_ASSERT(ne02 == ne12); GGML_ASSERT(ne03 == ne13); GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); // we don't support permuted src0 dim0 GGML_ASSERT(nb00 == ggml_type_size(type)); // dst dim0 cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); // GGML_ASSERT(nb0 <= nb1); // GGML_ASSERT(nb1 <= nb2); // GGML_ASSERT(nb2 <= nb3); GGML_ASSERT(ne0 == ne00); GGML_ASSERT(ne1 == ne10); GGML_ASSERT(ne2 == ne02); GGML_ASSERT(ne3 == ne03); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows if (ith == 0) { ggml_vec_set_f32(ne0*ne1*ne2*ne3, (float *)dst->data, 0); } ggml_barrier(params->threadpool); // parallelize by last three dimensions // total rows in dst const int64_t nr = ne1*ne2*ne3; // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this thread const int64_t ir0 
= dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); // dst[:,:,:,:] = 0 // for i2,i3: // for i1: // for i01: // for i0: // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; for (int64_t ir = ir0; ir < ir1; ++ir) { // dst indices const int64_t i3 = ir/(ne2*ne1); const int64_t i2 = (ir - i3*ne2*ne1)/ne1; const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1); const int64_t i02 = i2; const int64_t i03 = i3; //const int64_t i10 = i1; const int64_t i12 = i2; const int64_t i13 = i3; for (int64_t i01 = 0; i01 < ne01; ++i01) { const int64_t i11 = i01; float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); dequantize_row_q(s0, wdata, ne0); ggml_vec_mad_f32(ne0, d, wdata, *s1); } } } void ggml_compute_forward_out_prod( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: { ggml_compute_forward_out_prod_q_f32(params, dst); } break; case GGML_TYPE_F16: { GGML_ABORT("fatal error"); // todo // ggml_compute_forward_out_prod_f16_f32(params, dst); } case GGML_TYPE_F32: { ggml_compute_forward_out_prod_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_scale static void ggml_compute_forward_scale_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_are_same_shape(src0, dst)); float s; // scale factor float b; // bias memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); const size_t nb01 = src0->nb[1]; const size_t nb1 = dst->nb[1]; if (b == 0.0f) { for (int i1 = ir0; i1 < ir1; i1++) { if (dst->data != src0->data) { // src0 is same shape as dst => same indices // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float)); } ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), s); } } else { for (int i1 = ir0; i1 < ir1; i1++) { ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1*nb1), (float *) ((char *) src0->data + i1*nb1), s, b); } } } void ggml_compute_forward_scale( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_scale_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_set static void ggml_compute_forward_set_f32( const ggml_compute_params * 
params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); // view src0 and dst with these strides and data offset inbytes during set // nb0 is implicitly element_size because src0 and dst are contiguous size_t nb1 = ((int32_t *) dst->op_params)[0]; size_t nb2 = ((int32_t *) dst->op_params)[1]; size_t nb3 = ((int32_t *) dst->op_params)[2]; size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; if (!inplace) { if (params->ith == 0) { // memcpy needs to be synchronized across threads to avoid race conditions. // => do it in INIT phase memcpy( ((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); } ggml_barrier(params->threadpool); } const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src1); const int nc = src1->ne[0]; GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) // src0 and dst as viewed during set const size_t nb0 = ggml_element_size(src0); const int im0 = (ne10 == 0 ? 0 : ne10-1); const int im1 = (ne11 == 0 ? 0 : ne11-1); const int im2 = (ne12 == 0 ? 0 : ne12-1); const int im3 = (ne13 == 0 ? 0 : ne13-1); GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst)); GGML_ASSERT(nb10 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are viewed with shape of src1 and offset // => same indices const int i3 = ir/(ne12*ne11); const int i2 = (ir - i3*ne12*ne11)/ne11; const int i1 = (ir - i3*ne12*ne11 - i2*ne11); ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); } } static void ggml_compute_forward_set_i32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); // view src0 and dst with these strides and data offset inbytes during set // nb0 is implicitly element_size because src0 and dst are contiguous size_t nb1 = ((int32_t *) dst->op_params)[0]; size_t nb2 = ((int32_t *) dst->op_params)[1]; size_t nb3 = ((int32_t *) dst->op_params)[2]; size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; if (!inplace) { if (params->ith == 0) { // memcpy needs to be synchronized across threads to avoid race conditions. // => do it in INIT phase memcpy( ((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); } ggml_barrier(params->threadpool); } const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src1); const int nc = src1->ne[0]; GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) // src0 and dst as viewed during set const size_t nb0 = ggml_element_size(src0); const int im0 = (ne10 == 0 ? 0 : ne10-1); const int im1 = (ne11 == 0 ? 0 : ne11-1); const int im2 = (ne12 == 0 ? 0 : ne12-1); const int im3 = (ne13 == 0 ? 
0 : ne13-1); GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst)); GGML_ASSERT(nb10 == sizeof(int32_t)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int ir = ir0; ir < ir1; ++ir) { // src0 and dst are viewed with shape of src1 and offset // => same indices const int i3 = ir/(ne12*ne11); const int i2 = (ir - i3*ne12*ne11)/ne11; const int i1 = (ir - i3*ne12*ne11 - i2*ne11); ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), (int32_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); } } void ggml_compute_forward_set( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_set_f32(params, dst); } break; case GGML_TYPE_I32: { ggml_compute_forward_set_i32(params, dst); } break; case GGML_TYPE_F16: case GGML_TYPE_BF16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_cpy void ggml_compute_forward_cpy( const ggml_compute_params * params, ggml_tensor * dst) { ggml_compute_forward_dup(params, dst); } // ggml_compute_forward_cont void ggml_compute_forward_cont( const ggml_compute_params * params, ggml_tensor * dst) { ggml_compute_forward_dup(params, dst); } // ggml_compute_forward_get_rows static void ggml_compute_forward_get_rows_q( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const int64_t nc = ne00; const int64_t nr = ggml_nelements(src1); const ggml_type type = src0->type; ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float; assert(ne0 == nc); assert(ne02 == ne11); assert(nb00 == ggml_type_size(type)); assert(ggml_nrows(dst) == nr); const int ith = params->ith; const int nth = params->nth; // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int64_t i = ir0; i < ir1; ++i) { const int64_t i12 = i/(ne11*ne10); const int64_t i11 = (i - i12*ne11*ne10)/ne10; const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10); const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); GGML_ASSERT(i01 >= 0 && i01 < ne01); dequantize_row_q( (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03), (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc); } } static void ggml_compute_forward_get_rows_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const int64_t nc = ne00; const int64_t nr = ggml_nelements(src1); assert(ne0 == nc); assert(ne02 == ne11); assert(nb00 == sizeof(ggml_fp16_t)); assert(ggml_nrows(dst) == nr); const int ith = params->ith; const int nth = params->nth; // rows per thread const int dr = (nr + nth - 
1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int64_t i = ir0; i < ir1; ++i) { const int64_t i12 = i/(ne11*ne10); const int64_t i11 = (i - i12*ne11*ne10)/ne10; const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10); const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); GGML_ASSERT(i01 >= 0 && i01 < ne01); ggml_cpu_fp16_to_fp32( (const ggml_fp16_t*) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03), (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc); } } static void ggml_compute_forward_get_rows_bf16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const int64_t nc = ne00; const int64_t nr = ggml_nelements(src1); assert(ne0 == nc); assert(ne02 == ne11); assert(nb00 == sizeof(ggml_bf16_t)); assert(ggml_nrows(dst) == nr); const int ith = params->ith; const int nth = params->nth; // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int64_t i = ir0; i < ir1; ++i) { const int64_t i12 = i/(ne11*ne10); const int64_t i11 = (i - i12*ne11*ne10)/ne10; const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10); const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); GGML_ASSERT(i01 >= 0 && i01 < ne01); ggml_cpu_bf16_to_fp32( (const ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03), (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc); } } static void ggml_compute_forward_get_rows_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const int64_t nc = ne00; const int64_t nr = ggml_nelements(src1); assert(ne0 == nc); assert(ne02 == ne11); assert(nb00 == sizeof(float)); assert(ggml_nrows(dst) == nr); const int ith = params->ith; const int nth = params->nth; // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int64_t i = ir0; i < ir1; ++i) { const int64_t i12 = i/(ne11*ne10); const int64_t i11 = (i - i12*ne11*ne10)/ne10; const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10); const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); GGML_ASSERT(i01 >= 0 && i01 < ne01); ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03)); } } void ggml_compute_forward_get_rows( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: { ggml_compute_forward_get_rows_q(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_get_rows_f16(params, dst); } break; case GGML_TYPE_BF16: { ggml_compute_forward_get_rows_bf16(params, dst); } break; case GGML_TYPE_F32: case 
GGML_TYPE_I32: { ggml_compute_forward_get_rows_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } //static bool first = true; //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); //if (first) { // first = false; //} else { // for (int k = 0; k < dst->ne[1]; ++k) { // for (int j = 0; j < dst->ne[0]/16; ++j) { // for (int i = 0; i < 16; ++i) { // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); // } // printf("\n"); // } // printf("\n"); // } // printf("\n"); // exit(0); //} } template <typename idx_t> static void ggml_compute_forward_set_rows_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS const int64_t nc = ne00; const int64_t nr = ne01; assert(ne0 == nc); assert(ne2 == ne02); assert(ne3 == ne03); assert(src0->type == GGML_TYPE_F32); assert(ne02 % ne11 == 0); assert(ne03 % ne12 == 0); const int ith = params->ith; const int nth = params->nth; // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this thread const int64_t ir0 = dr*ith; const int64_t ir1 = std::min(ir0 + dr, nr); ggml_from_float_t const from_float = ggml_get_type_traits_cpu(dst->type)->from_float; for (int64_t i03 = 0; i03 < ne03; ++i03) { for (int64_t i02 = 0; i02 < ne02; ++i02) { for (int64_t i = ir0; i < ir1; ++i) { const int64_t i12 = i03%ne12; const int64_t i11 = i02%ne11; const int64_t i10 = i; const int64_t i1 = *(idx_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); GGML_ASSERT(i1 >= 0 && i1 < ne1); from_float( (const float *) ((char *) src0->data + i*nb01 + i02*nb02 + i03*nb03), ((char *) dst->data + i1*nb1 + i02*nb2 + i03*nb3), nc); } } } } void ggml_compute_forward_set_rows( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; switch (src0->type) { case GGML_TYPE_F32: { if (src1->type == GGML_TYPE_I64) { ggml_compute_forward_set_rows_f32<int64_t>(params, dst); } else if (src1->type == GGML_TYPE_I32) { ggml_compute_forward_set_rows_f32<int32_t>(params, dst); } else { GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type)); } } break; default: { GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); } } } // ggml_compute_forward_get_rows_back static void ggml_compute_forward_get_rows_back_f32_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; if (params->ith != 0) { return; } GGML_ASSERT(ggml_is_contiguous(dst)); // ggml_compute_forward_dup_same_cont(params, opt0, dst); memset(dst->data, 0, ggml_nbytes(dst)); const int nc = src0->ne[0]; const int nr = ggml_nelements(src1); GGML_ASSERT( dst->ne[0] == nc); GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; for (int j = 0; j < nc; ++j) { ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j]; ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); } } } static void ggml_compute_forward_get_rows_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; if (params->ith != 0) { return; } GGML_ASSERT(ggml_is_contiguous(dst)); // ggml_compute_forward_dup_same_cont(params, opt0, dst); memset(dst->data, 0, ggml_nbytes(dst)); const int nc = src0->ne[0]; const
int nr = ggml_nelements(src1); GGML_ASSERT( dst->ne[0] == nc); GGML_ASSERT(src0->nb[0] == sizeof(float)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r*dst->nb[1]), (float *) ((char *) dst->data + r*dst->nb[1]), (float *) ((char *) src0->data + i*src0->nb[1])); } } void ggml_compute_forward_get_rows_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: { ggml_compute_forward_get_rows_back_f32_f16(params, dst); } break; case GGML_TYPE_F32: { ggml_compute_forward_get_rows_back_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } //static bool first = true; //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); //if (first) { // first = false; //} else { // for (int k = 0; k < dst->ne[1]; ++k) { // for (int j = 0; j < dst->ne[0]/16; ++j) { // for (int i = 0; i < 16; ++i) { // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); // } // printf("\n"); // } // printf("\n"); // } // printf("\n"); // exit(0); //} } // ggml_compute_forward_diag static void ggml_compute_forward_diag_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; if (params->ith != 0) { return; } // TODO: handle transposed/permuted matrices GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(ne00 == ne0); GGML_ASSERT(ne00 == ne1); GGML_ASSERT(ne01 == 1); GGML_ASSERT(ne02 == ne2); GGML_ASSERT(ne03 == ne3); GGML_ASSERT(nb00 == sizeof(float)); GGML_ASSERT(nb0 == sizeof(float)); for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = 0; i2 < ne2; i2++) { for (int i1 = 0; i1 < ne1; i1++) { float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02); for (int i0 = 0; i0 < i1; i0++) { d[i0] = 0; } d[i1] = s[i1]; for (int i0 = i1+1; i0 < ne0; i0++) { d[i0] = 0; } } } } } void ggml_compute_forward_diag( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_diag_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_diag_mask_inf static void ggml_compute_forward_diag_mask_f32( const ggml_compute_params * params, ggml_tensor * dst, const float value) { const ggml_tensor * src0 = dst->src[0]; const int ith = params->ith; const int nth = params->nth; const int n_past = ((int32_t *) dst->op_params)[0]; const bool inplace = src0->data == dst->data; GGML_ASSERT(n_past >= 0); if (!inplace) { if (ith == 0) { // memcpy needs to be synchronized across threads to avoid race conditions. 
// => do it in INIT phase GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); memcpy( ((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); } ggml_barrier(params->threadpool); } // TODO: handle transposed/permuted matrices const int n = ggml_nrows(src0); const int nc = src0->ne[0]; const int nr = src0->ne[1]; const int nz = n/nr; GGML_ASSERT( dst->nb[0] == sizeof(float)); GGML_ASSERT(src0->nb[0] == sizeof(float)); for (int k = 0; k < nz; k++) { for (int j = ith; j < nr; j += nth) { for (int i = n_past; i < nc; i++) { if (i > n_past + j) { *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value; } } } } } void ggml_compute_forward_diag_mask_inf( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); } break; default: { GGML_ABORT("fatal error"); } } } void ggml_compute_forward_diag_mask_zero( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_diag_mask_f32(params, dst, 0); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_soft_max static void ggml_compute_forward_soft_max_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; assert(ggml_is_contiguous(dst)); assert(ggml_are_same_shape(src0, dst)); float scale = 1.0f; float max_bias = 0.0f; memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS const int64_t nb11 = src1 ? src1->nb[1] : 1; const int64_t nb12 = src1 ? src1->nb[2] : 1; const int64_t nb13 = src1 ? src1->nb[3] : 1; const int64_t ne12 = src1 ? src1->ne[2] : 1; const int64_t ne13 = src1 ? src1->ne[3] : 1; // TODO: is this supposed to be ceil instead of floor? // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 const uint32_t n_head = ne02; const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); // sinks const float * sk = src2 ? (float *)((char *) src2->data) : nullptr; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const int64_t i11 = i01; const int64_t i12 = i02%ne12; const int64_t i13 = i03%ne13; // ALiBi const uint32_t h = i02; // head const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f; float * sp = (float *)((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); float * dp = (float *)((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); // broadcast the mask across rows ggml_fp16_t * mp_f16 = src1 ? (ggml_fp16_t *)((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13) : NULL; float * mp_f32 = src1 ? 
(float *)((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13) : NULL; ggml_vec_cpy_f32 (ne00, wp, sp); ggml_vec_scale_f32(ne00, wp, scale); if (mp_f32) { if (use_f16) { for (int i = 0; i < ne00; ++i) { wp[i] += slope*GGML_CPU_FP16_TO_FP32(mp_f16[i]); } } else { for (int i = 0; i < ne00; ++i) { wp[i] += slope*mp_f32[i]; } } } #ifndef NDEBUG for (int i = 0; i < ne00; ++i) { //printf("p[%d] = %f\n", i, p[i]); assert(!isnan(wp[i])); } #endif float max = -INFINITY; ggml_vec_max_f32(ne00, &max, wp); // if we have sinks, make a correction as if they were included in the softmax if (sk) { max = MAX(max, sk[i02]); } ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); assert(sum > 0.0); if (sk) { sum += (ggml_float) expf(sk[i02] - max); } sum = 1.0/sum; ggml_vec_scale_f32(ne00, dp, sum); #ifndef NDEBUG for (int i = 0; i < ne00; ++i) { assert(!isnan(dp[i])); assert(!isinf(dp[i])); } #endif } } } } void ggml_compute_forward_soft_max( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_soft_max_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_soft_max_ext_back static void ggml_compute_forward_soft_max_ext_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(ggml_are_same_shape(src1, dst)); float scale = 1.0f; float max_bias = 0.0f; memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); GGML_ASSERT(max_bias == 0.0f); // TODO: handle transposed/permuted matrices const int ith = params->ith; const int nth = params->nth; const int nc = src0->ne[0]; const int nr = ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { float *dy = (float *)((char *) src0->data + i1*src0->nb[1]); float *y = (float *)((char *) src1->data + i1*src1->nb[1]); float *dx = (float *)((char *) dst->data + i1*dst->nb[1]); #ifndef NDEBUG for (int i = 0; i < nc; ++i) { //printf("p[%d] = %f\n", i, p[i]); assert(!isnan(dy[i])); assert(!isnan(y[i])); } #endif // Jii = yi - yi*yi // Jij = -yi*yj // J = diag(y)-y.T*y // dx = J * dy // dxk = sum_i(Jki * dyi) // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk // dxk = sum_i(-yk*yi * dyi) + yk*dyk // dxk = -yk * sum_i(yi * dyi) + yk*dyk // dxk = -yk * dot(y, dy) + yk*dyk // dxk = yk * (- dot(y, dy) + dyk) // dxk = yk * (dyk - dot(y, dy)) // // post-order: // dot_y_dy := dot(y, dy) // dx := dy // dx := dx - dot_y_dy // dx := dx * y // linear runtime, no additional memory float dot_y_dy = 0; ggml_vec_dot_f32 (nc, &dot_y_dy, 0, y, 0, dy, 0, 1); ggml_vec_cpy_f32 (nc, dx, dy); ggml_vec_acc1_f32 (nc, dx, -dot_y_dy); ggml_vec_mul_f32 (nc, dx, dx, y); ggml_vec_scale_f32(nc, dx, scale); #ifndef NDEBUG for (int i = 0; i < nc; ++i) { assert(!isnan(dx[i])); assert(!isinf(dx[i])); } #endif } } void ggml_compute_forward_soft_max_ext_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { 
ggml_compute_forward_soft_max_ext_back_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_clamp static void ggml_compute_forward_clamp_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; float min; float max; memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); const int ith = params->ith; const int nth = params->nth; const int n = ggml_nrows(src0); const int nc = src0->ne[0]; const size_t nb00 = src0->nb[0]; const size_t nb01 = src0->nb[1]; const size_t nb0 = dst->nb[0]; const size_t nb1 = dst->nb[1]; GGML_ASSERT( nb0 == sizeof(float)); GGML_ASSERT(nb00 == sizeof(float)); for (int j = ith; j < n; j += nth) { float * dst_ptr = (float *) ((char *) dst->data + j*nb1); float * src0_ptr = (float *) ((char *) src0->data + j*nb01); for (int i = 0; i < nc; i++) { dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); } } } static void ggml_compute_forward_clamp_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; float min; float max; memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); const int ith = params->ith; const int nth = params->nth; const int n = ggml_nrows(src0); const int nc = src0->ne[0]; const size_t nb00 = src0->nb[0]; const size_t nb01 = src0->nb[1]; const size_t nb0 = dst->nb[0]; const size_t nb1 = dst->nb[1]; GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); for (int j = ith; j < n; j += nth) { ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01); for (int i = 0; i < nc; i++) { float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); } } } void ggml_compute_forward_clamp( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_clamp_f32(params, dst); } break; case GGML_TYPE_F16: { ggml_compute_forward_clamp_f16(params, dst); } break; case GGML_TYPE_BF16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_TQ1_0: case GGML_TYPE_TQ2_0: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ2_S: case GGML_TYPE_Q8_K: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_I64: case GGML_TYPE_F64: case GGML_TYPE_COUNT: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_rope static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / MAX(0.001f, high - low); return 1 - MIN(1, MAX(0, y)); } // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
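// A minimal sketch of the YaRN angle correction implemented by rope_yarn() below, kept here
// only as a reading aid; the helper name yarn_theta_sketch is hypothetical and is not part of
// this file. It shows how the interpolated angle (freq_scale * theta_extrap) is blended with
// the extrapolated angle using the ramp from rope_yarn_ramp():
//
//     static float yarn_theta_sketch(float theta_extrap, float freq_scale,
//                                    float ramp, float ext_factor) {
//         const float theta_interp = freq_scale * theta_extrap;   // pure interpolation
//         if (ext_factor == 0.0f) {
//             return theta_interp;                                // no YaRN correction
//         }
//         const float ramp_mix = ramp * ext_factor;               // ramp weight in [0, 1]
//         return theta_interp * (1.0f - ramp_mix) + theta_extrap * ramp_mix;
//     }
//
// rope_yarn() additionally rescales the magnitude (mscale) by 1 + 0.1*log(1/freq_scale) when
// ext_factor != 0, before writing cos_theta and sin_theta into the cache.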
static void rope_yarn( float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, float * cos_theta, float * sin_theta) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); } *cos_theta = cosf(theta) * mscale; *sin_theta = sinf(theta) * mscale; } static void ggml_rope_cache_init( float theta_base, float freq_scale, const float * freq_factors, float corr_dims[2], int64_t ne0, float ext_factor, float mscale, float * cache, float sin_sign, float theta_scale) { // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py float theta = theta_base; for (int64_t i0 = 0; i0 < ne0; i0 += 2) { const float ff = freq_factors ? freq_factors[i0/2] : 1.0f; rope_yarn( theta/ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1] ); cache[i0 + 1] *= sin_sign; theta *= theta_scale; } } static void ggml_mrope_cache_init( float theta_base_t, float theta_base_h, float theta_base_w, float theta_base_e, int sections[4], bool is_imrope, bool indep_sects, float freq_scale, const float * freq_factors, float corr_dims[2], int64_t ne0, float ext_factor, float mscale, float * cache, float sin_sign, float theta_scale) { // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py float theta_t = theta_base_t; float theta_h = theta_base_h; float theta_w = theta_base_w; float theta_e = theta_base_e; // extra position id for vision encoder int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; int sec_w = sections[1] + sections[0]; int sec_e = sections[2] + sec_w; GGML_ASSERT(sect_dims <= ne0); for (int64_t i0 = 0; i0 < ne0; i0 += 2) { const float ff = freq_factors ? freq_factors[i0/2] : 1.0f; int sector = (i0 / 2) % sect_dims; if (indep_sects) { // compute theta independently for each dim sections // (i.e. 
reset corresponding theta when `i0` goes from one section to another) if (sector == 0) { theta_t = theta_base_t; } else if (sector == sections[0]) { theta_h = theta_base_h; } else if (sector == sec_w) { theta_w = theta_base_w; } else if (sector == sec_e) { theta_e = theta_base_e; } } float theta = theta_t; if (is_imrope) { // qwen3vl apply interleaved mrope if (sector % 3 == 1 && sector < 3 * sections[1]) { theta = theta_h; } else if (sector % 3 == 2 && sector < 3 * sections[2]) { theta = theta_w; } else if (sector % 3 == 0 && sector < 3 * sections[0]) { theta = theta_t; } else { theta = theta_e; } } else { if (sector >= sections[0] && sector < sec_w) { theta = theta_h; } else if (sector >= sec_w && sector < sec_w + sections[2]) { theta = theta_w; } else if (sector >= sec_w + sections[2]) { theta = theta_e; } } rope_yarn( theta/ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1] ); cache[i0 + 1] *= sin_sign; theta_t *= theta_scale; theta_w *= theta_scale; theta_h *= theta_scale; theta_e *= theta_scale; } } template <typename T> static void rotate_pairs(const int64_t n, const int64_t n_offset, const float * cache, const T * src_data, T * dst_data, const int scale = 2) { for (int64_t i0 = 0; i0 < n; i0 += 2) { const int64_t ic = i0/scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2 const float cos_theta = cache[i0 + 0]; const float sin_theta = cache[i0 + 1]; const T * const src = src_data + ic; T * dst = dst_data + ic; const float x0 = type_conversion_table<T>::to_f32(src[0]); const float x1 = type_conversion_table<T>::to_f32(src[n_offset]); dst[0] = type_conversion_table<T>::from_f32(x0*cos_theta - x1*sin_theta); dst[n_offset] = type_conversion_table<T>::from_f32(x0*sin_theta + x1*cos_theta); } } template <typename T> //float or ggml_fp16_t static void ggml_compute_forward_rope_flt( const ggml_compute_params * params, ggml_tensor * dst, const bool forward) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_I32); float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; int sections[4]; //const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; //const int n_ctx = ((int32_t *) dst->op_params)[3]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); memcpy(&sections, (int32_t *) dst->op_params + 11, sizeof(int)*4); GGML_TENSOR_UNARY_OP_LOCALS //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); //printf("n_past = %d, ne2 = %d\n", n_past, ne2); GGML_ASSERT(nb0 == nb00); GGML_ASSERT(nb0 == sizeof(T)); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(dst); GGML_ASSERT(n_dims <= ne0); GGML_ASSERT(n_dims % 2 == 0); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); // row index used to determine which thread to use int ir = 0; const
float theta_scale = powf(freq_base, -2.0f/n_dims); float corr_dims[2]; ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope const bool mrope_used = mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope const bool is_vision = mode == GGML_ROPE_TYPE_VISION; if (mrope_used) { GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); } if (is_vision) { GGML_ASSERT(n_dims == ne0/2); } const float * freq_factors = NULL; if (src2 != NULL) { GGML_ASSERT(src2->type == GGML_TYPE_F32); GGML_ASSERT(src2->ne[0] >= n_dims / 2); freq_factors = (const float *) src2->data; } // backward process uses inverse rotation by cos and sin. // cos and sin build a rotation matrix, where the inverse is the transpose. // this essentially just switches the sign of sin. const float sin_sign = forward ? 1.0f : -1.0f; const int32_t * pos = (const int32_t *) src1->data; for (int64_t i3 = 0; i3 < ne3; i3++) { // batch for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith; if (!mrope_used) { const int64_t p = pos[i2]; ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); } else { const int64_t p_t = pos[i2]; const int64_t p_h = pos[i2 + ne2]; const int64_t p_w = pos[i2 + ne2 * 2]; const int64_t p_e = pos[i2 + ne2 * 3]; ggml_mrope_cache_init( p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); } for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads if (ir++ < ir0) continue; if (ir > ir1) break; T * src = (T *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); T * dst_data = (T *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); switch (mode) { case GGML_ROPE_TYPE_NORMAL: rotate_pairs(n_dims, 1, cache, src, dst_data, 1); break; case GGML_ROPE_TYPE_NEOX: case GGML_ROPE_TYPE_MROPE: case GGML_ROPE_TYPE_IMROPE: rotate_pairs(n_dims, n_dims/2, cache, src, dst_data); break; case GGML_ROPE_TYPE_VISION: rotate_pairs(ne0, n_dims, cache, src, dst_data); break; default: GGML_ABORT("rope type not supported"); } if (!is_vision) { // fill the remaining channels with data from src tensor for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { const T * const src = (T *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); T * dst_data = (T *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } //attn-heads } } } void ggml_compute_forward_rope( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: { ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, true); } break; case GGML_TYPE_F32: { ggml_compute_forward_rope_flt<float>(params, dst, true); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_rope_back void ggml_compute_forward_rope_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: { ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, false); } break; case GGML_TYPE_F32: { ggml_compute_forward_rope_flt<float>(params, dst, false); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_conv_transpose_1d static void ggml_compute_forward_conv_transpose_1d_f16_f32( const
ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const int nk = ne00*ne01*ne02; GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb10 == sizeof(float)); if (ith == 0) { memset(params->wdata, 0, params->wsize); // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); ggml_fp16_t * dst_data = wdata + i01*ne00*ne02; for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i00*ne02 + i02] = src[i00]; } } } } // permute source data (src1) from (L x Cin) to (Cin x L) { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; ggml_fp16_t * dst_data = wdata; for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); for (int64_t i10 = 0; i10 < ne10; i10++) { dst_data[i10*ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); } } } // need to zero dst since we are accumulating into it memset(dst->data, 0, ggml_nbytes(dst)); } ggml_barrier(params->threadpool); const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; // total rows in dst const int nr = ne1; // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; ggml_fp16_t * const wdata_src = wdata + nk; for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i1*nb1); ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00; for (int i10 = 0; i10 < ne10; i10++) { const int i1n = i10*ne11; for (int i00 = 0; i00 < ne00; i00++) { float v = 0; ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0, (ggml_fp16_t *) wdata_kernel + i00*ne02, 0, 1); dst_data[i10*s0 + i00] += v; } } } } static void ggml_compute_forward_conv_transpose_1d_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const int nk = ne00*ne01*ne02; GGML_ASSERT(nb00 == sizeof(float)); GGML_ASSERT(nb10 == sizeof(float)); if (ith == 0) { memset(params->wdata, 0, params->wsize); // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) { float * const wdata = (float *) params->wdata + 0; for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); float * dst_data = wdata + i01*ne00*ne02; for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i00*ne02 + i02] = src[i00]; } } } } // prepare source data (src1) { float * const wdata = (float *) params->wdata + nk; float * dst_data = wdata; for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); for (int64_t i10 = 0; i10 < ne10; i10++) { dst_data[i10*ne11 + i11] = src[i10]; } } } // need to zero dst since we 
are accumulating into it memset(dst->data, 0, ggml_nbytes(dst)); } ggml_barrier(params->threadpool); const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; // total rows in dst const int nr = ne1; // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); float * const wdata = (float *) params->wdata + 0; float * const wdata_src = wdata + nk; for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i1*nb1); float * wdata_kernel = wdata + i1*ne02*ne00; for (int i10 = 0; i10 < ne10; i10++) { const int i1n = i10*ne11; for (int i00 = 0; i00 < ne00; i00++) { float v = 0; ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00*ne02, 0, 1); dst_data[i10*s0 + i00] += v; } } } } void ggml_compute_forward_conv_transpose_1d( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: { ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); } break; case GGML_TYPE_F32: { ggml_compute_forward_conv_transpose_1d_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_im2col_f32 // src0: kernel [OC, IC, KH, KW] // src1: image [N, IC, IH, IW] // dst: result [N, OH, OW, IC*KH*KW] static void ggml_compute_forward_im2col_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS; const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; const int32_t p0 = ((const int32_t *)(dst->op_params))[2]; const int32_t p1 = ((const int32_t *)(dst->op_params))[3]; const int32_t d0 = ((const int32_t *)(dst->op_params))[4]; const int32_t d1 = ((const int32_t *)(dst->op_params))[5]; const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1; const int ith = params->ith; const int nth = params->nth; const int64_t N = is_2D ? ne13 : ne12; const int64_t IC = is_2D ? ne12 : ne11; const int64_t IH = is_2D ? ne11 : 1; const int64_t IW = ne10; const int64_t KH = is_2D ? ne01 : 1; const int64_t KW = ne00; const int64_t OH = is_2D ? ne2 : 1; const int64_t OW = ne1; int ofs0 = is_2D ? nb13 : nb12; int ofs1 = is_2D ? 
nb12 : nb11; GGML_ASSERT(nb10 == sizeof(float)); // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] { float * const wdata = (float *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 for (int64_t iow = 0; iow < OW; iow++) { for (int64_t iic = ith; iic < IC; iic += nth) { // micro kernel float * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW] for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 for (int64_t ikw = 0; ikw < KW; ikw++) { const int64_t iiw = iow*s0 + ikw*d0 - p0; const int64_t iih = ioh*s1 + ikh*d1 - p1; if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0; } else { dst_data[iic*(KH*KW) + ikh*KW + ikw] = (src_data[iih*IW + iiw]); } } } } } } } } } // ggml_compute_forward_im2col_f16 // src0: kernel [OC, IC, KH, KW] // src1: image [N, IC, IH, IW] // dst: result [N, OH, OW, IC*KH*KW] static void ggml_compute_forward_im2col_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F16); GGML_TENSOR_BINARY_OP_LOCALS; const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; const int32_t p0 = ((const int32_t *)(dst->op_params))[2]; const int32_t p1 = ((const int32_t *)(dst->op_params))[3]; const int32_t d0 = ((const int32_t *)(dst->op_params))[4]; const int32_t d1 = ((const int32_t *)(dst->op_params))[5]; const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1; const int ith = params->ith; const int nth = params->nth; const int64_t N = is_2D ? ne13 : ne12; const int64_t IC = is_2D ? ne12 : ne11; const int64_t IH = is_2D ? ne11 : 1; const int64_t IW = ne10; const int64_t KH = is_2D ? ne01 : 1; const int64_t KW = ne00; const int64_t OH = is_2D ? ne2 : 1; const int64_t OW = ne1; int ofs0 = is_2D ? nb13 : nb12; int ofs1 = is_2D ? 
nb12 : nb11; GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb10 == sizeof(float)); // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] { ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 for (int64_t iow = 0; iow < OW; iow++) { for (int64_t iic = ith; iic < IC; iic += nth) { // micro kernel ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW] for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 for (int64_t ikw = 0; ikw < KW; ikw++) { const int64_t iiw = iow*s0 + ikw*d0 - p0; const int64_t iih = ioh*s1 + ikh*d1 - p1; if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0; } else { dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_CPU_FP32_TO_FP16(src_data[iih*IW + iiw]); } } } } } } } } } void ggml_compute_forward_im2col( const ggml_compute_params * params, ggml_tensor * dst) { switch (dst->type) { case GGML_TYPE_F16: { ggml_compute_forward_im2col_f16(params, dst); } break; case GGML_TYPE_F32: { ggml_compute_forward_im2col_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_im2col_back_f32 void ggml_compute_forward_im2col_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output const ggml_tensor * src1 = dst->src[1]; // convolution kernel GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS; const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; const int32_t p0 = ((const int32_t *)(dst->op_params))[2]; const int32_t p1 = ((const int32_t *)(dst->op_params))[3]; const int32_t d0 = ((const int32_t *)(dst->op_params))[4]; const int32_t d1 = ((const int32_t *)(dst->op_params))[5]; const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1; const int ith = params->ith; const int nth = params->nth; const int64_t N = is_2D ? ne3 : ne2; const int64_t IC = is_2D ? ne2 : ne1; const int64_t IH = is_2D ? ne1 : 1; const int64_t IW = ne0; const int64_t KH = is_2D ? ne11 : 1; const int64_t KW = ne10; const int64_t OH = is_2D ? ne02 : 1; const int64_t OW = ne01; int ofs0 = is_2D ? nb3 : nb2; int ofs1 = is_2D ? nb2 : nb1; GGML_ASSERT(nb0 == sizeof(float)); // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] { float * const wdata = (float *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t iic = ith; iic < IC; iic += nth) { for (int64_t iih = 0; iih < IH; iih++) { for (int64_t iiw = 0; iiw < IW; iiw++) { // micro kernel float grad = 0.0f; for (int64_t ikh = 0; ikh < KH; ikh++) { for (int64_t ikw = 0; ikw < KW; ikw++) { // For s0 > 1 some values were skipped over in the forward pass. // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. const int64_t tmpw = (iiw + p0 - ikw*d0); if (tmpw % s0 != 0) { continue; } const int64_t iow = tmpw / s0; // Equivalent logic as above except for s1. 
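// e.g. with s1 == 2, p1 == 0, d1 == 1: tmph = iih - ikh, so only even differences map back to a forward output row (ioh = tmph/2); odd differences were never produced by the forward pass and contribute no gradient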
int64_t ioh; if (is_2D) { const int64_t tmph = iih + p1 - ikh*d1; if (tmph % s1 != 0) { continue; } ioh = tmph / s1; } else { ioh = 0; } if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { continue; } const float * const grad_in = (const float *) src0->data + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] grad += grad_in[iic*(KH*KW) + ikh*KW + ikw]; } } float * dst_data = (float *)((char *) wdata + (in*ofs0 + iic*ofs1)); // [IH, IW] dst_data[iih*IW + iiw] = grad; } } } } } } // ggml_compute_forward_im2col_3d_f16 // src0: kernel [OC*IC, KD, KH, KW] // src1: image [N*IC, ID, IH, IW] // dst: result [N*OD, OH, OW, IC * KD * KH * KW] static void ggml_compute_forward_im2col_3d_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F16); GGML_TENSOR_BINARY_OP_LOCALS; const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; const int32_t s2 = ((const int32_t *)(dst->op_params))[2]; const int32_t p0 = ((const int32_t *)(dst->op_params))[3]; const int32_t p1 = ((const int32_t *)(dst->op_params))[4]; const int32_t p2 = ((const int32_t *)(dst->op_params))[5]; const int32_t d0 = ((const int32_t *)(dst->op_params))[6]; const int32_t d1 = ((const int32_t *)(dst->op_params))[7]; const int32_t d2 = ((const int32_t *)(dst->op_params))[8]; const int32_t IC = ((const int32_t *)(dst->op_params))[9]; const int ith = params->ith; const int nth = params->nth; const int64_t N = ne13 / IC; const int64_t ID = ne12; const int64_t IH = ne11; const int64_t IW = ne10; const int64_t OC = ne03 / IC; GGML_UNUSED(OC); const int64_t KD = ne02; const int64_t KH = ne01; const int64_t KW = ne00; const int64_t OD = ne3 / N; const int64_t OH = ne2; const int64_t OW = ne1; const int64_t OH_OW = OH*OW; const int64_t KD_KH_KW = KD*KH*KW; const int64_t KH_KW = KH*KW; const int64_t IC_KD_KH_KW = IC*KD*KH*KW; GGML_ASSERT(nb10 == sizeof(float)); // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] { ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t iod = 0; iod < OD; iod++) { for (int64_t ioh = 0; ioh < OH; ioh++) { for (int64_t iow = 0; iow < OW; iow++) { for (int64_t iic = ith; iic < IC; iic += nth) { // micro kernel ggml_fp16_t * dst_data = wdata + (in*OD*OH_OW + iod*OH_OW + ioh*OW + iow)*IC_KD_KH_KW; // [IC, KD, KH, KW] const float * const src_data = (const float *) ((const char *)src1->data + (in*IC + iic)*nb13); // [ID, IH, IW] for (int64_t ikd = 0; ikd < KD; ikd++) { for (int64_t ikh = 0; ikh < KH; ikh++) { for (int64_t ikw = 0; ikw < KW; ikw++) { const int64_t iiw = iow*s0 + ikw*d0 - p0; const int64_t iih = ioh*s1 + ikh*d1 - p1; const int64_t iid = iod*s2 + ikd*d2 - p2; if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst_data[iic*KD_KH_KW + ikd * KH_KW + ikh*KW + ikw] = 0; } else { const float * const s = (const float *) ((const char *)src_data + iid*nb12 + iih*nb11 + iiw*nb10); // [ID, IH, IW] dst_data[iic*KD_KH_KW + ikd * KH_KW + ikh*KW + ikw] = GGML_CPU_FP32_TO_FP16(*s); } } } } } } } } } } } // ggml_compute_forward_im2col_3d_f32 // src0: kernel [OC*IC, KD, KH, KW] // src1: image [N*IC, ID, IH, IW] // dst: result [N*OD, OH, OW, IC * KD * KH * KW] static void ggml_compute_forward_im2col_3d_f32( const ggml_compute_params * params, ggml_tensor * dst) { const 
ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS; const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; const int32_t s2 = ((const int32_t *)(dst->op_params))[2]; const int32_t p0 = ((const int32_t *)(dst->op_params))[3]; const int32_t p1 = ((const int32_t *)(dst->op_params))[4]; const int32_t p2 = ((const int32_t *)(dst->op_params))[5]; const int32_t d0 = ((const int32_t *)(dst->op_params))[6]; const int32_t d1 = ((const int32_t *)(dst->op_params))[7]; const int32_t d2 = ((const int32_t *)(dst->op_params))[8]; const int32_t IC = ((const int32_t *)(dst->op_params))[9]; const int ith = params->ith; const int nth = params->nth; const int64_t N = ne13 / IC; const int64_t ID = ne12; const int64_t IH = ne11; const int64_t IW = ne10; const int64_t OC = ne03 / IC; GGML_UNUSED(OC); const int64_t KD = ne02; const int64_t KH = ne01; const int64_t KW = ne00; const int64_t OD = ne3 / N; const int64_t OH = ne2; const int64_t OW = ne1; const int64_t OH_OW = OH*OW; const int64_t KD_KH_KW = KD*KH*KW; const int64_t KH_KW = KH*KW; const int64_t IC_KD_KH_KW = IC*KD*KH*KW; GGML_ASSERT(nb10 == sizeof(float)); // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] { float * const wdata = (float *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t iod = 0; iod < OD; iod++) { for (int64_t ioh = 0; ioh < OH; ioh++) { for (int64_t iow = 0; iow < OW; iow++) { for (int64_t iic = ith; iic < IC; iic += nth) { // micro kernel float * dst_data = wdata + (in*OD*OH_OW + iod*OH_OW + ioh*OW + iow)*IC_KD_KH_KW; // [IC, KD, KH, KW] const float * const src_data = (const float *) ((const char *)src1->data + (in*IC + iic)*nb13); // [ID, IH, IW] for (int64_t ikd = 0; ikd < KD; ikd++) { for (int64_t ikh = 0; ikh < KH; ikh++) { for (int64_t ikw = 0; ikw < KW; ikw++) { const int64_t iiw = iow*s0 + ikw*d0 - p0; const int64_t iih = ioh*s1 + ikh*d1 - p1; const int64_t iid = iod*s2 + ikd*d2 - p2; if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || iid < 0 || iid >= ID) { dst_data[iic*KD_KH_KW + ikd * KH_KW + ikh*KW + ikw] = 0; } else { const float * const s = (const float *) ((const char *)src_data + iid*nb12 + iih*nb11 + iiw*nb10); // [ID, IH, IW] dst_data[iic*KD_KH_KW + ikd * KH_KW + ikh*KW + ikw] = *s; } } } } } } } } } } } void ggml_compute_forward_im2col_3d( const ggml_compute_params * params, ggml_tensor * dst) { switch (dst->type) { case GGML_TYPE_F16: { ggml_compute_forward_im2col_3d_f16(params, dst); } break; case GGML_TYPE_F32: { ggml_compute_forward_im2col_3d_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } static void ggml_call_mul_mat(ggml_type type, const ggml_compute_params * params, int64_t m, int64_t n, int64_t k, void * a, void * b, float * c) { const ggml_type_traits * traits = ggml_get_type_traits(type); struct ggml_tensor src1 = {}; src1.type = type; src1.ne[0] = k; src1.ne[1] = m; src1.ne[2] = 1; src1.ne[3] = 1; src1.nb[0] = traits->type_size; src1.nb[1] = k * traits->type_size; src1.nb[2] = src1.nb[1]; src1.nb[3] = src1.nb[2]; src1.data = a; struct ggml_tensor src0 = {}; src0.type = type; src0.ne[0] = k; src0.ne[1] = n; src0.ne[2] = 1; src0.ne[3] = 1; src0.nb[0] = traits->type_size; src0.nb[1] = k * traits->type_size; src0.nb[2] = src0.nb[1]; src0.nb[3] = src0.nb[2]; src0.data = b; struct ggml_tensor dst = {}; dst.ne[0] = n; dst.ne[1] = m; dst.ne[2] 
= 1; dst.ne[3] = 1; dst.nb[0] = sizeof(float); dst.nb[1] = n * sizeof(float); dst.nb[2] = dst.nb[1]; dst.nb[3] = dst.nb[2]; dst.data = c; dst.src[0] = &src0; dst.src[1] = &src1; ggml_compute_forward_mul_mat(params, &dst); } static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { return (coord + size) % size; // adding size avoids negative number weirdness } // ggml_compute_forward_conv_2d static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, const ggml_tensor * kernel, // [KW, KH, IC, OC] const ggml_tensor * src, // [W, H, C, N] ggml_tensor * dst, // [OW, OH, OC, N] ggml_type kernel_type) { GGML_ASSERT(ggml_is_contiguous(kernel)); GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); GGML_ASSERT(kernel->type == kernel_type); const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); const int32_t stride_x = dst->op_params[0]; const int32_t stride_y = dst->op_params[1]; const int32_t pad_x = dst->op_params[2]; const int32_t pad_y = dst->op_params[3]; const int32_t dilation_x = dst->op_params[4]; const int32_t dilation_y = dst->op_params[5]; const int64_t c_in = src->ne[2]; const int64_t c_out = kernel->ne[3]; GGML_ASSERT(c_in == kernel->ne[2]); const int64_t src_w = src->ne[0]; const int64_t src_h = src->ne[1]; const int64_t knl_w = kernel->ne[0]; const int64_t knl_h = kernel->ne[1]; const int64_t dst_w = dst->ne[0]; const int64_t dst_h = dst->ne[1]; const float * src_data = (float *) src->data; void * knl_data = kernel->data; float * dst_data = (float *) dst->data; const int64_t knl_n = knl_w * knl_h * c_in; const int64_t patch_total = dst->ne[3] * dst_w * dst_h; const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); const int64_t batch_size = params->wsize / space_per_patch; const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); void * tmp = params->wdata; for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { const int64_t patch_start_batch = batch_i * patches_per_batch; const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); const int64_t patch_n = patch_end_batch - patch_start_batch; const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); //im2col for a patch for (int64_t p = patch_start; p < patch_end; ++p) { const int64_t batch_n = p / (dst_w * dst_h); const int64_t src_x = (p / dst_w) % dst_h; const int64_t src_y = p % dst_w; const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; for (int64_t ic = 0; ic < c_in; ++ic) { for (int64_t ky = 0; ky < knl_h; ++ky) { for (int64_t kx = 0; kx < knl_w; ++kx) { const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; float src_val; if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { src_val = 0.0f; } else { const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); src_val = *src_ptr; } char * element_ptr = dst_row + dst_idx * traits->type_size; if (kernel_type == GGML_TYPE_F32) { *(float *) element_ptr = src_val; } else if (kernel_type == GGML_TYPE_F16) { *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); } } } } } // patches handled by this thread ggml_barrier(params->threadpool); float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); GGML_ASSERT(gemm_output + patch_n * c_out <= (float*)tmp + params->wsize); // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); ggml_barrier(params->threadpool); //permute back [OC, N, OH, OW] to [N, OC, OH, OW] const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; const int64_t permute_start = params->ith * permute_per_thread; const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); for (int64_t i = permute_start; i < permute_end; ++i) { const int64_t p = patch_start_batch + i; const int64_t batch_n = p / (dst_w * dst_h); const int64_t dst_y = (p / dst_w) % dst_h; const int64_t dst_x = p % dst_w; for (int64_t oc = 0; oc < c_out; ++oc) { const float value = gemm_output[i * c_out + oc]; float * dst_ptr = (float *)((char *)dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + oc * dst->nb[2] + batch_n * dst->nb[3]); *dst_ptr = value; } } } } void ggml_compute_forward_conv_2d( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); } // ggml_compute_forward_conv_3d static void ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, const ggml_tensor * kernel, const ggml_tensor * src, ggml_tensor * dst, ggml_type kernel_type) { GGML_ASSERT(ggml_is_contiguous(kernel)); 
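// note: same im2col + GEMM strategy as ggml_compute_forward_conv_2d_impl above: patches are gathered into wdata in batches sized to fit params->wsize, multiplied against the kernel with ggml_call_mul_mat, and the per-patch [oc] outputs are then scattered back into dst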
GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); GGML_ASSERT(kernel->type == kernel_type); const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); const int32_t s0 = dst->op_params[0]; const int32_t s1 = dst->op_params[1]; const int32_t s2 = dst->op_params[2]; const int32_t p0 = dst->op_params[3]; const int32_t p1 = dst->op_params[4]; const int32_t p2 = dst->op_params[5]; const int32_t d0 = dst->op_params[6]; const int32_t d1 = dst->op_params[7]; const int32_t d2 = dst->op_params[8]; const int32_t c = dst->op_params[9]; const int32_t n = dst->op_params[10]; const int32_t oc = dst->op_params[11]; const int64_t src_w = src->ne[0]; const int64_t src_h = src->ne[1]; const int64_t src_d = src->ne[2]; const int64_t knl_w = kernel->ne[0]; const int64_t knl_h = kernel->ne[1]; const int64_t knl_d = kernel->ne[2]; const int64_t dst_w = dst->ne[0]; const int64_t dst_h = dst->ne[1]; const int64_t dst_d = dst->ne[2]; const float * src_data = (float *) src->data; void * knl_data = kernel->data; float * dst_data = (float *) dst->data; const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; const int64_t knl_n_total = knl_n_per_channel * c; const int64_t patch_total = n * dst_w * dst_h * dst_d; const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); const int64_t batch_size = params->wsize / space_per_patch; const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); void * tmp = params->wdata; for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { const int64_t patch_start_batch = batch_i * patches_per_batch; const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); for (int64_t p = patch_start; p < patch_end; ++p) { const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); const int64_t batch_idx = p / (dst_w * dst_h * dst_d); const int64_t dst_z = p_in_batch / (dst_w * dst_h); const int64_t dst_y = p_in_depth / dst_w; const int64_t dst_x = p_in_depth % dst_w; char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; for (int64_t ic = 0; ic < c; ++ic) { for (int64_t kz = 0; kz < knl_d; ++kz) { for (int64_t ky = 0; ky < knl_h; ++ky) { for (int64_t kx = 0; kx < knl_w; ++kx) { const int64_t sz = dst_z * s2 + kz * d2 - p2; const int64_t sy = dst_y * s1 + ky * d1 - p1; const int64_t sx = dst_x * s0 + kx * d0 - p0; int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; float src_val; if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { src_val = 0.0f; } else { const int64_t cn_idx = batch_idx * c + ic; const float * src_ptr = (const float *)((const char *)src_data + sx*src->nb[0] + sy*src->nb[1] + sz*src->nb[2] + cn_idx*src->nb[3]); src_val = *src_ptr; } char * element_ptr = dst_row + dst_idx * traits->type_size; if (kernel_type == GGML_TYPE_F32) { *(float *)element_ptr = src_val; } else if (kernel_type == GGML_TYPE_F16) { *(ggml_fp16_t *)element_ptr = GGML_CPU_FP32_TO_FP16(src_val); } } 
} } } } ggml_barrier(params->threadpool); float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); ggml_barrier(params->threadpool); const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; const int64_t permute_start = params->ith * permute_per_thread; const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); for (int64_t i = permute_start; i < permute_end; ++i) { const int64_t p = patch_start_batch + i; const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); const int64_t batch_idx = p / (dst_w * dst_h * dst_d); const int64_t dst_z = p_in_batch / (dst_w * dst_h); const int64_t dst_y = p_in_depth / dst_w; const int64_t dst_x = p_in_depth % dst_w; for (int64_t ioc = 0; ioc < oc; ++ioc) { const float value = gemm_output[i * oc + ioc]; const int64_t ocn_idx = batch_idx * oc + ioc; float * dst_ptr = (float *)((char *)dst_data + dst_x*dst->nb[0] + dst_y*dst->nb[1] + dst_z*dst->nb[2] + ocn_idx*dst->nb[3]); *dst_ptr = value; } } } } void ggml_compute_forward_conv_3d( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); } // ggml_compute_forward_conv_transpose_2d void ggml_compute_forward_conv_transpose_2d( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const int nk = ne00*ne01*ne02*ne03; GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb10 == sizeof(float)); if (ith == 0) { memset(params->wdata, 0, params->wsize); // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02); ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03; for (int64_t i01 = 0; i01 < ne01; i01++) { for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00]; } } } } } // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; for (int i12 = 0; i12 < ne12; i12++) { for (int i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11); ggml_fp16_t * dst_data = wdata + i11*ne10*ne12; for (int i10 = 0; i10 < ne10; i10++) { dst_data[i10*ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); } } } } memset(dst->data, 0, ggml_nbytes(dst)); } ggml_barrier(params->threadpool); const int32_t stride = ggml_get_op_params_i32(dst, 0); // total patches in dst const int np = ne2; // patches per thread const int dp = (np + nth - 1)/nth; // patch range for this thread const int ip0 = dp*ith; const int ip1 = MIN(ip0 + dp, np); ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; ggml_fp16_t * const wdata_src = wdata + nk; for (int i2 = ip0; i2 < ip1; i2++) { // Cout float * dst_data = (float 
*)((char *) dst->data + i2*nb2); ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03; for (int i11 = 0; i11 < ne11; i11++) { for (int i10 = 0; i10 < ne10; i10++) { const int i1n = i11*ne10*ne12 + i10*ne12; for (int i01 = 0; i01 < ne01; i01++) { for (int i00 = 0; i00 < ne00; i00++) { float v = 0; ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01*ne00*ne03 + i00*ne03, 0, 1); dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v; } } } } } } // ggml_compute_forward_conv_2d_dw struct ggml_conv_2d_dw_params { int64_t channels; int64_t batch; int64_t src_w; int64_t src_h; int64_t dst_w; int64_t dst_h; int64_t knl_w; int64_t knl_h; int stride_x; int stride_y; int pad_x; int pad_y; int dilation_x; int dilation_y; }; static void ggml_compute_forward_conv_2d_dw_cwhn( const ggml_compute_params * params, const ggml_tensor * src, const ggml_tensor * kernel, ggml_tensor * dst, const ggml_conv_2d_dw_params & p) { const int64_t c = p.channels; const float * knl_data = (const float *)kernel->data; const int64_t rows_total = p.dst_h * p.batch; const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; const int64_t row_start = params->ith * rows_per_thread; const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); #ifdef GGML_SIMD #if defined(__ARM_FEATURE_SVE) const int64_t pkg_size = svcntw(); #else const int64_t pkg_size = GGML_F32_EPR; #endif const int64_t pkg_count = c / pkg_size; const int64_t c_pkg_end = pkg_count * pkg_size; #else const int64_t c_pkg_end = 0; #endif for (int64_t row = row_start; row < row_end; ++row) { const int64_t dst_y = row % p.dst_h; const float * src_data = (const float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; #ifdef GGML_SIMD // Vectorized loop for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { GGML_F32_VEC sum = GGML_F32_VEC_ZERO; for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { const int64_t src_y = src_y_base + knl_y * p.dilation_y; if (src_y < 0 || src_y >= p.src_h) { continue; } for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { const int64_t src_x = src_x_base + knl_x * p.dilation_x; if (src_x < 0 || src_x >= p.src_w) { continue; } GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); sum = GGML_F32_VEC_FMA(sum, k, s); } } GGML_F32_VEC_STORE(dst_data + c_i, sum); } #endif // Scalar loop for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { float sum = 0.0f; for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { const int64_t src_y = src_y_base + knl_y * p.dilation_y; if (src_y < 0 || src_y >= p.src_h) { continue; } for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { const int64_t src_x = src_x_base + knl_x * p.dilation_x; if (src_x < 0 || src_x >= p.src_w) { continue; } sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * src_data[(src_y * p.src_w + src_x) * c + c_i]; } } dst_data[c_i] = sum; } } } } static void ggml_compute_forward_conv_2d_dw_whcn( const ggml_compute_params * params, const ggml_tensor * src, const ggml_tensor * kernel, ggml_tensor * dst, const ggml_conv_2d_dw_params & p) { const int64_t n = p.channels * p.batch; const int64_t per_thread = (n + params->nth - 1) / params->nth; const int64_t start = params->ith * per_thread; const int64_t 
end = MIN(start + per_thread, n); for (int64_t i = start; i < end; ++i) { const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { float sum = 0.0f; for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; if (src_y < 0 || src_y >= p.src_h) { continue; } for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; if (src_x < 0 || src_x >= p.src_w) { continue; } sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; } } dst_data[dst_y * p.dst_w + dst_x] = sum; } } } } void ggml_compute_forward_conv_2d_dw( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * kernel = dst->src[0]; const ggml_tensor * src = dst->src[1]; ggml_conv_2d_dw_params p; p.channels = src->ne[2]; p.batch = src->ne[3]; p.src_w = src->ne[0]; p.src_h = src->ne[1]; p.dst_w = dst->ne[0]; p.dst_h = dst->ne[1]; p.knl_w = kernel->ne[0]; p.knl_h = kernel->ne[1]; p.stride_x = dst->op_params[0]; p.stride_y = dst->op_params[1]; p.pad_x = dst->op_params[2]; p.pad_y = dst->op_params[3]; p.dilation_x = dst->op_params[4]; p.dilation_y = dst->op_params[5]; GGML_ASSERT(kernel->ne[3] == p.channels); GGML_ASSERT(dst->ne[3] == p.batch); if (ggml_is_contiguous(src)) { ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); } else if (ggml_is_contiguous_channels(src)) { // kernel should also have channels most contiguous in memory GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); } else { GGML_ABORT("non-contiguous memory layout not supported"); } } // ggml_compute_forward_pool_1d_sk_p0 static void ggml_compute_forward_pool_1d_sk_p0( const ggml_compute_params * params, const ggml_op_pool op, const int k, ggml_tensor * dst) { const ggml_tensor * src = dst->src[0]; assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); if (params->ith != 0) { return; } const char * cdata = (const char *)src->data; const char * const data_end = cdata + ggml_nbytes(src); float * drow = (float *)dst->data; const int64_t rs = dst->ne[0]; while (cdata < data_end) { const void * srow = (const void *)cdata; int j = 0; for (int64_t i = 0; i < rs; ++i) { switch (op) { case GGML_OP_POOL_AVG: drow[i] = 0; break; case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } for (int ki = 0; ki < k; ++ki) { const float srow_j = (src->type == GGML_TYPE_F32) ? 
((const float*)srow)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); switch (op) { case GGML_OP_POOL_AVG: drow[i] += srow_j; break; case GGML_OP_POOL_MAX: if (srow_j > drow[i]) drow[i] = srow_j; break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } ++j; } switch (op) { case GGML_OP_POOL_AVG: drow[i] /= k; break; case GGML_OP_POOL_MAX: break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } } cdata += src->nb[1]; drow += rs; } } // ggml_compute_forward_pool_1d void ggml_compute_forward_pool_1d( const ggml_compute_params * params, ggml_tensor * dst) { const int32_t * opts = (const int32_t *)dst->op_params; ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]); const int k0 = opts[1]; const int s0 = opts[2]; const int p0 = opts[3]; GGML_ASSERT(p0 == 0); // padding not supported GGML_ASSERT(k0 == s0); // only s = k supported ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst); } // ggml_compute_forward_pool_2d void ggml_compute_forward_pool_2d( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src = dst->src[0]; assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); if (params->ith != 0) { return; } const int32_t * opts = (const int32_t *)dst->op_params; ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]); const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; const char * cdata = (const char*)src->data; const char * const data_end = cdata + ggml_nbytes(src); const int64_t px = dst->ne[0]; const int64_t py = dst->ne[1]; const int64_t pa = px * py; float * dplane = (float *)dst->data; const int ka = k0 * k1; const int offset0 = -p0; const int offset1 = -p1; while (cdata < data_end) { for (int oy = 0; oy < py; ++oy) { float * const drow = dplane + oy * px; for (int ox = 0; ox < px; ++ox) { float * const out = drow + ox; switch (op) { case GGML_OP_POOL_AVG: *out = 0; break; case GGML_OP_POOL_MAX: *out = -FLT_MAX; break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } const int ix = offset0 + ox * s0; const int iy = offset1 + oy * s1; for (int ky = 0; ky < k1; ++ky) { if (iy + ky < 0 || iy + ky >= src->ne[1]) continue; const void * srow = (const void *)(cdata + src->nb[1] * (iy + ky)); for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; if (j < 0 || j >= src->ne[0]) continue; const float srow_j = (src->type == GGML_TYPE_F32) ?
((const float*)srow)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); switch (op) { case GGML_OP_POOL_AVG: *out += srow_j; break; case GGML_OP_POOL_MAX: if (srow_j > *out) *out = srow_j; break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } } } switch (op) { case GGML_OP_POOL_AVG: *out /= ka; break; case GGML_OP_POOL_MAX: break; case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } } } cdata += src->nb[2]; dplane += pa; } } // ggml_compute_forward_pool_2d_back void ggml_compute_forward_pool_2d_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src = dst->src[0]; const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); if (params->ith != 0) { return; } const int32_t * opts = (const int32_t *)dst->op_params; ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]); const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; char * cdata = (char *) dst->data; const char * cdataf = (const char *) dstf->data; const char * const data_end = cdata + ggml_nbytes(dst); GGML_ASSERT(params->ith == 0); memset(cdata, 0, ggml_nbytes(dst)); const int64_t px = src->ne[0]; const int64_t py = src->ne[1]; const int64_t pa = px * py; const float * splane = (const float *) src->data; const int ka = k0 * k1; const int offset0 = -p0; const int offset1 = -p1; while (cdata < data_end) { for (int oy = 0; oy < py; ++oy) { const float * const srow = splane + oy * px; for (int ox = 0; ox < px; ++ox) { const float grad0 = srow[ox]; const int ix = offset0 + ox * s0; const int iy = offset1 + oy * s1; if (op == GGML_OP_POOL_MAX) { float maxval = -FLT_MAX; int kxmax = -1; int kymax = -1; for (int ky = 0; ky < k1; ++ky) { if (iy + ky < 0 || iy + ky >= dst->ne[1]) { continue; } const void * drowf = (const void *)(cdataf + dst->nb[1] * (iy + ky)); for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; if (j < 0 || j >= dst->ne[0]) { continue; } const float val = dst->type == GGML_TYPE_F32 ?
((const float *) drowf)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); if (val <= maxval) { continue; } maxval = val; kxmax = kx; kymax = ky; } } if (kxmax == -1 || kymax == -1) { continue; } void * drow = (void *)(cdata + dst->nb[1] * (iy + kymax)); const int j = ix + kxmax; if (dst->type == GGML_TYPE_F32) { ((float *) drow)[j] += grad0; } else { ((ggml_fp16_t *) drow)[j] = GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); } } else if (op == GGML_OP_POOL_AVG) { const float grad = grad0 / ka; for (int ky = 0; ky < k1; ++ky) { if (iy + ky < 0 || iy + ky >= dst->ne[1]) { continue; } void * drow = (void *)(cdata + dst->nb[1] * (iy + ky)); for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; if (j < 0 || j >= dst->ne[0]) { continue; } if (dst->type == GGML_TYPE_F32) { ((float *) drow)[j] += grad; } else { ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); } } } } else { GGML_ASSERT(false); } } } cdata += dst->nb[2]; cdataf += dst->nb[2]; splane += pa; } } // ggml_compute_forward_upscale static void ggml_compute_forward_upscale_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->type == GGML_TYPE_F32); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS float sf0 = (float)ne0/src0->ne[0]; float sf1 = (float)ne1/src0->ne[1]; float sf2 = (float)ne2/src0->ne[2]; float sf3 = (float)ne3/src0->ne[3]; float pixel_offset = 0.5f; const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { pixel_offset = 0.0f; sf0 = ne0 > 1 && ne00 > 1 ? (float)(ne0 - 1) / (ne00 - 1) : sf0; sf1 = ne1 > 1 && ne01 > 1 ? 
(float)(ne1 - 1) / (ne01 - 1) : sf1; } if (mode == GGML_SCALE_MODE_NEAREST) { for (int64_t i3 = 0; i3 < ne3; i3++) { const int64_t i03 = i3 / sf3; for (int64_t i2 = ith; i2 < ne2; i2 += nth) { const int64_t i02 = i2 / sf2; for (int64_t i1 = 0; i1 < ne1; i1++) { const int64_t i01 = i1 / sf1; for (int64_t i0 = 0; i0 < ne0; i0++) { const int64_t i00 = i0 / sf0; const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *y = *x; } } } } } else if (mode == GGML_SCALE_MODE_BILINEAR && (mode_flags & GGML_SCALE_FLAG_ANTIALIAS)) { // Similar to F.interpolate(..., mode="bilinear", align_corners=False, antialias=True) // https://github.com/pytorch/pytorch/blob/8871ff29b743948d1225389d5b7068f37b22750b/aten/src/ATen/native/cpu/UpSampleKernel.cpp auto triangle_filter = [](float x) -> float { return std::max(1.0f - fabsf(x), 0.0f); }; // support and invscale, minimum 1 pixel for bilinear const float support1 = std::max(1.0f, 1.0f / sf1); const float invscale1 = 1.0f / support1; const float support0 = std::max(1.0f, 1.0f / sf0); const float invscale0 = 1.0f / support0; for (int64_t i3 = 0; i3 < ne3; i3++) { const int64_t i03 = i3 / sf3; for (int64_t i2 = ith; i2 < ne2; i2 += nth) { const int64_t i02 = i2 / sf2; for (int64_t i1 = 0; i1 < ne1; i1++) { const float y = ((float) i1 + pixel_offset) / sf1; for (int64_t i0 = 0; i0 < ne0; i0++) { const float x = ((float) i0 + pixel_offset) / sf0; // the range of source pixels that contribute const int64_t x_min = std::max(x - support0 + pixel_offset, 0); const int64_t x_max = std::min(x + support0 + pixel_offset, ne00); const int64_t y_min = std::max(y - support1 + pixel_offset, 0); const int64_t y_max = std::min(y + support1 + pixel_offset, ne01); // bilinear filter with antialiasing float val = 0.0f; float total_weight = 0.0f; for (int64_t sy = y_min; sy < y_max; sy++) { const float weight_y = triangle_filter((sy - y + pixel_offset) * invscale1); for (int64_t sx = x_min; sx < x_max; sx++) { const float weight_x = triangle_filter((sx - x + pixel_offset) * invscale0); const float weight = weight_x * weight_y; if (weight <= 0.0f) { continue; } const float pixel = *(const float *)((const char *)src0->data + sx*nb00 + sy*nb01 + i02*nb02 + i03*nb03); val += pixel * weight; total_weight += weight; } } if (total_weight > 0.0f) { val /= total_weight; } float * dst_ptr = (float *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *dst_ptr = val; } } } } } else if (mode == GGML_SCALE_MODE_BILINEAR) { for (int64_t i3 = 0; i3 < ne3; i3++) { const int64_t i03 = i3 / sf3; for (int64_t i2 = ith; i2 < ne2; i2 += nth) { const int64_t i02 = i2 / sf2; for (int64_t i1 = 0; i1 < ne1; i1++) { const float y = ((float)i1 + pixel_offset) / sf1 - pixel_offset; int64_t y0 = (int64_t)floorf(y); int64_t y1 = y0 + 1; y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); float dy = y - (float)y0; dy = std::max(0.0f, std::min(dy, 1.0f)); for (int64_t i0 = 0; i0 < ne0; i0++) { const float x = ((float)i0 + pixel_offset) / sf0 - pixel_offset; int64_t x0 = (int64_t)floorf(x); int64_t x1 = x0 + 1; x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); float dx = x - (float)x0; dx = std::max(0.0f, std::min(dx, 1.0f)); // fetch the four surrounding pixel values and interpolate const float a = *(const float *)((const char *)src0->data + x0*nb00 + y0*nb01 + i02*nb02 + i03*nb03); const float b 
= *(const float *)((const char *)src0->data + x1*nb00 + y0*nb01 + i02*nb02 + i03*nb03); const float c = *(const float *)((const char *)src0->data + x0*nb00 + y1*nb01 + i02*nb02 + i03*nb03); const float d = *(const float *)((const char *)src0->data + x1*nb00 + y1*nb01 + i02*nb02 + i03*nb03); const float val = a*(1 - dx)*(1 - dy) + b*dx*(1 - dy) + c*(1 - dx)*dy + d*dx*dy; float * y_dst = (float *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *y_dst = val; } } } } } else if (mode == GGML_SCALE_MODE_BICUBIC) { // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) auto weight1 = [a](float x) { return ((a + 2) * x - (a + 3)) * x * x + 1; }; auto weight2 = [a](float x) { return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; }; auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { const float w0 = weight2(x + 1); const float w1 = weight1(x + 0); const float w2 = weight1(1 - x); const float w3 = weight2(2 - x); return p0*w0 + p1*w1 + p2*w2 + p3*w3; }; for (int64_t i3 = 0; i3 < ne3; i3++) { const int64_t i03 = i3 / sf3; for (int64_t i2 = ith; i2 < ne2; i2 += nth) { const int64_t i02 = i2 / sf2; for (int64_t i1 = 0; i1 < ne1; i1++) { const float y = ((float)i1 + pixel_offset) / sf1 - pixel_offset; const int64_t y0 = (int64_t)floorf(y); const float dy = y - (float)y0; for (int64_t i0 = 0; i0 < ne0; i0++) { const float x = ((float)i0 + pixel_offset) / sf0 - pixel_offset; const int64_t x0 = (int64_t)floorf(x); const float dx = x - (float)x0; auto p = [=](int64_t x_off, int64_t y_off) -> float { int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, ne00 - 1)); int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); return *(const float *)((const char *)src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); }; const float val = bicubic( bicubic(p(-1,-1), p(0,-1), p(1,-1), p(2,-1), dx), bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); float * y_dst = (float *)((char *)dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); *y_dst = val; } } } } } else { GGML_ABORT("unsupported upscale mode"); } } void ggml_compute_forward_upscale( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_upscale_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_pad template static void ggml_compute_forward_pad_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT( dst->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS float * dst_ptr = (float *) dst->data; const int32_t lp0 = ggml_get_op_params_i32(dst, 0); const int32_t rp0 = ggml_get_op_params_i32(dst, 1); const int32_t lp1 = ggml_get_op_params_i32(dst, 2); const int32_t rp1 = ggml_get_op_params_i32(dst, 3); const int32_t lp2 = ggml_get_op_params_i32(dst, 4); const int32_t rp2 = ggml_get_op_params_i32(dst, 5); const int32_t lp3 = ggml_get_op_params_i32(dst, 6); const int32_t rp3 = ggml_get_op_params_i32(dst, 7); // TODO: optimize for (int64_t i2 = 0; i2 < ne2; ++i2) { for (int64_t i1 = ith; i1 < ne1; i1 += nth) { for (int64_t i0 = 0; i0 < ne0; ++i0) { for (int64_t i3 = 0; i3 < ne3; ++i3) { // circular means wrap around on a torus, so x 
and y loop around if constexpr (circular_t) { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); const int64_t src_idx = src_i3*nb03 + src_i2*nb02 + src_i1*nb01 + src_i0*nb00; const float * src_ptr = (const float *)((char *) src0->data + src_idx); dst_ptr[dst_idx] = *src_ptr; } else { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; if ((i0 >= lp0 && i0 < ne0 - rp0) \ && (i1 >= lp1 && i1 < ne1 - rp1) \ && (i2 >= lp2 && i2 < ne2 - rp2) \ && (i3 >= lp3 && i3 < ne3 - rp3)) { const int64_t src_idx = (i3 - lp3)*nb03 + (i2 - lp2)*nb02 + (i1 - lp1)*nb01 + (i0 - lp0)*nb00; const float * src_ptr = (const float *)((char *) src0->data + src_idx); dst_ptr[dst_idx] = *src_ptr; } else { dst_ptr[dst_idx] = 0; } } } } } } } void ggml_compute_forward_pad( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const bool circular = (bool) ggml_get_op_params_i32(dst, 8); switch (src0->type) { case GGML_TYPE_F32: { if (circular) { ggml_compute_forward_pad_f32(params, dst); } else { ggml_compute_forward_pad_f32(params, dst); } } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_pad_reflect_1d void ggml_compute_forward_pad_reflect_1d( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); const int ith = params->ith; const int nth = params->nth; const int32_t * opts = (const int32_t *) dst->op_params; const int p0 = opts[0]; const int p1 = opts[1]; GGML_TENSOR_UNARY_OP_LOCALS for (int64_t i3 = 0; i3 < ne3; i3++) { for (int64_t i2 = 0; i2 < ne2; i2++) { for (int64_t i1 = ith; i1 < ne1; i1 += nth) { float * left = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + p0*nb0); float * right = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + (ne0-p1-1)*nb0); ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01)); for (int i0 = 1; i0 <= p0; i0++) { left[-i0] = left[i0]; } for (int i0 = 1; i0 <= p1; i0++) { right[i0] = right[-i0]; } } } } } // ggml_compute_forward_roll static int64_t ggml_wrap_index(int64_t i, int64_t ne) { if (i < 0) { return i + ne; } else if (i >= ne) { return i - ne; } return i; } static void ggml_compute_forward_roll_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src_data = (const float *) src0->data; float * dst_data = (float *) dst->data; GGML_TENSOR_UNARY_OP_LOCALS const int s0 = ggml_get_op_params_i32(dst, 0); const int s1 = ggml_get_op_params_i32(dst, 1); const int s2 = ggml_get_op_params_i32(dst, 2); const int s3 = ggml_get_op_params_i32(dst, 3); const int64_t total = ne1 * ne2 * ne3; const int64_t per_thread = (total + params->nth) / params->nth; const int64_t start = params->ith * per_thread; const int64_t end = std::min(start + per_thread, total); for (int64_t i = start; i < end; ++i) { const int64_t i1 = i % ne1; const int64_t i2 = (i / ne1) % ne2; const int64_t i3 = i / (ne2 * ne1); float * dst_row = dst_data + (i3*nb3 + i2*nb2 + i1*nb1) / sizeof(float); const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); 
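// the dim-0 roll is done with two contiguous copies: the wrapped tail src_row[s..ne00) goes to the front of dst_row and the head src_row[0..s) follows it, so dst_row[i0] == src_row[ggml_wrap_index(i0 - s0, ne00)]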
const float * src_row = src_data + (i03*nb03 + i02*nb02 + i01*nb01) / sizeof(float); const int64_t s = ggml_wrap_index(-s0, ne00); const int64_t n = ne00 - s; ggml_vec_cpy_f32(n, dst_row, src_row + s); ggml_vec_cpy_f32(s, dst_row + n, src_row); } } void ggml_compute_forward_roll( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_roll_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_arange static void ggml_compute_forward_arange_f32( const ggml_compute_params * params, ggml_tensor * dst) { GGML_ASSERT(dst->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; const float start = ggml_get_op_params_f32(dst, 0); const float stop = ggml_get_op_params_f32(dst, 1); const float step = ggml_get_op_params_f32(dst, 2); const int64_t steps = (int64_t) ceilf((stop - start) / step); GGML_ASSERT(ggml_nelements(dst) == steps); for (int64_t i = ith; i < steps; i+= nth) { float value = start + step * i; ((float *)dst->data)[i] = value; } } void ggml_compute_forward_arange( const ggml_compute_params * params, ggml_tensor * dst) { switch (dst->type) { case GGML_TYPE_F32: { ggml_compute_forward_arange_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } static void ggml_compute_forward_timestep_embedding_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS const int dim = ggml_get_op_params_i32(dst, 0); const int max_period = ggml_get_op_params_i32(dst, 1); int half = dim / 2; for (int64_t i = 0; i < ne00; i++) { float * embed_data = (float *)((char *) dst->data + i*nb1); for (int64_t j = ith; j < half; j += nth) { float timestep = ((float *)src0->data)[i]; float freq = (float)expf(-logf(max_period) * j / half); float arg = timestep * freq; embed_data[j] = cosf(arg); embed_data[j + half] = sinf(arg); } if (dim % 2 != 0 && ith == 0) { embed_data[2 * half] = 0.f; } } } void ggml_compute_forward_timestep_embedding( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_timestep_embedding_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_argsort template <ggml_sort_order order> struct cmp_argsort { const float * data; bool operator()(int32_t a, int32_t b) const { if constexpr (order == GGML_SORT_ORDER_ASC) { return data[a] < data[b]; } else { return data[a] > data[b]; } } }; static void ggml_compute_forward_argsort_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(nb0 == sizeof(float)); const int ith = params->ith; const int nth = params->nth; const int64_t nr = ggml_nrows(src0); ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0); for (int64_t i = ith; i < nr; i += nth) { const float * src_data = (float *)((char *) src0->data + i*nb01); int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1); for (int64_t j = 0; j < ne0; j++) { dst_data[j] = j; } switch (order) { case GGML_SORT_ORDER_ASC: std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_ASC>{src_data}); break; case GGML_SORT_ORDER_DESC: std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_DESC>{src_data}); break; default: GGML_ABORT("invalid sort order"); } } }
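// e.g. for a row src = {0.3f, 0.1f, 0.5f}: GGML_SORT_ORDER_ASC writes indices {1, 0, 2} and GGML_SORT_ORDER_DESC writes {2, 0, 1} (dst holds source indices, not values)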
void ggml_compute_forward_argsort( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_argsort_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_top_k struct cmp_top_k { const float * data; bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; } }; static void ggml_compute_forward_top_k_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(nb0 == sizeof(float)); const int ith = params->ith; const int nth = params->nth; const int64_t nr = ggml_nrows(src0); const int top_k = ne0; int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; for (int64_t i = ith; i < nr; i += nth) { const float * src_data = (float *)((char *) src0->data + i*nb01); for (int64_t j = 0; j < ne00; j++) { tmp[j] = j; } std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{src_data}); int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1); std::copy(tmp, tmp + top_k, dst_data); // emphasize that the order is not important if (top_k > 1) { std::swap(dst_data[0], dst_data[1]); } } } void ggml_compute_forward_top_k( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_top_k_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_flash_attn_ext static void ggml_compute_forward_flash_attn_ext_f16_one_chunk( const ggml_compute_params * params, ggml_tensor * dst, int ir0, int ir1) { const ggml_tensor * q = dst->src[0]; const ggml_tensor * k = dst->src[1]; const ggml_tensor * v = dst->src[2]; const ggml_tensor * mask = dst->src[3]; const ggml_tensor * sinks = dst->src[4]; GGML_TENSOR_LOCALS(int64_t, neq, q, ne) GGML_TENSOR_LOCALS(size_t, nbq, q, nb) GGML_TENSOR_LOCALS(int64_t, nek, k, ne) GGML_TENSOR_LOCALS(size_t, nbk, k, nb) GGML_TENSOR_LOCALS(int64_t, nev, v, ne) GGML_TENSOR_LOCALS(size_t, nbv, v, nb) GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int64_t DK = nek0; const int64_t DV = nev0; const int64_t N = neq1; GGML_ASSERT(ne0 == DV); GGML_ASSERT(ne2 == N); // input tensor rows must be contiguous GGML_ASSERT(nbq0 == ggml_type_size(q->type)); GGML_ASSERT(nbk0 == ggml_type_size(k->type)); GGML_ASSERT(nbv0 == ggml_type_size(v->type)); GGML_ASSERT(neq0 == DK); GGML_ASSERT(nek0 == DK); GGML_ASSERT(nev0 == DV); GGML_ASSERT(neq1 == N); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); // broadcast factors const int64_t rk2 = neq2/nek2; const int64_t rk3 = neq3/nek3; const int64_t rv2 = neq2/nev2; const int64_t rv3 = neq3/nev3; // parallelize by q rows using ggml_vec_dot_f32 float scale = 1.0f; float max_bias = 0.0f; float logit_softcap = 0.0f; memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); if (logit_softcap != 0) { scale /= logit_softcap; } const uint32_t n_head = neq2; const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); ggml_type const k_vec_dot_type = 
ggml_get_type_traits_cpu(k->type)->vec_dot_type; ggml_from_float_t const q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; ggml_vec_dot_t const kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; ggml_to_float_t const v_to_float = ggml_get_type_traits(v->type)->to_float; GGML_ASSERT(( q_to_vec_dot) && "fattn: unsupported K-type"); GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float ) && "fattn: unsupported V-type"); int ith = params->ith; // loop over n_batch and n_head for (int ir = ir0; ir < ir1; ++ir) { // q indices const int iq3 = ir/(neq2*neq1); const int iq2 = (ir - iq3*neq2*neq1)/neq1; const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1); const uint32_t h = iq2; // head index const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f; float S = 0.0f; // sum float M = -INFINITY; // maximum KQ value float * VKQ32 = (float *) params->wdata + ith*(1*DK + 2*DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator float * V32 = (VKQ32 + 1*DV); // (temporary) FP32 V buffer ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1*DV); // (temporary) FP16 VKQ accumulator ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2*DV); // (temporary) buffer for Q converted to quantized/FP16 if (v->type == GGML_TYPE_F16) { memset(VKQ16, 0, DV*sizeof(ggml_fp16_t)); } else { memset(VKQ32, 0, DV*sizeof(float)); } const ggml_fp16_t * mp = mask ? (ggml_fp16_t *)((char *) mask->data + iq1*mask->nb[1] + (iq2%mask->ne[2])*mask->nb[2] + (iq3%mask->ne[3])*mask->nb[3]) : NULL; // k indices const int ik3 = iq3 / rk3; const int ik2 = iq2 / rk2; // v indices const int iv3 = iq3 / rv3; const int iv2 = iq2 / rv2; const float * pq = (const float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)); q_to_vec_dot(pq, Q_q, DK); // online softmax / attention // loop over n_kv and n_head_kv // ref: https://arxiv.org/pdf/2112.05682.pdf for (int64_t ic = 0; ic < nek1; ++ic) { const float mv = mp ? 
slope*GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; if (mv == -INFINITY) { continue; } float s; // KQ value const char * k_data = (const char *) k->data + ( ic*nbk1 + ik2*nbk2 + ik3*nbk3); kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); s = s*scale; // scale KQ value if (logit_softcap != 0.0f) { s = logit_softcap*tanhf(s); } s += mv; // apply mask const float Mold = M; float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value float vs = 1.0f; // post-softmax KQ value, expf(s - M) const char * v_data = ((const char *) v->data + (ic*nbv1 + iv2*nbv2 + iv3*nbv3)); if (v->type == GGML_TYPE_F16) { if (s > M) { // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f M = s; ms = expf(Mold - M); // V = V*expf(Mold - M) ggml_vec_scale_f16(DV, VKQ16, ms); } else { // no new maximum, ms == 1.0f, vs != 1.0f vs = expf(s - M); } // V += v*expf(s - M) ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); } else { if (s > M) { // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f M = s; ms = expf(Mold - M); // V = V*expf(Mold - M) ggml_vec_scale_f32(DV, VKQ32, ms); } else { // no new maximum, ms == 1.0f, vs != 1.0f vs = expf(s - M); } // V += v*expf(s - M) if (v_to_float) { v_to_float(v_data, V32, DV); ggml_vec_mad_f32(DV, VKQ32, V32, vs); } else { // V is F32 ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); } } S = S*ms + vs; // scale and increment sum with partial sum } if (v->type == GGML_TYPE_F16) { for (int64_t d = 0; d < DV; ++d) { VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); } } // sinks if (sinks) { const float s = ((float *)((char *) sinks->data))[h]; float ms = 1.0f; float vs = 1.0f; if (s > M) { ms = expf(M - s); ggml_vec_scale_f32(DV, VKQ32, ms); } else { vs = expf(s - M); } S = S*ms + vs; } // V /= S const float S_inv = S == 0.0f ? 
0.0f : 1.0f/S; ggml_vec_scale_f32(DV, VKQ32, S_inv); // dst indices const int i1 = iq1; const int i2 = iq2; const int i3 = iq3; // original //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); // permute(0, 2, 1, 3) memcpy((char *) dst->data + (i3*ne2*ne1 + i2 + i1*ne1)*nb1, VKQ32, nb1); } } static void ggml_compute_forward_flash_attn_ext_f16( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * q = dst->src[0]; const ggml_tensor * k = dst->src[1]; const ggml_tensor * v = dst->src[2]; GGML_TENSOR_LOCALS(int64_t, neq, q, ne) GGML_TENSOR_LOCALS(size_t, nbq, q, nb) GGML_TENSOR_LOCALS(int64_t, nek, k, ne) GGML_TENSOR_LOCALS(size_t, nbk, k, nb) GGML_TENSOR_LOCALS(int64_t, nev, v, ne) GGML_TENSOR_LOCALS(size_t, nbv, v, nb) GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int64_t DK = nek0; const int64_t DV = nev0; const int64_t N = neq1; GGML_ASSERT(ne0 == DV); GGML_ASSERT(ne2 == N); // input tensor rows must be contiguous GGML_ASSERT(nbq0 == ggml_type_size(q->type)); GGML_ASSERT(nbk0 == ggml_type_size(k->type)); GGML_ASSERT(nbv0 == ggml_type_size(v->type)); GGML_ASSERT(neq0 == DK); GGML_ASSERT(nek0 == DK); GGML_ASSERT(nev0 == DV); GGML_ASSERT(neq1 == N); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); // parallelize by q rows using ggml_vec_dot_f32 // total rows in q const int64_t nr = neq1*neq2*neq3; // rows per thread const int ith = params->ith; const int nth = params->nth; // disable for NUMA const bool disable_chunking = ggml_is_numa(); // 4x chunks per thread int nth_scaled = nth * 4; int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; int64_t nchunk = (nr + chunk_size - 1) / chunk_size; if (nth == 1 || nchunk < nth || disable_chunking) { nchunk = nth; } if (ith == 0) { // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. ggml_threadpool_chunk_set(params->threadpool, nth); } ggml_barrier(params->threadpool); // The number of elements in each chunk const int64_t dr = (nr + nchunk - 1) / nchunk; // The first chunk comes from our thread_id, the rest will get auto-assigned. 
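// Sketch of the dynamic scheduling below: each thread first handles the chunk
// matching its own index, then keeps claiming the next unprocessed chunk id
// from the shared threadpool counter (assumed here to behave like an atomic
// fetch-and-add seeded with nth above) until all nchunk chunks are done.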
int current_chunk = ith; while (current_chunk < nchunk) { const int64_t ir0 = dr * current_chunk; const int64_t ir1 = MIN(ir0 + dr, nr); ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); } } void ggml_compute_forward_flash_attn_ext( const ggml_compute_params * params, ggml_tensor * dst) { switch (dst->op_params[3]) { case GGML_PREC_DEFAULT: case GGML_PREC_F32: { // uses F32 accumulators ggml_compute_forward_flash_attn_ext_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_flash_attn_back static void ggml_compute_forward_flash_attn_back_f32( const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { const ggml_tensor * q = dst->src[0]; const ggml_tensor * k = dst->src[1]; const ggml_tensor * v = dst->src[2]; const ggml_tensor * d = dst->src[3]; GGML_TENSOR_LOCALS(int64_t, neq, q, ne) GGML_TENSOR_LOCALS(size_t, nbq, q, nb) GGML_TENSOR_LOCALS(int64_t, nek, k, ne) GGML_TENSOR_LOCALS(size_t, nbk, k, nb) GGML_TENSOR_LOCALS(int64_t, nev, v, ne) GGML_TENSOR_LOCALS(size_t, nbv, v, nb) GGML_TENSOR_LOCALS(int64_t, ned, d, ne) GGML_TENSOR_LOCALS(size_t, nbd, d, nb) GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int ith = params->ith; const int nth = params->nth; const int64_t D = neq0; const int64_t N = neq1; const int64_t P = nek1 - N; const int64_t M = P + N; const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); const int mxDM = MAX(D, Mup); // GGML_ASSERT(ne0 == D); // GGML_ASSERT(ne1 == N); GGML_ASSERT(P >= 0); GGML_ASSERT(nbq0 == sizeof(float)); GGML_ASSERT(nbk0 == sizeof(float)); GGML_ASSERT(nbv0 == sizeof(float)); GGML_ASSERT(neq0 == D); GGML_ASSERT(nek0 == D); GGML_ASSERT(nev1 == D); GGML_ASSERT(ned0 == D); GGML_ASSERT(neq1 == N); GGML_ASSERT(nek1 == N + P); GGML_ASSERT(nev1 == D); GGML_ASSERT(ned1 == N); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); if (ith == 0) { memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3); } ggml_barrier(params->threadpool); const int64_t elem_q = ggml_nelements(q); const int64_t elem_k = ggml_nelements(k); ggml_type result_type = dst->type; GGML_ASSERT(ggml_blck_size(result_type) == 1); const size_t tsize = ggml_type_size(result_type); const size_t offs_q = 0; const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); void * grad_q = (char *) dst->data; void * grad_k = (char *) dst->data + offs_k; void * grad_v = (char *) dst->data + offs_v; const size_t nbgq1 = nb0*neq0; const size_t nbgq2 = nb0*neq0*neq1; const size_t nbgq3 = nb0*neq0*neq1*neq2; const size_t nbgk1 = nb0*nek0; const size_t nbgk2 = nb0*nek0*nek1; const size_t nbgk3 = nb0*nek0*nek1*neq2; const size_t nbgv1 = nb0*nev0; const size_t nbgv2 = nb0*nev0*nev1; const size_t nbgv3 = nb0*nev0*nev1*neq2; // parallelize by k rows using ggml_vec_dot_f32 // total rows in k const int nr = nek2*nek3; // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); const float scale = 1.0f/sqrtf(D); //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); // how often k2 (and v2) is repeated in q2 int nrep = neq2/nek2; for (int ir = ir0; ir < ir1; ++ir) { // q indices const int ik3 = ir/(nek2); const int ik2 = ir - ik3*nek2; const int iq3 = ik3; const int 
id3 = ik3; const int iv3 = ik3; const int iv2 = ik2; for (int irep = 0; irep < nrep; ++irep) { const int iq2 = ik2 + irep*nek2; const int id2 = iq2; // (ik2 + irep*nek2) % nek2 == ik2 for (int iq1 = 0; iq1 < neq1; ++iq1) { const int id1 = iq1; // not sure about CACHE_LINE_SIZE_F32.. // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32); float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32); for (int i = M; i < Mup; ++i) { S[i] = -INFINITY; } const int64_t masked_begin = masked ? (P + iq1 + 1) : M; for (int64_t ic = 0; ic < masked_begin; ++ic) { // k indices const int ik1 = ic; // S indices const int i1 = ik1; ggml_vec_dot_f32(neq0, S + i1, 0, (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0, (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1); } // scale ggml_vec_scale_f32(masked_begin, S, scale); for (int64_t i = masked_begin; i < M; i++) { S[i] = -INFINITY; } // softmax // exclude known -INF S[..] values from max and loop // dont forget to set their SM values to zero { float max = -INFINITY; ggml_vec_max_f32(masked_begin, &max, S); ggml_float sum = 0.0; { #ifdef GGML_SOFT_MAX_ACCELERATE max = -max; vDSP_vsadd(SM, 1, &max, SM, 1, Mup); vvexpf(SM, SM, &Mup); ggml_vec_sum_f32(Mup, &sum, SM); #else sum = ggml_vec_soft_max_f32(Mup, SM, S, max); #endif } assert(sum > 0.0); sum = 1.0/sum; ggml_vec_scale_f32(masked_begin, SM, sum); } // step-by-step explanation { // forward-process shape grads from backward process // parallel_for ik2,ik3: // for irep: // iq2 = ik2 + irep*nek2 // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] // for iq1: // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 // S0 = -Inf [D,1,1,1] // ~S1[i] = dot(kcur[:D,i], qcur) // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur // ~S5[i] = dot(vcur[:,i], S4) // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] // ~dst[i,iq1,iq2,iq3] = S5[i] ^ // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] // dst backward-/ grad[dst] = d // // output gradients with their dependencies: // // grad[kcur] = grad[S1].T @ qcur // grad[S1] = diag_mask_zero(grad[S3], P) * scale // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) // grad[S4] = grad[S5] @ vcur // grad[S4] = d[:D,id1,id2,id3] @ vcur // grad[qcur] = grad[S1] @ kcur // grad[vcur] = grad[S5].T @ S4 // grad[vcur] = d[:D,id1,id2,id3].T @ S4 // // in post-order: // // S1 = qcur @ kcur.T // S2 = S1 * scale // S3 = diag_mask_inf(S2, P) // S4 = softmax(S3) // grad[S4] = d[:D,id1,id2,id3] @ vcur // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) // grad[S1] = diag_mask_zero(grad[S3], P) * scale // grad[qcur] = grad[S1] @ kcur // grad[kcur] = grad[S1].T @ qcur // grad[vcur] = d[:D,id1,id2,id3].T @ S4 // // using less variables (SM=S4): // // S = diag_mask_inf(qcur @ kcur.T * scale, P) // SM = softmax(S) // S = d[:D,iq1,iq2,iq3] @ vcur // dot_SM_gradSM = 
dot(SM, S) // S = SM * (S - dot(SM, S)) // S = diag_mask_zero(S, P) * scale // // grad[q][:D,iq1,iq2,iq3] += S @ kcur // grad[k][:D,:M,ik2,ik3] += S.T @ qcur // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM } // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] // for ic: // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] // exclude known future zero S[..] values from operation ggml_vec_set_f32(masked_begin, S, 0); for (int64_t ic = 0; ic < D; ++ic) { ggml_vec_mad_f32(masked_begin, S, (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3))); } // S = SM * (S - dot(SM, S)) float dot_SM_gradSM = 0; ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); ggml_vec_mul_f32 (masked_begin, S, S, SM); // S = diag_mask_zero(S, P) * scale // already done by above ggml_vec_set_f32 // exclude known zero S[..] values from operation ggml_vec_scale_f32(masked_begin, S, scale); // S shape [M,1] // SM shape [M,1] // kcur shape [D,M] // qcur shape [D,1] // vcur shape [M,D] // grad[q][:D,iq1,iq2,iq3] += S @ kcur // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] // for ic: // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] // exclude known zero S[..] values from loop for (int64_t ic = 0; ic < masked_begin; ++ic) { ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)), (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)), S[ic]); } // grad[k][:D,:M,iq2,iq3] += S.T @ qcur // for ic: // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] // exclude known zero S[..] values from loop for (int64_t ic = 0; ic < masked_begin; ++ic) { ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)), (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), S[ic]); } // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM // for ic: // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] // exclude known zero SM[..] 
values from mad for (int64_t ic = 0; ic < D; ++ic) { ggml_vec_mad_f32(masked_begin, (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)), SM, *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3))); } } } } } void ggml_compute_forward_flash_attn_back( const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { const ggml_tensor * q = dst->src[0]; switch (q->type) { case GGML_TYPE_F32: { ggml_compute_forward_flash_attn_back_f32(params, masked, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_ssm_conv static void ggml_compute_forward_ssm_conv_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // conv_x const ggml_tensor * src1 = dst->src[1]; // conv1d.weight const int ith = params->ith; const int nth = params->nth; const int nc = src1->ne[0]; // d_conv const int ncs = src0->ne[0]; // d_conv - 1 + n_t const int nr = src0->ne[1]; // d_inner const int n_t = dst->ne[1]; // tokens per sequence const int n_s = dst->ne[2]; // number of sequences in the batch GGML_ASSERT( dst->ne[0] == nr); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); GGML_ASSERT(src0->nb[1] == src0->ne[0]*sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); const int ir = ir1 - ir0; for (int i3 = 0; i3 < n_s; ++i3) { for (int i2 = 0; i2 < n_t; ++i2) { // {d_conv - 1 + n_t, d_inner, n_seqs} // sliding window const float * s = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i2*(src0->nb[0]) + i3*(src0->nb[2])); // {d_conv, d_inner, n_s} const float * c = (const float *) ((const char *) src1->data + ir0*(src1->nb[1])); // {d_conv, d_inner} float * x = (float *) ((char *) dst->data + ir0*(dst->nb[0]) + i2*(dst->nb[1]) + i3*(dst->nb[2])); // {d_inner, n_t, n_s} // TODO: transpose the output for smaller strides for big batches? 
// d_inner for (int i1 = 0; i1 < ir; ++i1) { // rowwise dot product // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision float sumf = 0.0f; // d_conv for (int i0 = 0; i0 < nc; ++i0) { sumf += s[i0 + i1*ncs] * c[i0 + i1*nc]; } x[i1] = sumf; } } } } void ggml_compute_forward_ssm_conv( const ggml_compute_params * params, ggml_tensor * dst) { switch (dst->src[0]->type) { case GGML_TYPE_F32: { ggml_compute_forward_ssm_conv_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_ssm_scan static void ggml_compute_forward_ssm_scan_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} const int ith = params->ith; const int nth = params->nth; const int64_t nc = src0->ne[0]; // d_state const int64_t nr = src0->ne[1]; // dim const int64_t nh = src1->ne[1]; // n_head const int64_t ng = src4->ne[1]; const int64_t nt = src1->ne[2]; // number of tokens per sequence const int64_t ns = src1->ne[3]; // number of sequences in the batch // can't use ggml_nbytes because src1 is not necessarily contiguous const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); GGML_ASSERT(ggml_nelements(src1) + nc*nr*nh*ns == ggml_nelements(dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); GGML_ASSERT(src2->nb[0] == sizeof(float)); GGML_ASSERT(src3->nb[0] == sizeof(float)); GGML_ASSERT(src4->nb[0] == sizeof(float)); GGML_ASSERT(src5->nb[0] == sizeof(float)); GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); GGML_ASSERT(nh % ng == 0); // heads per thread const int dh = (nh + nth - 1)/nth; // head range for this thread const int ih0 = dh*ith; const int ih1 = MIN(ih0 + dh, nh); const int32_t * ids = (const int32_t *) src6->data; for (int i3 = 0; i3 < ns; ++i3) { const float * s0 = (const float *) ((const char *) src0->data + ids[i3]*(src0->nb[3])); // {d_state, dim, nh, ns} float * s = ( float *) (( char *) dst->data + i3*(src0->nb[3]) + s_off); // {d_state, dim, nh, ns} for (int i2 = 0; i2 < nt; ++i2) { const float * x = (const float *) ((const char *) src1->data + i2*(src1->nb[2]) + i3*(src1->nb[3])); // {dim, nh, nt, ns} const float * dt = (const float *) ((const char *) src2->data + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {nh, nt, ns} const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[2]) + i3*(src4->nb[3])); // {d_state, ng, nt, ns} const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[2]) + i3*(src5->nb[3])); // {d_state, ng, nt, ns} float * y = ( float *) (( char *) dst->data + i2*(nh*nr*sizeof(float)) + i3*(nt*nh*nr*sizeof(float))); // {dim, nh, nt, ns} if (src3->ne[0] == 1) { // Mamba-2 has a scalar decay factor per head; dA can be outside the state-wise loop // n_head for (int h = ih0; h < ih1; ++h) { // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 const 
float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); const float dA = expf(dt_soft_plus * A[h]); const int g = h / (nh / ng); // repeat_interleave // dim for (int i1 = 0; i1 < nr; ++i1) { const int ii = i1 + h*nr; const float x_dt = x[ii] * dt_soft_plus; float sumf = 0.0f; #if defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) const int ggml_f32_epr = svcntw(); const int ggml_f32_step = 1 * ggml_f32_epr; const int np = (nc & ~(ggml_f32_step - 1)); GGML_F32_VEC sum = GGML_F32_VEC_ZERO; GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); for (int i = 0; i < np; i += ggml_f32_step) { // TODO: maybe unroll more? for (int j = 0; j < 1; j++) { GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j*ggml_f32_epr + ii*nc); GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j*ggml_f32_epr + g*nc); GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j*ggml_f32_epr + g*nc); t0 = GGML_F32_VEC_MUL(t0, adA); t1 = GGML_F32_VEC_MUL(t1, axdt); t0 = GGML_F32_VEC_ADD(t0, t1); sum = GGML_F32_VEC_FMA(sum, t0, t2); GGML_F32_VEC_STORE(s + i + j*ggml_f32_epr + ii*nc, t0); } } sumf = GGML_F32xt_REDUCE_ONE(sum); #elif defined(__riscv_v_intrinsic) // todo: RVV implementation const int np = 0; #else const int np = (nc & ~(GGML_F32_STEP - 1)); GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); GGML_F32_VEC ax[GGML_F32_ARR]; GGML_F32_VEC ay[GGML_F32_ARR]; GGML_F32_VEC az[GGML_F32_ARR]; for (int i = 0; i < np; i += GGML_F32_STEP) { for (int j = 0; j < GGML_F32_ARR; j++) { ax[j] = GGML_F32_VEC_LOAD(s0 + i + j*GGML_F32_EPR + ii*nc); ay[j] = GGML_F32_VEC_LOAD(B + i + j*GGML_F32_EPR + g*nc); az[j] = GGML_F32_VEC_LOAD(C + i + j*GGML_F32_EPR + g*nc); ax[j] = GGML_F32_VEC_MUL(ax[j], adA); ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); GGML_F32_VEC_STORE(s + i + j*GGML_F32_EPR + ii*nc, ax[j]); } } // reduce sum0..sum3 to sum0 GGML_F32_VEC_REDUCE(sumf, sum); #endif #else const int np = 0; #endif // d_state for (int i0 = np; i0 < nc; ++i0) { const int i = i0 + ii*nc; const int ig = i0 + g*nc; // state = prev_state * dA + dB * x const float state = (s0[i] * dA) + (B[ig] * x_dt); // y = rowwise_dotprod(state, C) sumf += state * C[ig]; s[i] = state; } y[ii] = sumf; } } } else { // Mamba-1 has an element-wise decay factor for the states // n_head for (int h = ih0; h < ih1; ++h) { // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); const int g = h / (nh / ng); // repeat_interleave // dim for (int i1 = 0; i1 < nr; ++i1) { const int ii = i1 + h*nr; const float x_dt = x[ii] * dt_soft_plus; #if defined(__ARM_FEATURE_SVE) svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); svfloat32_t r1_vector = GGML_F32_VEC_ZERO; // d_state // TODO: what happens when (d_state % svcntw()) != 0? 
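// The SVE loop below vectorizes the same per-state recurrence as the scalar
// fallback in the #else branch:
//   state = s0[i0 + ii*nc]*expf(dt_soft_plus*A[i0 + h*nc]) + B[i0 + g*nc]*x_dt
//   y[ii] = sum over i0 of state*C[i0 + g*nc]
// handling svcntw() lanes of the d_state dimension per iteration.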
for (int64_t k = 0; k < nc; k += svcntw()) { svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h*nc + k]); svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g*nc]); svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g*nc]); svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii*nc + k]); svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); t1 = exp_ps_sve(svptrue_b32(), t1); svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); GGML_F32_VEC_STORE(&s[ii*nc + k], vs0); } y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); #else float sumf = 0.0f; // NOTE: can't really use GGML_SIMD here because d_state is usually 16 // and also because expf is used within the loop. // d_state for (int i0 = 0; i0 < nc; ++i0) { const int i = i0 + ii*nc; const int ig = i0 + g*nc; // state = prev_state * dA + dB * x const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h*nc])) + (B[ig] * x_dt); // y = rowwise_dotprod(state, C) sumf += state * C[ig]; s[i] = state; } y[ii] = sumf; #endif } } } // use the output as the source when it's not the first token-wise iteration s0 = s; } } } void ggml_compute_forward_ssm_scan( const ggml_compute_params * params, ggml_tensor * dst) { switch (dst->src[0]->type) { case GGML_TYPE_F32: { ggml_compute_forward_ssm_scan_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_win_part static void ggml_compute_forward_win_part_f32( const ggml_compute_params * params, ggml_tensor * dst) { GGML_UNUSED(params); const ggml_tensor * src0 = dst->src[0]; GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) const int32_t nep0 = ((const int32_t *)(dst->op_params))[0]; const int32_t nep1 = ((const int32_t *)(dst->op_params))[1]; const int32_t w = ((const int32_t *)(dst->op_params))[2]; assert(ne00 == ne0); assert(ne3 == nep0*nep1); // TODO: optimize / multi-thread for (int py = 0; py < nep1; ++py) { for (int px = 0; px < nep0; ++px) { const int64_t i3 = py*nep0 + px; for (int64_t i2 = 0; i2 < ne2; ++i2) { for (int64_t i1 = 0; i1 < ne1; ++i1) { for (int64_t i0 = 0; i0 < ne0; ++i0) { const int64_t i02 = py*w + i2; const int64_t i01 = px*w + i1; const int64_t i00 = i0; const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0; const int64_t j = i02*ne01*ne00 + i01*ne00 + i00; if (py*w + i2 >= ne02 || px*w + i1 >= ne01) { ((float *) dst->data)[i] = 0.0f; } else { ((float *) dst->data)[i] = ((float *) src0->data)[j]; } } } } } } } void ggml_compute_forward_win_part( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_win_part_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_win_unpart static void ggml_compute_forward_win_unpart_f32( const ggml_compute_params * params, ggml_tensor * dst) { GGML_UNUSED(params); const ggml_tensor * src0 = dst->src[0]; GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) const int32_t w = ((const int32_t *)(dst->op_params))[0]; // padding const int px = (w - ne1%w)%w; //const int py = (w - ne2%w)%w; const int npx = (px + ne1)/w; //const int npy = (py + ne2)/w; assert(ne0 == ne00); // TODO: optimize / multi-thread for (int64_t i2 = 0; i2 < ne2; ++i2) { for (int64_t i1 = 0; i1 < ne1; ++i1) { for (int64_t i0 = 0; i0 < ne0; ++i0) { const int ip2 = i2/w; const int ip1 = i1/w; const int64_t i02 = i2%w; const int64_t i01 = i1%w; const int64_t i00 = i0; const int64_t i = 
(ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00; const int64_t j = i2*ne1*ne0 + i1*ne0 + i0; ((float *) dst->data)[j] = ((float *) src0->data)[i]; } } } } void ggml_compute_forward_win_unpart( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_win_unpart_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } //gmml_compute_forward_unary void ggml_compute_forward_unary( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_unary_op op = ggml_get_unary_op(dst); switch (op) { case GGML_UNARY_OP_ABS: { ggml_compute_forward_abs(params, dst); } break; case GGML_UNARY_OP_SGN: { ggml_compute_forward_sgn(params, dst); } break; case GGML_UNARY_OP_NEG: { ggml_compute_forward_neg(params, dst); } break; case GGML_UNARY_OP_STEP: { ggml_compute_forward_step(params, dst); } break; case GGML_UNARY_OP_TANH: { ggml_compute_forward_tanh(params, dst); } break; case GGML_UNARY_OP_ELU: { ggml_compute_forward_elu(params, dst); } break; case GGML_UNARY_OP_RELU: { ggml_compute_forward_relu(params, dst); } break; case GGML_UNARY_OP_SIGMOID: { ggml_compute_forward_sigmoid(params, dst); } break; case GGML_UNARY_OP_GELU: { ggml_compute_forward_gelu(params, dst); } break; case GGML_UNARY_OP_GELU_ERF: { ggml_compute_forward_gelu_erf(params, dst); } break; case GGML_UNARY_OP_GELU_QUICK: { ggml_compute_forward_gelu_quick(params, dst); } break; case GGML_UNARY_OP_SILU: { ggml_compute_forward_silu(params, dst); } break; case GGML_UNARY_OP_HARDSWISH: { ggml_compute_forward_hardswish(params, dst); } break; case GGML_UNARY_OP_HARDSIGMOID: { ggml_compute_forward_hardsigmoid(params, dst); } break; case GGML_UNARY_OP_EXP: { ggml_compute_forward_exp(params, dst); } break; case GGML_UNARY_OP_FLOOR: { ggml_compute_forward_floor(params, dst); } break; case GGML_UNARY_OP_CEIL: { ggml_compute_forward_ceil(params, dst); } break; case GGML_UNARY_OP_ROUND: { ggml_compute_forward_round(params, dst); } break; case GGML_UNARY_OP_TRUNC: { ggml_compute_forward_trunc(params, dst); } break; case GGML_UNARY_OP_XIELU: { ggml_compute_forward_xielu(params, dst); } break; case GGML_UNARY_OP_EXPM1: { ggml_compute_forward_expm1(params, dst); } break; case GGML_UNARY_OP_SOFTPLUS: { ggml_compute_forward_softplus(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } //ggml_compute_forward_glu void ggml_compute_forward_glu( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_glu_op op = ggml_get_glu_op(dst); switch (op) { case GGML_GLU_OP_REGLU: { ggml_compute_forward_reglu(params, dst); } break; case GGML_GLU_OP_GEGLU: { ggml_compute_forward_geglu(params, dst); } break; case GGML_GLU_OP_SWIGLU: { ggml_compute_forward_swiglu(params, dst); } break; case GGML_GLU_OP_SWIGLU_OAI: { ggml_compute_forward_swiglu_oai(params, dst); } break; case GGML_GLU_OP_GEGLU_ERF: { ggml_compute_forward_geglu_erf(params, dst); } break; case GGML_GLU_OP_GEGLU_QUICK: { ggml_compute_forward_geglu_quick(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_get_rel_pos static void ggml_compute_forward_get_rel_pos_f16( const ggml_compute_params * params, ggml_tensor * dst) { GGML_UNUSED(params); const ggml_tensor * src0 = dst->src[0]; // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 GGML_TENSOR_UNARY_OP_LOCALS const int64_t w = ne1; ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; 
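// The gather below mirrors the referenced segment-anything get_rel_pos logic:
// for each (i2, i1) pair the relative coordinate pos = (w - i1 - 1) + i2
// selects the row of src0 whose ne00 elements are copied into dst.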
ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; for (int64_t i2 = 0; i2 < ne2; ++i2) { for (int64_t i1 = 0; i1 < ne1; ++i1) { const int64_t pos = (w - i1 - 1) + i2; for (int64_t i0 = 0; i0 < ne0; ++i0) { dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0]; } } } } void ggml_compute_forward_get_rel_pos( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F16: case GGML_TYPE_BF16: { ggml_compute_forward_get_rel_pos_f16(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_add_rel_pos static void ggml_compute_forward_add_rel_pos_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; if (!inplace) { if (params->ith == 0) { memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); } ggml_barrier(params->threadpool); } // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 float * src1_data = (float *) src1->data; float * src2_data = (float *) src2->data; float * dst_data = (float *) dst->data; const int64_t ne10 = src1->ne[0]; const int64_t ne11 = src1->ne[1]; const int64_t ne12 = src1->ne[2]; const int64_t ne13 = src1->ne[3]; const int ith = params->ith; const int nth = params->nth; // total patches in dst const int np = ne13; // patches per thread const int dp = (np + nth - 1)/nth; // patch range for this thread const int ip0 = dp*ith; const int ip1 = MIN(ip0 + dp, np); for (int64_t i13 = ip0; i13 < ip1; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { for (int64_t i11 = 0; i11 < ne11; ++i11) { const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10; for (int64_t i10 = 0; i10 < ne10; ++i10) { const int64_t jp0 = jp1 + i10; const float src1_e = src1_data[jp0]; const float src2_e = src2_data[jp0]; const int64_t jdh = jp0 * ne10; const int64_t jdw = jdh - (ne10 - 1) * i10; for (int64_t j = 0; j < ne10; ++j) { dst_data[jdh + j ] += src2_e; dst_data[jdw + j*ne10] += src1_e; } } } } } } void ggml_compute_forward_add_rel_pos( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_add_rel_pos_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_rwkv_wkv6 static void ggml_compute_forward_rwkv_wkv6_f32( const ggml_compute_params * params, ggml_tensor * dst) { const int64_t T = dst->src[1]->ne[2]; const int64_t C = dst->ne[0]; const int64_t HEADS = dst->src[1]->ne[1]; const int64_t n_seqs = dst->src[5]->ne[1]; const int64_t head_size = C / HEADS; float * dst_data = (float *) dst->data; float * state = ((float *) dst->data) + C * T; const int ith = params->ith; const int nth = params->nth; if (ith >= HEADS) { return; } const int h_start = (HEADS * ith) / nth; const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? 
(HEADS * (ith + 1)) / nth : HEADS; float * k = (float *) dst->src[0]->data; float * v = (float *) dst->src[1]->data; float * r = (float *) dst->src[2]->data; float * time_faaaa = (float *) dst->src[3]->data; float * time_decay = (float *) dst->src[4]->data; size_t t_stride = HEADS * head_size; // Same to C size_t h_stride = C / HEADS; GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS size_t h_stride_2d = head_size * head_size; if (ith == 0) { memset(dst_data, 0, T * C * sizeof(float)); } ggml_barrier(params->threadpool); #if defined(__AVX__) && !defined(__AVX512F__) #define GGML_F32X GGML_F32x8 #define GGML_F32X_SET1 GGML_F32x8_SET1 #define GGML_F32X_LOAD GGML_F32x8_LOAD #define GGML_F32X_STORE GGML_F32x8_STORE #define GGML_F32X_MUL GGML_F32x8_MUL #define GGML_F32X_FMA GGML_F32x8_FMA #define WKV_VECTOR_SIZE 8 #elif defined(__AVX512F__) #define GGML_F32X GGML_F32x16 #define GGML_F32X_SET1 GGML_F32x16_SET1 #define GGML_F32X_LOAD GGML_F32x16_LOAD #define GGML_F32X_STORE GGML_F32x16_STORE #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define WKV_VECTOR_SIZE 16 #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) #define GGML_F32X GGML_F32xt #define GGML_F32X_SET1 GGML_F32xt_SET1 #define GGML_F32X_LOAD GGML_F32xt_LOAD #define GGML_F32X_STORE GGML_F32xt_STORE #define GGML_F32X_MUL GGML_F32xt_MUL #define GGML_F32X_FMA GGML_F32xt_FMA #define WKV_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 #define GGML_F32X_LOAD GGML_F32x4_LOAD #define GGML_F32X_STORE GGML_F32x4_STORE #define GGML_F32X_MUL GGML_F32x4_MUL #define GGML_F32X_FMA GGML_F32x4_FMA #define WKV_VECTOR_SIZE 4 #endif #ifdef WKV_VECTOR_SIZE int wkv_vector_size; #if defined(__ARM_FEATURE_SVE) wkv_vector_size = svcntw(); #else wkv_vector_size = WKV_VECTOR_SIZE; #endif const int64_t vec_count = head_size / wkv_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; size_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[5]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { size_t h_offset = h * h_stride; size_t t_h_offset = t_offset + h_offset; size_t h_2d_offset = h * h_stride_2d; for (int64_t i = 0; i < head_size; i++) { size_t t_h_i_offset = t_h_offset + i; size_t h_i_offset = h_offset + i; size_t h_2d_i_offset = h_2d_offset + i * h_stride; float k_val = k[t_h_i_offset]; float r_val = r[t_h_i_offset]; float time_faaaa_val = time_faaaa[h_i_offset]; float time_decay_val = time_decay[t_h_i_offset]; // Broadcast scalar values to vectors GGML_F32X k_vec = GGML_F32X_SET1(k_val); GGML_F32X r_vec = GGML_F32X_SET1(r_val); GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); for (int64_t j = 0; j < vec_count; j++) { size_t base_j = j * wkv_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; // Load x elements at once GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); // Compute kv = v * k GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); // Compute temp = kv * time_faaaa + prev_state GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); // Update dst: dst += temp * r dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); // Update state: state = prev_state * time_decay + kv GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); } // Handle remaining elements, this will not be used. for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; float kv_val = v_val * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; float temp_val = kv_val * time_faaaa_val + prev_state_val; dst_data[t_h_j_offset] += temp_val * r_val; state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; } } } } #else // basically fused operations: // dst = r @ (time_faaaa * (k @ v) + state), // state = time_decay * state + (k @ v), // recursive through each token for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; size_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[5]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { size_t h_offset = h * h_stride; size_t t_h_offset = t_offset + h_offset; size_t h_2d_offset = h * h_stride_2d; for (int64_t i = 0; i < head_size; i++) { size_t t_h_i_offset = t_h_offset + i; size_t h_i_offset = h_offset + i; size_t h_2d_i_offset = h_2d_offset + i * h_stride; float k_val = k[t_h_i_offset]; float r_val = r[t_h_i_offset]; float time_faaaa_val = time_faaaa[h_i_offset]; // RWKV v6: different time_decay for each token. 
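// Scalar fallback of the same per-element recurrence as the SIMD path above:
//   temp      = (v*k)*time_faaaa + state_prev
//   dst      += temp*r
//   state_cur = state_prev*time_decay + (v*k)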
float time_decay_val = time_decay[t_h_i_offset]; for (int64_t j = 0; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; float kv_val = v_val * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; float temp_val = kv_val * time_faaaa_val + prev_state_val; dst_data[t_h_j_offset] += temp_val * r_val; state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; } } } } #endif } void ggml_compute_forward_rwkv_wkv6( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_rwkv_wkv6_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_gla static void ggml_compute_forward_gla_f32( const ggml_compute_params * params, ggml_tensor * dst) { const int64_t T = dst->src[1]->ne[2]; const int64_t C = dst->ne[0]; const int64_t HEADS = dst->src[1]->ne[1]; const int64_t n_seqs = dst->src[4]->ne[1]; const int64_t head_size = C / HEADS; const float scale = ggml_get_op_params_f32(dst, 0); float * dst_data = (float *) dst->data; float * state = ((float *) dst->data) + C * T; const int ith = params->ith; const int nth = params->nth; if (ith >= HEADS) { return; } const int h_start = (HEADS * ith) / nth; const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; float * k = (float *) dst->src[0]->data; float * v = (float *) dst->src[1]->data; float * q = (float *) dst->src[2]->data; float * g = (float *) dst->src[3]->data; size_t t_stride = HEADS * head_size; // Same to C size_t h_stride = C / HEADS; GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS size_t h_stride_2d = head_size * head_size; if (ith == 0) { memset(dst_data, 0, T * C * sizeof(float)); } ggml_barrier(params->threadpool); #if defined(__AVX__) && !defined(__AVX512F__) #define GGML_F32X GGML_F32x8 #define GGML_F32X_SET1 GGML_F32x8_SET1 #define GGML_F32X_LOAD GGML_F32x8_LOAD #define GGML_F32X_STORE GGML_F32x8_STORE #define GGML_F32X_MUL GGML_F32x8_MUL #define GGML_F32X_FMA GGML_F32x8_FMA #define GLA_VECTOR_SIZE 8 #elif defined(__AVX512F__) #define GGML_F32X GGML_F32x16 #define GGML_F32X_SET1 GGML_F32x16_SET1 #define GGML_F32X_LOAD GGML_F32x16_LOAD #define GGML_F32X_STORE GGML_F32x16_STORE #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define GLA_VECTOR_SIZE 16 #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) #define GGML_F32X GGML_F32xt #define GGML_F32X_SET1 GGML_F32xt_SET1 #define GGML_F32X_LOAD GGML_F32xt_LOAD #define GGML_F32X_STORE GGML_F32xt_STORE #define GGML_F32X_MUL GGML_F32xt_MUL #define GGML_F32X_FMA GGML_F32xt_FMA #define GLA_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 #define GGML_F32X_LOAD GGML_F32x4_LOAD #define GGML_F32X_STORE GGML_F32x4_STORE #define GGML_F32X_MUL GGML_F32x4_MUL #define GGML_F32X_FMA GGML_F32x4_FMA #define GLA_VECTOR_SIZE 4 #endif #ifdef GLA_VECTOR_SIZE int gla_vector_size; #if defined(__ARM_FEATURE_SVE) gla_vector_size = svcntw(); #else gla_vector_size = GLA_VECTOR_SIZE; #endif const int64_t vec_count = head_size / gla_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; size_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[4]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { size_t h_offset = h * h_stride; size_t t_h_offset = t_offset + h_offset; size_t h_2d_offset = h * h_stride_2d; for (int64_t i = 0; i < head_size; i++) { size_t t_h_i_offset = t_h_offset + i; size_t h_2d_i_offset = h_2d_offset + i * h_stride; float k_val = k[t_h_i_offset]; float q_val = q[t_h_i_offset] * scale; float g_val = g[t_h_i_offset]; // Broadcast scalar values to vectors GGML_F32X k_vec = GGML_F32X_SET1(k_val); GGML_F32X q_vec = GGML_F32X_SET1(q_val); GGML_F32X g_vec = GGML_F32X_SET1(g_val); for (int64_t j = 0; j < vec_count; j++) { size_t base_j = j * gla_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; // Load x elements at once GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); // Compute kv = v * k GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); // Compute temp = prev_state * g + kv GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); // Update dst: dst += temp * q dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); // Update state GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); } // Handle remaining elements, this will not be used. for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; float kv_val = v_val * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; float temp_val = kv_val + prev_state_val * g_val; dst_data[t_h_j_offset] += temp_val * q_val; state_cur[h_2d_i_j_offset] = temp_val; } } } } #else for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; size_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[4]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { size_t h_offset = h * h_stride; size_t t_h_offset = t_offset + h_offset; size_t h_2d_offset = h * h_stride_2d; for (int64_t i = 0; i < head_size; i++) { size_t t_h_i_offset = t_h_offset + i; size_t h_2d_i_offset = h_2d_offset + i * h_stride; float k_val = k[t_h_i_offset]; float q_val = q[t_h_i_offset] * scale; float g_val = g[t_h_i_offset]; for (int64_t j = 0; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; float kv_val = v_val * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; float temp_val = prev_state_val * g_val + kv_val; dst_data[t_h_j_offset] += temp_val * q_val; state_cur[h_2d_i_j_offset] = temp_val; } } } } #endif } void ggml_compute_forward_gla( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_gla_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) GGML_TENSOR_BINARY_OP_LOCALS; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(ne00 == ne01); // A must be square GGML_ASSERT(ne0 == ne10); // solution cols == B cols GGML_ASSERT(ne1 == ne11); // solution rows == B rows GGML_ASSERT(ne02 == ne12 && ne12 == ne2); GGML_ASSERT(ne03 == ne13 && ne13 == ne3); const int ith = params->ith; const int nth = params->nth; const int64_t k = ne10; // number of RHS columns const int64_t n = ne11; // A is n×n const int64_t nr = ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit // chunks per thread const int64_t dr = (nr + nth - 1)/nth; // chunk range for this thread const int64_t ir0 = dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); const float * A = (const float *) src0->data; // [n, n, B1, B2] const float * B = (const float *) src1->data; // [n, k, B1, B2] float * X = ( float *) dst->data; // [n, k, B1, B2] for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*k); const int64_t i02 = (ir - i03*ne02*k)/k; const int64_t i01 = (ir - i03*ne02*k - i02*k); const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); for (int64_t i00 = 0; i00 < n; ++i00) { float sum = 0.0f; for (int64_t t = 0; t < i00; ++t) { sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; } const float diag = A_batch[i00 * n + i00]; assert(diag != 0.0f && "Zero diagonal in triangular matrix"); X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; } } } void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { ggml_compute_forward_solve_tri_f32(params, dst); } else { GGML_ABORT("fatal error"); } } // ggml_compute_forward_rwkv_wkv7 static void ggml_compute_forward_rwkv_wkv7_f32( const ggml_compute_params * params, ggml_tensor * dst) { const 
int64_t T = dst->src[1]->ne[2]; const int64_t C = dst->ne[0]; const int64_t HEADS = dst->src[1]->ne[1]; const int64_t n_seqs = dst->src[6]->ne[1]; const int64_t head_size = C / HEADS; float * dst_data = (float *) dst->data; float * state = ((float *) dst->data) + C * T; const int ith = params->ith; const int nth = params->nth; if (ith >= HEADS) { return; } const int h_start = (HEADS * ith) / nth; const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; float * r = (float *) dst->src[0]->data; float * w = (float *) dst->src[1]->data; float * k = (float *) dst->src[2]->data; float * v = (float *) dst->src[3]->data; float * a = (float *) dst->src[4]->data; float * b = (float *) dst->src[5]->data; int64_t t_stride = HEADS * head_size; // Same to C int64_t h_stride = C / HEADS; GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS int64_t h_stride_2d = head_size * head_size; #if defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) // scalar Route to scalar implementation //TODO: Write SVE code and RVV code for (int64_t t = 0; t < T; t++) { int64_t t_offset = t * t_stride; int64_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { int64_t h_offset = h * h_stride; int64_t t_h_offset = t_offset + h_offset; int64_t h_2d_offset = h * h_stride_2d; for (int64_t i = 0; i < head_size; i++) { int64_t t_h_i_offset = t_h_offset + i; int64_t h_2d_i_offset = h_2d_offset + i * h_stride; float v_val = v[t_h_i_offset]; float sa = 0, result = 0; for (int64_t j = 0; j < head_size; j++) { sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; } for (int64_t j = 0; j < head_size; j++) { int64_t t_h_j_offset = t_h_offset + j; int64_t h_2d_i_j_offset = h_2d_i_offset + j; float r_val = r[t_h_j_offset]; float w_val = w[t_h_j_offset]; float k_val = k[t_h_j_offset]; float b_val = b[t_h_j_offset]; float kv_val = v_val * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; result += state_cur[h_2d_i_j_offset] * r_val; } dst_data[t_h_i_offset] = result; } } } #else for (int64_t t = 0; t < T; t++) { int64_t t_offset = t * t_stride; int64_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[6]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { int64_t h_offset = h * h_stride; int64_t t_h_offset = t_offset + h_offset; int64_t h_2d_offset = h * h_stride_2d; for (int64_t ii = 0; ii < head_size; ii++) { int64_t t_h_i_offset = t_h_offset + ii; int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); float sa = 0; { GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; GGML_F32_VEC ax[GGML_F32_ARR]; GGML_F32_VEC ay[GGML_F32_ARR]; for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); } } GGML_F32_VEC_REDUCE(sa, sum); } GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); int64_t j = 0; GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; for (; j < head_size; j += GGML_F32_STEP) { for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); // kv + s * decay + sa * b state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); } } GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); // There shouldn't be left-overs though. for (; j < head_size; j++) { int64_t t_h_j_offset = t_h_offset + j; int64_t h_2d_i_j_offset = h_2d_i_offset + j; float r_val = r[t_h_j_offset]; float w_val = w[t_h_j_offset]; float k_val = k[t_h_j_offset]; float b_val = b[t_h_j_offset]; float kv_val = v[t_h_i_offset] * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; } } } } #endif #else for (int64_t t = 0; t < T; t++) { int64_t t_offset = t * t_stride; int64_t state_offset = head_size * C * (t / (T / n_seqs)); float * state_cur = state + state_offset; float * state_prev = t % (T / n_seqs) ? 
state_cur : (float*)dst->src[6]->data + state_offset; for (int64_t h = h_start; h < h_end; h++) { int64_t h_offset = h * h_stride; int64_t t_h_offset = t_offset + h_offset; int64_t h_2d_offset = h * h_stride_2d; for (int64_t i = 0; i < head_size; i++) { int64_t t_h_i_offset = t_h_offset + i; int64_t h_2d_i_offset = h_2d_offset + i * h_stride; float v_val = v[t_h_i_offset]; float sa = 0, result = 0; for (int64_t j = 0; j < head_size; j++) { sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; } for (int64_t j = 0; j < head_size; j++) { int64_t t_h_j_offset = t_h_offset + j; int64_t h_2d_i_j_offset = h_2d_i_offset + j; float r_val = r[t_h_j_offset]; float w_val = w[t_h_j_offset]; float k_val = k[t_h_j_offset]; float b_val = b[t_h_j_offset]; float kv_val = v_val * k_val; float prev_state_val = state_prev[h_2d_i_j_offset]; state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; result += state_cur[h_2d_i_j_offset] * r_val; } dst_data[t_h_i_offset] = result; } } } #endif } void ggml_compute_forward_rwkv_wkv7( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_rwkv_wkv7_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_map_custom1 void ggml_compute_forward_map_custom1( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * a = dst->src[0]; struct ggml_map_custom1_op_params p; memcpy(&p, dst->op_params, sizeof(p)); p.fun(dst, a, params->ith, params->nth, p.userdata); } // ggml_compute_forward_map_custom2 void ggml_compute_forward_map_custom2( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * a = dst->src[0]; const ggml_tensor * b = dst->src[1]; struct ggml_map_custom2_op_params p; memcpy(&p, dst->op_params, sizeof(p)); p.fun(dst, a, b, params->ith, params->nth, p.userdata); } // ggml_compute_forward_map_custom3 void ggml_compute_forward_map_custom3( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * a = dst->src[0]; const ggml_tensor * b = dst->src[1]; const ggml_tensor * c = dst->src[2]; struct ggml_map_custom3_op_params p; memcpy(&p, dst->op_params, sizeof(p)); p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); } // ggml_compute_forward_custom void ggml_compute_forward_custom( const struct ggml_compute_params * params, struct ggml_tensor * dst) { struct ggml_custom_op_params p; memcpy(&p, dst->op_params, sizeof(p)); p.fun(dst, params->ith, params->nth, p.userdata); } // ggml_compute_forward_cross_entropy_loss static void ggml_compute_forward_cross_entropy_loss_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); GGML_ASSERT(ggml_are_same_shape(src0, src1)); GGML_ASSERT(ggml_is_scalar(dst)); GGML_ASSERT(dst->type == GGML_TYPE_F32); // TODO: handle transposed/permuted matrices const int64_t nc = src0->ne[0]; const int64_t nr = ggml_nrows(src0); const int ith = params->ith; const int nth = params->nth; float * sums = (float *) params->wdata; float * st = ((float *) params->wdata) + nth + ith*nc; float sum_thread = 0.0f; GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this 
thread const int64_t ir0 = dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); for (int64_t i1 = ir0; i1 < ir1; ++i1) { const float * s0 = (const float *)((const char *) src0->data + i1*src0->nb[1]); const float * s1 = (const float *)((const char *) src1->data + i1*src1->nb[1]); #ifndef NDEBUG for (int64_t i = 0; i < nc; ++i) { //printf("p[%d] = %f\n", i, p[i]); assert(!isnan(s0[i])); assert(!isnan(s1[i])); } #endif float max = -INFINITY; ggml_vec_max_f32(nc, &max, s0); const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); assert(sum_softmax >= 0.0); ggml_vec_add1_f32(nc, st, st, -sum_softmax); ggml_vec_mul_f32(nc, st, st, s1); float sum_st = 0.0f; ggml_vec_sum_f32(nc, &sum_st, st); sum_thread += sum_st; #ifndef NDEBUG for (int64_t i = 0; i < nc; ++i) { assert(!isnan(st[i])); assert(!isinf(st[i])); } #endif } sums[ith] = sum_thread; ggml_barrier(params->threadpool); if (ith == 0) { float * dp = (float *) dst->data; ggml_vec_sum_f32(nth, dp, sums); dp[0] *= -1.0f / (float) nr; } } void ggml_compute_forward_cross_entropy_loss( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_cross_entropy_loss_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } // ggml_compute_forward_cross_entropy_loss_back static void ggml_compute_forward_cross_entropy_loss_back_f32( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_is_contiguous(src0f)); GGML_ASSERT(ggml_is_contiguous(src1f)); GGML_ASSERT(ggml_is_contiguous(grad)); GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); const int64_t ith = params->ith; const int64_t nth = params->nth; // TODO: handle transposed/permuted matrices const int64_t nc = src0f->ne[0]; const int64_t nr = ggml_nrows(src0f); // rows per thread const int64_t dr = (nr + nth - 1)/nth; // row range for this thread const int64_t ir0 = dr*ith; const int64_t ir1 = MIN(ir0 + dr, nr); const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; for (int64_t i1 = ir0; i1 < ir1; i1++) { float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]); const float * s0 = (const float *)((const char *) src0f->data + i1*src0f->nb[1]); const float * s1 = (const float *)((const char *) src1f->data + i1*src1f->nb[1]); #ifndef NDEBUG for (int64_t i = 0; i < nc; ++i) { //printf("p[%d] = %f\n", i, p[i]); assert(!isnan(s0[i])); assert(!isnan(s1[i])); } #endif // soft_max float max = -INFINITY; ggml_vec_max_f32(nc, &max, s0); const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); assert(sum > 0.0); ggml_vec_scale_f32(nc, ds0, 1.0/sum); // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr ggml_vec_sub_f32(nc, ds0, ds0, s1); ggml_vec_scale_f32(nc, ds0, d_by_nr); #ifndef NDEBUG for (int64_t i = 0; i < nc; ++i) { assert(!isnan(ds0[i])); assert(!isinf(ds0[i])); } #endif } } void ggml_compute_forward_cross_entropy_loss_back( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } static void ggml_compute_forward_opt_step_adamw_f32( 
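/*
 * editor note (hedged): the cross-entropy routines above implement the fused
 * softmax + cross-entropy loss and its gradient over nr rows of nc logits:
 *
 *     forward:   loss      = -1/nr * sum_i sum_c t[i][c] * log_softmax(s[i])[c]
 *     backward:  d s[i][c] = (softmax(s[i])[c] - t[i][c]) * d_loss / nr
 *
 * where s = src0 (logits) and t = src1 (target probabilities). The forward pass
 * writes per-thread partial sums into params->wdata and thread 0 reduces them and
 * applies the -1/nr factor after ggml_barrier().
 */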
const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src0_grad = dst->src[1]; const ggml_tensor * src0_grad_m = dst->src[2]; const ggml_tensor * src0_grad_v = dst->src[3]; const ggml_tensor * adamw_params = dst->src[4]; GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); GGML_ASSERT(ggml_nelements(adamw_params) == 7); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(nb00 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; // row range for this thread const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); const float alpha = adamw_params_ptr[0]; const float beta1 = adamw_params_ptr[1]; const float beta2 = adamw_params_ptr[2]; const float eps = adamw_params_ptr[3]; const float wd = adamw_params_ptr[4]; const float beta1h = adamw_params_ptr[5]; const float beta2h = adamw_params_ptr[6]; const float keep = 1.f - alpha * wd; for (int ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*ne01); const int64_t i02 = (ir - i03*ne02*ne01)/ne01; const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); const size_t offset = i03*nb03 + i02*nb02 + i01*nb01; float * w = (float *) ((char *) src0->data + offset); // weight const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad float * m = (float *) ((char *) src0_grad_m->data + offset); float * v = (float *) ((char *) src0_grad_v->data + offset); for (int i00 = 0; i00 < ne00; ++i00) { m[i00] = m[i00]*beta1 + g[i00]*(1.0f - beta1); v[i00] = v[i00]*beta2 + g[i00]*g[i00]*(1.0f - beta2); const float mh = m[i00]*beta1h; const float vh = sqrtf(v[i00]*beta2h) + eps; // The weight decay is applied independently of the Adam momenta m and v. // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
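/*
 * editor note (hedged): with keep = 1 - alpha*wd precomputed above, the statement
 * below performs the decoupled AdamW update of Loshchilov & Hutter (see link):
 *
 *     m <- beta1*m + (1 - beta1)*g
 *     v <- beta2*v + (1 - beta2)*g^2
 *     w <- w*(1 - alpha*wd) - alpha * (m*beta1h) / (sqrt(v*beta2h) + eps)
 *
 * beta1h and beta2h arrive via adamw_params and look like the usual bias-correction
 * factors 1/(1 - beta1^t) and 1/(1 - beta2^t) precomputed by the caller; that is an
 * assumption read from the shape of the update, not stated in this file.
 */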
// See: https://arxiv.org/pdf/1711.05101v3.pdf w[i00] = w[i00] * keep - alpha * mh / vh; } } } void ggml_compute_forward_opt_step_adamw( const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_opt_step_adamw_f32(params, dst); } break; default: { GGML_ABORT("fatal error"); } } } static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src0_grad = dst->src[1]; const ggml_tensor * sgd_params = dst->src[2]; GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); GGML_ASSERT(ggml_nelements(sgd_params) == 2); const int ith = params->ith; const int nth = params->nth; const int nr = ggml_nrows(src0); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT(nb00 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1) / nth; // row range for this thread const int ir0 = dr * ith; const int ir1 = MIN(ir0 + dr, nr); // using adamw param subset we care about - alpha, wd - could have a separate struct const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); const float alpha = sgd_params_ptr[0]; const float keep = 1.f - alpha * sgd_params_ptr[1]; for (int ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir / (ne02 * ne01); const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; float * w = (float *) ((char *) src0->data + offset); // weight const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad for (int i00 = 0; i00 < ne00; ++i00) { w[i00] = w[i00] * keep - alpha * g[i00]; } } } void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; switch (src0->type) { case GGML_TYPE_F32: { ggml_compute_forward_opt_step_sgd_f32(params, dst); } break; default: { GGML_ABORT("fatal error - sgd is F32 only"); } } } ggml-org-ggml-3678254/src/ggml-cpu/ops.h000066400000000000000000000217461512524704700176520ustar00rootroot00000000000000#pragma once #include "ggml.h" // // cache line // #if defined(__cpp_lib_hardware_interference_size) #define CACHE_LINE_SIZE std::hardware_destructive_interference_size #else #if defined(__POWER9_VECTOR__) #define CACHE_LINE_SIZE 128 #elif defined(__VXE__) || defined(__VXE2__) #define CACHE_LINE_SIZE 256 #else #define CACHE_LINE_SIZE 64 #endif #endif static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); // Work buffer size for im2col operations in CONV2D #define GGML_IM2COL_WORK_SIZE (16 * 1024 * 1024) #ifdef __cplusplus extern "C" { #endif void ggml_compute_forward_dup(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add_id(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add1(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_acc(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sum(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sum_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cumsum(const struct ggml_compute_params * params, struct ggml_tensor * dst); void 
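/*
 * editor note (hedged): every forward kernel declared in this header shares one
 * signature; operands travel on dst->src[...] and threading info in params->ith /
 * params->nth, so a dispatcher only needs a switch on the op, roughly:
 *
 *     // illustrative sketch only, not the actual ggml-cpu dispatcher
 *     switch (node->op) {
 *         case GGML_OP_SUM:  ggml_compute_forward_sum (params, node); break;
 *         case GGML_OP_MEAN: ggml_compute_forward_mean(params, node); break;
 *         // ...
 *     }
 */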
ggml_compute_forward_mean(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_argmax(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_count_equal(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_repeat(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_repeat_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_concat(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_silu_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rms_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rms_norm_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_group_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_l2_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_out_prod(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_scale(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_set(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cpy(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cont(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_set_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag_mask_inf(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag_mask_zero(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_soft_max(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_soft_max_ext_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rope(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rope_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_clamp(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_conv_transpose_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_im2col(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_im2col_back_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_im2col_3d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_conv_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_conv_3d(const struct ggml_compute_params * params, struct 
ggml_tensor * dst); void ggml_compute_forward_conv_transpose_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_conv_2d_dw(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pool_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pool_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pool_2d_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_upscale(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pad(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pad_reflect_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_roll(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_arange(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_top_k(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_leaky_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_fill(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_flash_attn_ext(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_flash_attn_back( const struct ggml_compute_params * params, const bool masked, struct ggml_tensor * dst); void ggml_compute_forward_ssm_conv(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_ssm_scan(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_win_part(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_win_unpart(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_unary(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_glu(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rwkv_wkv6(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rwkv_wkv7(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_gla(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_map_custom1(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_map_custom2(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_map_custom3(const struct ggml_compute_params * params, struct ggml_tensor * dst); void 
ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cross_entropy_loss(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cross_entropy_loss_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_opt_step_adamw(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_opt_step_sgd(const struct ggml_compute_params * params, struct ggml_tensor * dst); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/quants.c000066400000000000000000001164651512524704700203620ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-cpu-impl.h" #include "simd-mappings.h" #include "ggml-quants.h" #include "quants.h" #include "arch-fallback.h" #include #include #include #include // for qsort #include // for GGML_ASSERT #define GROUP_MAX_EPS 1e-15f #define GROUP_MAX_EPS_IQ3_XXS 1e-8f #define GROUP_MAX_EPS_IQ2_S 1e-8f #define GROUP_MAX_EPS_IQ1_M 1e-7f #define GROUP_MAX_EPS_IQ1_S 1e-12f #define UNUSED GGML_UNUSED void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q4_0_ref(x, y, k); } void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q4_1_ref(x, y, k); } void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q5_0_ref(x, y, k); } void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q5_1_ref(x, y, k); } void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_0_ref(x, y, k); } void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_1_ref(x, y, k); } void quantize_row_mxfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_mxfp4_ref(x, y, k); } // // 2-6 bit quantization in super-blocks // //========================- 2-bit (de)-quantization void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { quantize_row_q2_K_ref(x, vy, k); } //========================= 3-bit (de)-quantization void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { quantize_row_q3_K_ref(x, vy, k); } // ====================== 4-bit (de)-quantization void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK_K == 0); block_q4_K * GGML_RESTRICT y = vy; quantize_row_q4_K_ref(x, y, k); } // ====================== 5-bit (de)-quantization void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK_K == 0); block_q5_K * GGML_RESTRICT y = vy; quantize_row_q5_K_ref(x, y, k); } // ====================== 6-bit (de)-quantization void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK_K == 0); block_q6_K * GGML_RESTRICT y = vy; quantize_row_q6_K_ref(x, y, k); } // ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs) void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK_K == 0); block_tq1_0 * GGML_RESTRICT y = vy; quantize_row_tq1_0_ref(x, y, k); } void quantize_row_tq2_0(const 
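/*
 * editor note (hedged): the quantize_row_* functions in this file are thin wrappers
 * that forward to the *_ref implementations from ggml-quants.c, and the *_generic
 * dot products below are the portable scalar kernels; arch-fallback.h appears to
 * map the public names onto these _generic versions whenever no architecture
 * specific implementation is built, so correctness does not depend on SIMD.
 */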
float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(k % QK_K == 0); block_tq2_0 * GGML_RESTRICT y = vy; quantize_row_tq2_0_ref(x, y, k); } //===================================== Q8_K ============================================== void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_K_ref(x, y, k); } //===================================== Dot products ================================= void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F) - 8; const int v1 = (x[ib].qs[j] >> 4) - 8; sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; } // TODO: add WASM SIMD void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; for (; ib < nb; ++ib) { int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const int v0 = (x[ib].qs[j] & 0x0F); const int v1 = (x[ib].qs[j] >> 4); sumi0 += (v0 * y[ib].qs[j]); sumi1 += (v1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; } void ggml_vec_dot_mxfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_MXFP4 == 0); static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); const block_mxfp4 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK_MXFP4; int ib = 0; float sumf = 0; for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e); int sumi1 = 0; int sumi2 = 0; for (int j = 0; j < QK_MXFP4/2; ++j) { sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; for (; ib < nb; ++ib) { uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; 
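/*
 * editor note (hedged): q5_0 stores the fifth bit of each weight separately in the
 * 32-bit qh field. xh_0 above picks bit j for the low nibble and the line below
 * picks bit j+16 for the high nibble, both shifted into bit position 4, so the
 * reconstructed values ((qs & 0x0F) | xh) - 16 cover the signed range [-16, 15]
 * before being scaled by d_x * d_y.
 */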
const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); sumi0 += (x0 * y[ib].qs[j]); sumi1 += (x1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; } void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; const int nb = n / qk; int ib = 0; float sumf = 0; assert(n % qk == 0); assert(qk == QK5_1); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_1 * GGML_RESTRICT x = vx; const block_q8_1 * GGML_RESTRICT y = vy; for (; ib < nb; ++ib) { uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); int sumi0 = 0; int sumi1 = 0; for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; sumi0 += (x0 * y[ib].qs[j]); sumi1 += (x1 * y[ib].qs[j + qk/2]); } int sumi = sumi0 + sumi1; sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; } void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; assert(n % qk == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q8_0 * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; int ib = 0; float sumf = 0; for (; ib < nb; ++ib) { int sumi = 0; for (int j = 0; j < qk; j++) { sumi += x[ib].qs[j]*y[ib].qs[j]; } sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; } void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_tq1_0 * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; float sumf = 0.0f; for (int i = 0; i < nb; ++i) { int sum = 0; for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { for (size_t l = 0; l < 5; ++l) { for (size_t m = 0; m < 32; ++m) { uint8_t q = x[i].qs[j + m] * pow3[l]; uint16_t xi = ((uint16_t) q * 3) >> 8; sum += (xi - 1) * y[i].qs[j*5 + l*32 + m]; } } } for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { for (size_t l = 0; l < 5; ++l) { for (size_t m = 0; m < 16; ++m) { uint8_t q = x[i].qs[j + m] * pow3[l]; uint16_t xi = ((uint16_t) q * 3) >> 8; sum += (xi - 1) * y[i].qs[j*5 + l*16 + m]; } } } for (size_t l = 0; l < 4; ++l) { for (size_t j = 0; j < sizeof(x->qh); ++j) { uint8_t q = x[i].qh[j] * pow3[l]; uint16_t xi = ((uint16_t) q * 3) >> 8; sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j]; } } sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d); } *s = sumf; } void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const 
block_tq2_0 * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0.0f; for (int i = 0; i < nb; ++i) { int32_t sumi = 0; for (size_t j = 0; j < sizeof(x->qs); j += 32) { for (size_t l = 0; l < 4; ++l) { for (size_t k = 0; k < 32; ++k) { sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); } } } const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); sumf += (float) sumi * d; } *s = sumf; } void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q2_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; int summs = 0; for (int j = 0; j < 16; ++j) { summs += y[i].bsums[j] * (sc[j] >> 4); } const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; int d; for (int k = 0; k < QK_K/128; ++k) { int shift = 0; for (int j = 0; j < 4; ++j) { d = sc[is++] & 0xF; int isuml = 0; for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); isum += d * isuml; d = sc[is++] & 0xF; isuml = 0; for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); isum += d * isuml; shift += 2; q8 += 32; } q2 += 32; } sumf += dall * isum - dmin * summs; } *s = sumf; } void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; // scalar version // This function is written like this so the compiler can manage to vectorize most of it // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the // manually vectorized version above. Every other version I tried would run at least 4 times slower. // The ideal situation would be if we could just write the code once, and the compiler would // automatically produce the best possible set of machine instructions, instead of us having to manually // write vectorized versions for AVX, ARM_NEON, etc. int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); uint32_t auxs[4]; const int8_t * scales = (const int8_t*)auxs; float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT hm = x[i].hmask; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; uint8_t m = 1; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
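/*
 * editor note (hedged): the q3_K loop above first expands every super-block into
 * aux8[]: two bits per value come from qs, and the matching hmask bit either leaves
 * the value in 0..3 or subtracts 4, giving signed values in [-4, 3]. The packed
 * 6-bit scales are then unshuffled through kmask1/kmask2 and offset by -32. Keeping
 * the inner accumulation as plain int8/int16/int32 loops is what lets the compiler
 * auto-vectorize it, as the comment at the top of the function explains.
 */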
0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; q3 += 32; } a = aux8; memcpy(auxs, x[i].scales, 12); uint32_t tmp = auxs[2]; auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); for (int j = 0; j < QK_K/16; ++j) { for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q4_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; for (int j = 0; j < QK_K/64; ++j) { for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); a += 32; for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); a += 32; q4 += 32; } memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; int sumi = 0; for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; a = aux8; int is = 0; for (int j = 0; j < QK_K/32; ++j) { int32_t scale = scales[is++]; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q5_K * 
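/*
 * editor note (hedged): q4_K (above) and q5_K (below) work on 256-value super
 * blocks. The 12-byte scales field packs eight 6-bit (scale, min) pairs, which the
 * kmask1/kmask2/kmask3 shuffle expands into the scales[]/mins[] byte views, and the
 * block result is assembled as
 *
 *     sum = d * sum_j scale_j * (q . q8)_j  -  dmin * sum_j min_j * bsums_j
 *
 * q5_K differs only in adding a high bit from qh (+16 when set) to each 4-bit
 * value before running the same accumulation.
 */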
GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; const uint8_t * scales = (const uint8_t*)&utmp[0]; const uint8_t * mins = (const uint8_t*)&utmp[2]; int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4 = x[i].qs; const uint8_t * GGML_RESTRICT hm = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; uint8_t m = 1; for (int j = 0; j < QK_K/64; ++j) { for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); a += 32; m <<= 1; q4 += 32; } memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); const uint32_t uaux = utmp[1] & kmask1; utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); utmp[2] = uaux; utmp[0] &= kmask1; int sumi = 0; for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; a = aux8; int is = 0; for (int j = 0; j < QK_K/32; ++j) { int32_t scale = scales[is++]; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_q6_K * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * GGML_RESTRICT a = aux8; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; } a += 128; q4 += 64; qh += 32; } a = aux8; int is = 0; for (int j = 0; j < QK_K/16; ++j) { int scale = x[i].scales[is++]; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; for (int l = 
0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; uint32_t aux32[2]; const uint8_t * aux8 = (const uint8_t *)aux32; float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { memcpy(aux32, q2, 2*sizeof(uint32_t)); q2 += 4; const uint32_t ls = 2*(aux32[1] >> 28) + 1; int32_t sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; for (int j = 0; j < 8; ++j) { sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } bsum += sumi * ls; } sumf += d * bsum; } *s = 0.125f * sumf; } void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; int32_t sumi = 0; for (int l = 0; l < 2; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; for (int j = 0; j < 8; ++j) { sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } bsum += sumi * ls1; sumi = 0; for (int l = 2; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; for (int j = 0; j < 8; ++j) { sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
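/*
 * editor note (hedged): the iq2_xxs / iq2_xs kernels reconstruct each group of 8
 * weights from a fixed codebook row (iq2xxs_grid / iq2xs_grid) and a 7-bit sign
 * pattern looked up in ksigns_iq2xs, then weight the group sum by an odd scale
 * 2*ls + 1 taken from the block metadata. The final *s = 0.125f * sumf applies the
 * constant 1/8 that this grid/scale encoding carries.
 */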
-1 : 1); } q8 += 8; } bsum += sumi * ls2; q2 += 4; } sumf += d * bsum; } *s = 0.125f * sumf; } void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq2_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0; for (int i = 0; i < nb; i++) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint8_t * signs = qs + QK_K/8; int bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); int sumi1 = 0, sumi2 = 0; for (int l = 0; l < 2; ++l) { const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); for (int j = 0; j < 8; ++j) { sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } for (int l = 2; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); for (int j = 0; j < 8; ++j) { sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); } q8 += 8; } bsum += ls1 * sumi1 + ls2 * sumi2; qs += 4; signs += 4; } sumf += d * bsum; } *s = 0.125f * sumf; } void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_xxs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; uint32_t aux32; float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); const uint32_t ls = 2*(aux32 >> 28) + 1; int32_t sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; for (int j = 0; j < 4; ++j) { sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); } q8 += 8; } q3 += 8; bsum += sumi * ls; } sumf += d * bsum; } *s = 0.25f * sumf; } void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq3_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0.f; for (int i = 0; i < nb; ++i) { const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; int32_t sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); for (int j = 0; j < 4; ++j) { sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); } q8 += 8; } qs += 8; signs += 4; bsum += sumi * ls1; sumi = 0; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); for (int j = 0; j < 4; ++j) { sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); } q8 += 8; } qs += 8; signs += 4; bsum += sumi * ls2; } sumf += d * bsum; } *s = sumf; } void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_s * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0; for (int i = 0; i < nb; i++) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint16_t * qh = x[i].qh; int sumi = 0, sumi1 = 0; for (int ib = 0; ib < QK_K/32; ++ib) { const int ls = 2*((qh[ib] >> 12) & 7) + 1; const int delta = qh[ib] & 0x8000 ? 
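/*
 * editor note (hedged): iq1_s builds each group of 8 weights from iq1s_grid, with
 * the 8-bit index qs[l] extended by 3 bits from qh. Per 32-value sub-block the code
 * uses an odd scale ls = 2*((qh >> 12) & 7) + 1 and a sign flag (bit 0x8000) that
 * selects delta = +/-1; the delta term is applied through the precomputed q8 block
 * sums (bsums) and weighted by the IQ1S_DELTA constant in the final accumulation.
 */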
-1 : 1; int lsum = 0; for (int l = 0; l < 4; ++l) { const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); for (int j = 0; j < 8; ++j) { lsum += q8[j] * grid[j]; } q8 += 8; } sumi += ls * lsum; sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); qs += 4; } sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); } *s = sumf; } void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); const block_iq1_m * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; iq1m_scale_t scale; int sum1[2], sum2[2], delta[4]; float sumf = 0; for (int i = 0; i < nb; i++) { const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint16_t * sc = (const uint16_t *)x[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); int sumi1 = 0, sumi2 = 0; for (int ib = 0; ib < QK_K/32; ++ib) { delta[0] = qh[0] & 0x08 ? -1 : 1; delta[1] = qh[0] & 0x80 ? -1 : 1; delta[2] = qh[1] & 0x08 ? -1 : 1; delta[3] = qh[1] & 0x80 ? -1 : 1; sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; for (int l = 0; l < 4; ++l) { const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); int lsum1 = 0, lsum2 = 0; for (int j = 0; j < 8; ++j) { lsum1 += q8[j] * grid[j]; lsum2 += q8[j]; } q8 += 8; sum1[l/2] += lsum1; sum2[l/2] += lsum2*delta[l]; } const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; sumi1 += sum1[0] * ls1 + sum1[1] * ls2; sumi2 += sum2[0] * ls1 + sum2[1] * ls2; qs += 4; qh += 2; } sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); } *s = sumf; } void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK4_NL == 0); static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); const block_iq4_nl * GGML_RESTRICT x = vx; const block_q8_0 * GGML_RESTRICT y = vy; const int nb = n / QK4_NL; int ib = 0; float sumf = 0; for (; ib < nb; ++ib) { const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; } sumf += d * (sumi1 + sumi2); } *s = sumf; } void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); UNUSED(nrc); UNUSED(bx); UNUSED(by); UNUSED(bs); assert(n % QK_K == 0); const block_iq4_xs * GGML_RESTRICT x = vx; const block_q8_K * GGML_RESTRICT y = vy; const int nb = n / QK_K; float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; for (int ib = 0; ib < QK_K/32; ib += 2) { const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); h >>= 
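/*
 * editor note (hedged): iq4_nl (above) and iq4_xs (below) are "non-linear" 4-bit
 * formats: each nibble indexes the 16-entry kvalues_iq4nl table instead of being
 * used directly. iq4_xs additionally carries a 6-bit scale per 32-value sub-block,
 * split between scales_l (low 4 bits) and scales_h (2 bits at a time from h), and
 * recentred with the -32 offset visible just below.
 */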
4; const float d1 = d4d8*(ls1 - 32); const float d2 = d4d8*(ls2 - 32); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < 16; ++j) { sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; } sumf += d1 * (sumi1 + sumi2); qs += 16; q8 += 32; sumi1 = sumi2 = 0; for (int j = 0; j < 16; ++j) { sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; } sumf += d2 * (sumi1 + sumi2); qs += 16; q8 += 32; } } *s = sumf; } // ============================ 4-bit non-linear quants void quantize_row_iq4_nl(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { assert(k % QK4_NL == 0); quantize_row_iq4_nl_ref(x, y, k); } void quantize_row_iq4_xs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); quantize_iq4_xs(x, y, 1, k, NULL); } ggml-org-ggml-3678254/src/ggml-cpu/quants.h000066400000000000000000000225471512524704700203640ustar00rootroot00000000000000#pragma once #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "ggml.h" // GGML CPU internal header #ifdef __cplusplus extern "C" { #endif // Quantization void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_mxfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); // Dot product void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_mxfp4_q8_0(int 
n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq2_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); // Generic implementation void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const 
void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_mxfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/repack.cpp000066400000000000000000003166341512524704700206540ustar00rootroot00000000000000#define 
GGML_COMMON_IMPL_CPP #define GGML_COMMON_DECL_CPP #include "ggml-common.h" #include "ggml-backend-impl.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" #include "simd-mappings.h" #include "traits.h" #include "arch-fallback.h" #include <cmath> #include <cstring> #include <cassert> #include <cstdio> // for GGML_ASSERT #include "repack.h" #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" #endif #define UNUSED GGML_UNUSED static inline int nearest_int(float fval) { assert(fabsf(fval) <= 4194303.f); float val = fval + 12582912.f; int i; memcpy(&i, &val, sizeof(int)); return (i & 0x007fffff) - 0x00400000; } // Functions to create the interleaved data layout formats // interleave 4 block_q4_0s in blocks of blck_size_interleave // returns an interleaved block_q4_0x4 // in the interleaved block_q4_0x4, place deltas for 4 block_q4_0 blocks // first, then interleave quants from 4 block_q4_0s in blocks of blck_size_interleave // // - in : an array of block_q4_0 pointers // - blck_size_interleave : the block_q4_0 quants bytes are interleaved in blocks of // blck_size_interleave bytes // - xor_mask : the mask to convert the nibbles in block_q4_0 quants bytes // from bias offset form to pure sign form (this saves subtract // operations during unpacking) // extern "C" { void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; // scalar const int blck_size_interleave = 4; float srcv[4][QK8_0]; float id[4]; for (int i = 0; i < nb; i++) { for (int row_iter = 0; row_iter < 4; row_iter++) { float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; amax = MAX(amax, fabsf(srcv[row_iter][j])); } const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; src_offset += (j % blck_size_interleave); float x0 = srcv[src_id][src_offset] * id[src_id]; y[i].qs[j] = roundf(x0); } } } void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; // scalar const int blck_size_interleave = 8; float srcv[4][QK8_0]; float id[4]; for (int i = 0; i < nb; i++) { for (int row_iter = 0; row_iter < 4; row_iter++) { float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; amax = MAX(amax, fabsf(srcv[row_iter][j])); } const float d = amax / ((1 << 7) - 1); id[row_iter] = d ?
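// Worked example (comment only, added for clarity): nearest_int() rounds by exploiting the
// float layout. 12582912.0f is 1.5 * 2^23, so for |fval| below 2^22 the sum fval + 12582912.0f
// has an ulp of exactly 1.0 and its low 23 mantissa bits hold 0x400000 + the rounded value.
// E.g. nearest_int(5.3f): 5.3 + 12582912.0 rounds to 12582917.0, whose mantissa field is
// 0x400005, and 0x400005 - 0x400000 = 5.
// The per-row scale in these quantize_mat routines works the same way as plain q8_0: with
// amax = 2.54 the delta is d = 2.54 / 127 = 0.02, id = 1/d = 50, and a source value of 1.27
// is stored as roundf(1.27 * 50) = 64.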
1.0f / d : 0.0f; y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; src_offset += (j % blck_size_interleave); float x0 = srcv[src_id][src_offset] * id[src_id]; y[i].qs[j] = roundf(x0); } } } void ggml_quantize_mat_q8_K_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK_K == 256); assert(k % QK_K == 0); const int nb = k / QK_K; block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; // scalar const int blck_size_interleave = 4; float srcv[4][QK_K]; float iscale[4]; for (int i = 0; i < nb; i++) { for (int row_iter = 0; row_iter < 4; row_iter++) { float amax = 0.0f; // absolute max float max = 0; for (int j = 0; j < QK_K; j++) { srcv[row_iter][j] = x[row_iter * k + i * QK_K + j]; // Update the maximum value of the corresponding super block if(amax < fabsf(srcv[row_iter][j])) { amax = fabsf(srcv[row_iter][j]); max = srcv[row_iter][j]; } } iscale[row_iter] = amax ? -127.f/max : 0; y[i].d[row_iter] = amax ? 1/iscale[row_iter] : 0; } for (int j = 0; j < QK_K / 4; j++) { y[i].bsums[j] = 0; } // Quants values are interleaved in sequence of four bytes from corresponding super blocks // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving // i.e first four bsums from the first super block, followed by first four bsums from second super block and so on for (int j = 0; j < QK_K * 4; j++) { int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; src_offset += (j % blck_size_interleave); int index = (((j & 15) >> 2) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3); float x0 = srcv[src_id][src_offset] * iscale[src_id]; y[i].qs[j] = nearest_int(x0); y[i].bsums[index] += y[i].qs[j]; } } } void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK_K == 256); assert(k % QK_K == 0); const int nb = k / QK_K; block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; // scalar const int blck_size_interleave = 8; float srcv[4][QK_K]; float iscale[4]; for (int i = 0; i < nb; i++) { for (int row_iter = 0; row_iter < 4; row_iter++) { float amax = 0.0f; // absolute max float max = 0; for (int j = 0; j < QK_K; j++) { srcv[row_iter][j] = x[row_iter * k + i * QK_K + j]; // Update the maximum value of the corresponding super block if(amax < fabsf(srcv[row_iter][j])) { amax = fabsf(srcv[row_iter][j]); max = srcv[row_iter][j]; } } iscale[row_iter] = amax ? -127.f/max : 0; y[i].d[row_iter] = amax ? 
1/iscale[row_iter] : 0; } for (int j = 0; j < QK_K / 4; j++) { y[i].bsums[j] = 0; } // Quants values are interleaved in sequence of eight bytes from corresponding super blocks // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving // i.e first four bsums from the first super block, followed by first four bsums from second super block and so on for (int j = 0; j < QK_K * 4; j++) { int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; src_offset += (j % blck_size_interleave); int index = (((j & 31) >> 3) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3); float x0 = srcv[src_id][src_offset] * iscale[src_id]; y[i].qs[j] = nearest_int(x0); y[i].bsums[index] += y[i].qs[j]; } } } } // extern "C" template void ggml_quantize_mat_t(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row); template <> void ggml_quantize_mat_t<4, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { assert(nrow == 4); UNUSED(nrow); ggml_quantize_mat_q8_0_4x4(x, vy, n_per_row); } template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { assert(nrow == 4); UNUSED(nrow); ggml_quantize_mat_q8_0_4x8(x, vy, n_per_row); } template <> void ggml_quantize_mat_t<4, GGML_TYPE_Q8_K>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { assert(nrow == 4); UNUSED(nrow); ggml_quantize_mat_q8_K_4x4(x, vy, n_per_row); } template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_K>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { assert(nrow == 4); UNUSED(nrow); ggml_quantize_mat_q8_K_4x8(x, vy, n_per_row); } extern "C" { void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert(nr == 1); assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert (n % qk == 0); assert (nc % 
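// Illustrative note (added comment): in the q4_0 GEMV/GEMM paths here, the repacked nibbles were
// XOR-ed with 0x88 when the block was interleaved, so each nibble already carries a signed value.
// (int8_t)(byte << 4) isolates the low nibble scaled by 16 and (int8_t)(byte & 0xF0) the high
// nibble scaled by 16; the trailing ">> 4" removes that factor once per pair of products
// (an arithmetic shift, so negative sums truncate toward negative infinity).
// Worked example: q4_0 codes 3 (value -5) and 12 (value +4) are stored as 0x4B after the XOR,
// giving v0 = (int8_t)(0x4B << 4) = (int8_t)0xB0 = -80 = -5*16 and v1 = 0x40 = 64 = +4*16.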
ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[8]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } void ggml_gemv_q4_K_8x4_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 4; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(bs); UNUSED(nr); float sumf[8]; float sum_minf[8]; uint32_t utmp[32]; int sumi1; int sumi2; int sumi; const block_q8_K * a_ptr = (const block_q8_K *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) { sumf[j] = 0.0; sum_minf[j] = 0.0; } for (int l = 0; l < nb; l++) { for (int sb = 0; sb < 8; sb++) { memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) 
| (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); utmp[sb * 4 + 2] = uaux_0; utmp[sb * 4 + 0] &= kmask1; } for (int k = 0; k < (qk / (2 * blocklen)); k++) { uint8_t * scales_0 = (uint8_t *) utmp + (k / 8) * 32; uint8_t * scales_1 = (uint8_t *) utmp + (k / 8) * 32 + 16; for (int j = 0; j < ncols_interleaved; j++) { sumi1 = 0; sumi2 = 0; sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); sumi1 = (v0 * a_ptr[l].qs[(k / 8) * 64 + (k % 8) * blocklen + i]); sumi2 = (v1 * a_ptr[l].qs[(k / 8) * 64 + (k % 8) * blocklen + i + 32]); sumi1 = sumi1 * scales_0[j]; sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; } } for (int sb = 0; sb < 8; sb++) { uint8_t * mins = (uint8_t *) utmp + 8 + sb * 16; for (int j = 0; j < ncols_interleaved; j++) { sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; } } } for (int j = 0; j < ncols_interleaved; j++) { s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; } } } void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[8]; float sum_minf[8]; uint32_t utmp[32]; int sumi1; int sumi2; int sumi; const block_q8_K * a_ptr = (const block_q8_K *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) { sumf[j] = 0.0; sum_minf[j] = 0.0; } for (int l = 0; l < nb; l++) { for (int sb = 0; sb < 8; sb++) { memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); utmp[sb * 4 + 2] = uaux_0; utmp[sb * 4 + 0] &= kmask1; } for (int k = 0; k < (qk / (2 * blocklen)); k++) { uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; for (int j = 0; j < ncols_interleaved; j++) { sumi1 = 0; sumi2 = 0; sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i]); sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i + 32]); sumi1 = sumi1 * scales_0[j]; sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; } } for (int sb = 0; sb < 8; sb++) { uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; for (int j = 0; j < ncols_interleaved; j++) { sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * 
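// Reference note (added comment, paraphrased from the scalar q4_K decode used elsewhere in ggml):
// each q4_K super-block packs 8 six-bit scales and 8 six-bit mins into 12 bytes. The
// kmask1/kmask2/kmask3 shuffle above rebuilds, per interleaved sub-block, a 16-byte view in utmp
// whose bytes 0..7 are the scales and bytes 8..15 the mins. The equivalent per-element decode,
// with q pointing at the 12 packed bytes and j in 0..7, is roughly:
//   if (j < 4) { sc = q[j] & 63;                            mn = q[j + 4] & 63; }
//   else       { sc = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
//                mn = (q[j + 4] >> 4)  | ((q[j]     >> 6) << 4); }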
GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; } } } for (int j = 0; j < ncols_interleaved; j++) { s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; } } } void ggml_gemv_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[8]; float sum_minf[8]; int sumi1,sumi2,sumi3,sumi4; int sumi; const block_q8_K * a_ptr = (const block_q8_K *)vy; for(int x = 0; x < nc / ncols_interleaved; x++) { const block_q2_Kx8 * b_ptr = (const block_q2_Kx8 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) { sumf[j] = 0.0; sum_minf[j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (4 * blocklen)); k++) { const uint8_t *scales_0 = b_ptr[l].scales + (k / 4) * 64 ; const uint8_t *scales_1 = b_ptr[l].scales + (k / 4) * 64 + 16; const uint8_t *scales_2 = b_ptr[l].scales + (k / 4) * 64 + 32; const uint8_t *scales_3 = b_ptr[l].scales + (k / 4) * 64 + 48; for (int j = 0; j < ncols_interleaved; j++) { sumi1 = 0; sumi2 = 0; sumi3 = 0; sumi4 = 0; sumi = 0; int offset = ((k / 2) % 2) + j * 2; for (int i = 0; i < blocklen; ++i){ const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 3); const int v1 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 2 ) & 3); const int v2 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4 ) & 3); const int v3 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 6 ) & 3); sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i]); sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 32]); sumi3 = (v2 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 64]); sumi4 = (v3 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 96]); sumi1 = sumi1 * (scales_0[offset] & 0xF); sumi2 = sumi2 * (scales_1[offset] & 0xF); sumi3 = sumi3 * (scales_2[offset] & 0xF); sumi4 = sumi4 * (scales_3[offset] & 0xF); sumi += sumi1 + sumi2 + sumi3 + sumi4; } sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; } } for(int sb = 0; sb < 8; sb++) { const uint8_t *mins = b_ptr[l].scales + sb * 16; for(int j = 0; j < ncols_interleaved; j++){ sum_minf[j] += ((mins[j * 2] >> 4) * a_ptr[l].bsums[sb * 2] + (mins[(j * 2)+ 1] >> 4) * a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; } } } for (int j = 0; j < ncols_interleaved; j++) { s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; } } } void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert(nr == 1); assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(bs); UNUSED(nr); float sumf[4]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int j = 0; j < 
ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } void ggml_gemv_iq4_nl_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert(nr == 1); assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(bs); UNUSED(nr); float sumf[8]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_iq4_nlx8 * b_ptr = (const block_iq4_nlx8 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } void ggml_gemv_q8_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert(nr == 1); assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(bs); UNUSED(nr); float sumf[4]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) { sumf[j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / blocklen); k++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i]; sumi += v0 * a_ptr[l].qs[k * blocklen + i]; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) { s[x * ncols_interleaved + j] = sumf[j]; } } } void ggml_gemv_q8_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert(nr == 1); assert(n % qk == 0); assert(nc % ncols_interleaved == 0); UNUSED(bs); UNUSED(nr); float sumf[4]; int sumi; const block_q8_0 * a_ptr = (const block_q8_0 *) vy; for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx + (x * nb); for (int j = 0; j < ncols_interleaved; j++) { 
sumf[j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / blocklen); k++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i]; sumi += v0 * a_ptr[l].qs[k * blocklen + i]; } sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } for (int j = 0; j < ncols_interleaved; j++) { s[x * ncols_interleaved + j] = sumf[j]; } } } void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); { float sumf[4][4]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } } void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4][4]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * 
GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4][8]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } void ggml_gemm_q4_K_8x4_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 4; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4][8]; float sum_minf[4][8]; uint32_t utmp[32]; int sumi1; int sumi2; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumf[m][j] = 0.0; sum_minf[m][j] = 0.0; } } for (int l = 0; l < nb; l++) { for (int sb = 0; sb < 8; sb++) { memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); utmp[sb * 4 + 2] = uaux_0; utmp[sb * 4 + 0] &= kmask1; } for (int k = 0; k < (qk / (2 * blocklen)); k++) { uint8_t * scales_0 = (uint8_t *) utmp + (k / 8) * 32; uint8_t * scales_1 = (uint8_t *) utmp + (k / 8) * 32 + 16; for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi1 = 0; sumi2 = 0; sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen 
+ j * blocklen + i] & 0xF); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); sumi1 = (v0 * a_ptr[l].qs[(k / 8) * 256 + (k % 8) * 4 * blocklen + m * blocklen + i]); sumi2 = (v1 * a_ptr[l].qs[(k / 8) * 256 + (k % 8) * 4 * blocklen + m * blocklen + i + 128]); sumi1 = sumi1 * scales_0[j]; sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; } } } for (int sb = 0; sb < 8; sb++) { uint8_t * mins = (uint8_t *) utmp + 8 + sb * 16; for(int m = 0; m < 4; m++) { const int16_t * bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); for(int j = 0; j < ncols_interleaved; j++) { sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; } } } } } void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; static const uint32_t kmask1 = 0x3f3f3f3f; static const uint32_t kmask2 = 0x0f0f0f0f; static const uint32_t kmask3 = 0x03030303; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4][8]; float sum_minf[4][8]; uint32_t utmp[32]; int sumi1; int sumi2; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumf[m][j] = 0.0; sum_minf[m][j] = 0.0; } } for (int l = 0; l < nb; l++) { for (int sb = 0; sb < 8; sb++) { memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); utmp[sb * 4 + 2] = uaux_0; utmp[sb * 4 + 0] &= kmask1; } for (int k = 0; k < (qk / (2 * blocklen)); k++) { uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi1 = 0; sumi2 = 0; sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i]); sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]); sumi1 = sumi1 * scales_0[j]; sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; } } } for (int sb = 0; sb < 8; sb++) { uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; for(int m = 0; m < 4; m++) { const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); for(int j = 0; j < ncols_interleaved; j++) { sum_minf[m][j] += mins[j] * (bsums[0] + 
bsums[1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; } } } } } void ggml_gemm_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); float sumf[4][8]; float sum_minf[4][8]; int sumi1, sumi2, sumi3, sumi4; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q2_Kx8 * b_ptr = (const block_q2_Kx8 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumf[m][j] = 0.0; sum_minf[m][j] = 0.0; } } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (4 * blocklen)); k++) { const uint8_t *scales_0 = b_ptr[l].scales + (k / 4) * 64 ; const uint8_t *scales_1 = b_ptr[l].scales + (k / 4) * 64 + 16; const uint8_t *scales_2 = b_ptr[l].scales + (k / 4) * 64 + 32; const uint8_t *scales_3 = b_ptr[l].scales + (k / 4) * 64 + 48; for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi1 = 0; sumi2 = 0; sumi3 = 0; sumi4 = 0; sumi = 0; int offset = ((k / 2) % 2) + j * 2; for (int i = 0; i < blocklen; ++i){ const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 3); const int v1 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 2 ) & 3); const int v2 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4 ) & 3); const int v3 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 6 ) & 3); sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i]); sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]); sumi3 = (v2 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 256]); sumi4 = (v3 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 384]); sumi1 = sumi1 * (scales_0[offset] & 0xF); sumi2 = sumi2 * (scales_1[offset] & 0xF); sumi3 = sumi3 * (scales_2[offset] & 0xF); sumi4 = sumi4 * (scales_3[offset] & 0xF); sumi += sumi1 + sumi2 + sumi3 + sumi4; } sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; } } } for(int sb = 0; sb < 8; sb++) { const uint8_t *mins = b_ptr[l].scales + sb * 16; for(int m = 0; m < 4; m++) { const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); for(int j = 0; j < ncols_interleaved; j++) { int mins_prod = ((mins[j * 2] >> 4) * bsums[0] + (mins[(j * 2)+ 1] >> 4) * bsums[1]); sum_minf[m][j] += (mins_prod) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; } } } } } void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const 
int blocklen = 4; assert (n % qk == 0); assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); UNUSED(bs); UNUSED(vx); UNUSED(vy); UNUSED(nr); UNUSED(nc); UNUSED(nb); UNUSED(ncols_interleaved); UNUSED(blocklen); { float sumf[4][4]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } } void ggml_gemm_iq4_nl_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; const int blocklen = 8; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); float sumf[4][8]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_iq4_nlx8 * b_ptr = (const block_iq4_nlx8 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / (2 * blocklen)); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } void ggml_gemm_q8_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 4; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); float sumf[4][4]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumf[m][j] = 0.0; } } for 
(int l = 0; l < nb; l++) { for (int k = 0; k < (qk / blocklen); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i]; sumi += v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } } void ggml_gemm_q8_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; const int blocklen = 8; assert(n % qk == 0); assert(nr % 4 == 0); assert(nc % ncols_interleaved == 0); float sumf[4][4]; int sumi; for (int y = 0; y < nr / 4; y++) { const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); for (int x = 0; x < nc / ncols_interleaved; x++) { const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx + (x * nb); for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumf[m][j] = 0.0; } } for (int l = 0; l < nb; l++) { for (int k = 0; k < (qk / blocklen); k++) { for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { sumi = 0; for (int i = 0; i < blocklen; ++i) { const int v0 = b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i]; sumi += v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]; } sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } } for (int m = 0; m < 4; m++) { for (int j = 0; j < ncols_interleaved; j++) { s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; } } } } } } // extern "C" static block_q8_0x4 make_block_q8_0x4(block_q8_0 * in, unsigned int blck_size_interleave) { block_q8_0x4 out; for (int i = 0; i < 4; i++) { out.d[i] = in[i].d; } const int end = QK8_0 * 4 / blck_size_interleave; for (int i = 0; i < end; ++i) { int src_id = i % 4; int src_offset = (i / 4) * blck_size_interleave; int dst_offset = i * blck_size_interleave; memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], blck_size_interleave); } return out; } static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) { block_q4_0x4 out; for (int i = 0; i < 4; i++) { out.d[i] = in[i].d; } const int end = QK4_0 * 2 / blck_size_interleave; if (blck_size_interleave == 8) { const uint64_t xor_mask = 0x8888888888888888ULL; for (int i = 0; i < end; ++i) { int src_id = i % 4; int src_offset = (i / 4) * blck_size_interleave; int dst_offset = i * blck_size_interleave; uint64_t elems; // Using memcpy to avoid unaligned memory accesses memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); elems ^= xor_mask; memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); } } else if (blck_size_interleave == 4) { const uint32_t xor_mask = 0x88888888; for (int i = 0; i < end; ++i) { int src_id = i % 4; int src_offset = (i / 4) * blck_size_interleave; int dst_offset = i * blck_size_interleave; uint32_t elems; memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint32_t)); elems ^= xor_mask; memcpy(&out.qs[dst_offset], &elems, sizeof(uint32_t)); } } else { GGML_ASSERT(false); } return out; } // interleave 8 block_q4_0s in blocks of blck_size_interleave // returns an interleaved block_q4_0x8 // in the interleaved block_q4_0x8, place 
deltas for 8 block_q4_0 blocks // first, then interleave quants from 8 block_q4_0s in blocks of blck_size_interleave static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave) { block_q4_0x8 out; for (int i = 0; i < 8; i++) { out.d[i] = in[i].d; } const int end = QK4_0 * 4 / blck_size_interleave; const uint64_t xor_mask = 0x8888888888888888ULL; for (int i = 0; i < end; ++i) { int src_id = i % 8; int src_offset = (i / 8) * blck_size_interleave; int dst_offset = i * blck_size_interleave; uint64_t elems; memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); elems ^= xor_mask; memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); } return out; } static block_q4_Kx8 make_block_q4_Kx8(block_q4_K * in, unsigned int blck_size_interleave) { block_q4_Kx8 out; //Delta(scale) and dmin values of the eight Q4_K structures are copied onto the output interleaved structure for (int i = 0; i < 8; i++) { out.d[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; } for (int i = 0; i < 8; i++) { out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; } const int end = QK_K * 4 / blck_size_interleave; // Interleave Q4_K quants by taking 8 bytes at a time for (int i = 0; i < end; ++i) { int src_id = i % 8; int src_offset = (i / 8) * blck_size_interleave; int dst_offset = i * blck_size_interleave; uint64_t elems; memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); } // The below logic is designed so as to unpack and rearrange scales and mins values in Q4_K // Currently the Q4_K structure has 8 scales and 8 mins packed in 12 bytes ( 6 bits for each value) // The output Q4_Kx8 structure has 96 bytes // Every 12 byte is packed such that it contains scales and mins for corresponding sub blocks from Q4_K structure // For eg - First 12 bytes contains 8 scales and 8 mins - each of first sub block from different Q4_K structures uint8_t s[8], m[8]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { s[j] = in[j].scales[i] & 63; m[j] = in[j].scales[i + 4] & 63; } out.scales[i * 12] = (s[0] & 63) + ((s[4] & 48) << 2); out.scales[i * 12 + 1] = (s[1] & 63) + ((s[5] & 48) << 2); out.scales[i * 12 + 2] = (s[2] & 63) + ((s[6] & 48) << 2); out.scales[i * 12 + 3] = (s[3] & 63) + ((s[7] & 48) << 2); out.scales[i * 12 + 4] = (m[0] & 63) + ((m[4] & 48) << 2); out.scales[i * 12 + 5] = (m[1] & 63) + ((m[5] & 48) << 2); out.scales[i * 12 + 6] = (m[2] & 63) + ((m[6] & 48) << 2); out.scales[i * 12 + 7] = (m[3] & 63) + ((m[7] & 48) << 2); out.scales[i * 12 + 8] = (s[4] & 15) + ((m[4] & 15) << 4); out.scales[i * 12 + 9] = (s[5] & 15) + ((m[5] & 15) << 4); out.scales[i * 12 + 10] = (s[6] & 15) + ((m[6] & 15) << 4); out.scales[i * 12 + 11] = (s[7] & 15) + ((m[7] & 15) << 4); } for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { s[j] = ((in[j].scales[i] & 192) >> 2) | (in[j].scales[i+8] & 15); m[j] = ((in[j].scales[i + 4] & 192) >> 2) | ((in[j].scales[i+8] & 240) >> 4); } out.scales[i * 12 + 48] = (s[0] & 63) + ((s[4] & 48) << 2); out.scales[i * 12 + 49] = (s[1] & 63) + ((s[5] & 48) << 2); out.scales[i * 12 + 50] = (s[2] & 63) + ((s[6] & 48) << 2); out.scales[i * 12 + 51] = (s[3] & 63) + ((s[7] & 48) << 2); out.scales[i * 12 + 52] = (m[0] & 63) + ((m[4] & 48) << 2); out.scales[i * 12 + 53] = (m[1] & 63) + ((m[5] & 48) << 2); out.scales[i * 12 + 54] = (m[2] & 63) + ((m[6] & 48) << 2); out.scales[i * 12 + 55] = (m[3] & 63) + ((m[7] & 48) << 2); out.scales[i * 12 + 56] = (s[4] & 15) + ((m[4] & 15) << 4); out.scales[i * 
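// Added comment: the interleave loops above map source bytes with src_id = i % nrows and
// src_offset = (i / nrows) * blck_size_interleave, so with 8 rows and an 8-byte interleave the
// destination layout is: bytes 0..7 from row 0, 8..15 from row 1, ..., 56..63 from row 7, then
// 64..71 from row 0 again (its second 8-byte chunk), and so on. The per-row deltas (and, for
// q4_K, the dmin values and repacked 6-bit scales) are stored up front, which lets the GEMV/GEMM
// kernels walk one contiguous stream per column group.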
12 + 57] = (s[5] & 15) + ((m[5] & 15) << 4); out.scales[i * 12 + 58] = (s[6] & 15) + ((m[6] & 15) << 4); out.scales[i * 12 + 59] = (s[7] & 15) + ((m[7] & 15) << 4); } return out; } static block_q2_Kx8 make_block_q2_Kx8(block_q2_K * in, unsigned int blck_size_interleave) { block_q2_Kx8 out; // Delta(scale) and dmin values of the eight Q2_K structures are copied onto the output interleaved structure for (int i = 0; i < 8; i++) { out.d[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; } for (int i = 0; i < 8; i++) { out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; } const int end = QK_K * 2 / blck_size_interleave; // Interleave Q2_K quants by taking 8 bytes at a time for (int i = 0; i < end; ++i) { int src_id = i % 8; int src_offset = (i / 8) * blck_size_interleave; int dst_offset = i * blck_size_interleave; uint64_t elems; memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); } // The below logic is designed so as to unpack and rearrange scales and mins values in Q2_K // Currently the Q2_K structure has 16 scales and 16 mins packed in 16 bytes ( 4 bits for each value) // The output Q2_Kx8 structure has 128 bytes for storing scales and mins // Every 16 byte is packed such that it contains scales and mins for corresponding sub blocks from Q2_K structure // For eg - First 16 bytes contains 16 scales and 16 mins - each of first and second sub blocks from different Q2_K structures for(int i = 0; i < 128; i++){ // Index for selecting which q2k super block int src1 = (i % 16) / 2; // Index for selecting scale int src2 = ((i / 16) * 2) + (i % 2); out.scales[i] = in[src1].scales[src2]; } return out; } static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); GGML_ASSERT(interleave_block == 4 || interleave_block == 8); constexpr int nrows_interleaved = 4; block_q4_0x4 * dst = (block_q4_0x4 *)t->data; const block_q4_0 * src = (const block_q4_0 *)data; block_q4_0 dst_tmp[4]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK4_0; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q4_0x4(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static int repack_q4_K_to_q4_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_K); GGML_ASSERT(interleave_block == 8 || interleave_block == 4); constexpr int nrows_interleaved = 8; block_q4_Kx8 * dst = (block_q4_Kx8*)t->data; const block_q4_K * src = (const block_q4_K*) data; block_q4_K dst_tmp[8]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK_K; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_K)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++ ) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q4_Kx8(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static int repack_q2_K_to_q2_K_8_bl(struct ggml_tensor * t, int 
interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q2_K); GGML_ASSERT(interleave_block == 8); constexpr int nrows_interleaved = 8; block_q2_Kx8 * dst = (block_q2_Kx8*)t->data; const block_q2_K * src = (const block_q2_K*) data; block_q2_K dst_tmp[8]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK_K; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q2_K)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++ ) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q2_Kx8(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); GGML_ASSERT(interleave_block == 8); constexpr int nrows_interleaved = 8; block_q4_0x8 * dst = (block_q4_0x8*)t->data; const block_q4_0 * src = (const block_q4_0*) data; block_q4_0 dst_tmp[8]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK4_0; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++ ) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q4_0x8(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static int repack_q8_0_to_q8_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q8_0); GGML_ASSERT(interleave_block == 4 || interleave_block == 8); constexpr int nrows_interleaved = 4; block_q8_0x4 * dst = (block_q8_0x4 *) t->data; const block_q8_0 * src = (const block_q8_0 *) data; block_q8_0 dst_tmp[4]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK8_0; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q8_0)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q8_0x4(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; } static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_size_interleave) { block_iq4_nlx4 out; for (int i = 0; i < 4; i++) { out.d[i] = in[i].d; } const int end = QK4_NL * 2 / blck_size_interleave; // TODO: this branch seems wrong //if (blck_size_interleave == 8) { // for (int i = 0; i < end; ++i) { // int src_id = i % 4; // int src_offset = (i / 4) * blck_size_interleave; // int dst_offset = i * blck_size_interleave; // // Using memcpy to avoid unaligned memory accesses // memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); // } //} else if (blck_size_interleave == 4) { for (int i = 0; i < end; ++i) { int src_id = i % 4; int src_offset = (i / 4) * blck_size_interleave; int dst_offset = i * blck_size_interleave; memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint32_t)); } } else { GGML_ASSERT(false); } return out; } static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT 
data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); GGML_ASSERT(interleave_block == 4); const block_iq4_nl * src = (const block_iq4_nl *)data; block_iq4_nlx4 * dst = ( block_iq4_nlx4 *)t->data; block_iq4_nl dst_tmp[4]; int nrow = ggml_nrows(t); int nrows_interleaved = 4; int nblocks = t->ne[0] / QK4_NL; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_iq4_nlx4(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static block_iq4_nlx8 make_block_iq4_nlx8(block_iq4_nl * in, unsigned int blck_size_interleave) { block_iq4_nlx8 out; for (int i = 0; i < 8; i++) { out.d[i] = in[i].d; } const int end = QK4_NL * 4 / blck_size_interleave; if (blck_size_interleave == 8) { for (int i = 0; i < end; ++i) { int src_id = i % 8; int src_offset = (i / 8) * blck_size_interleave; int dst_offset = i * blck_size_interleave; memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); } } else { GGML_ASSERT(false); } return out; } static int repack_iq4_nl_to_iq4_nl_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); GGML_ASSERT(interleave_block == 8); const block_iq4_nl * src = (const block_iq4_nl *)data; block_iq4_nlx8 * dst = ( block_iq4_nlx8 *)t->data; block_iq4_nl dst_tmp[8]; int nrow = ggml_nrows(t); int nrows_interleaved = 8; int nblocks = t->ne[0] / QK4_NL; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); if (t->ne[1] % nrows_interleaved != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_iq4_nlx8(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } namespace ggml::cpu::repack { // repack template int repack(struct ggml_tensor *, const void *, size_t); // TODO: generalise. 
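// The specializations below all follow the same pattern: walk the tensor in
// groups of nrows_interleaved consecutive rows and, for each block column x,
// build one interleaved super-block from the x-th block of every row in the
// group. A minimal sketch of that shared loop (illustrative only, kept out of
// the build; repack_rows_sketch and make_interleaved are hypothetical names,
// the latter standing in for the concrete make_block_* helpers above):
#if 0
template <typename BLOCK, typename BLOCK_XN, int NROWS>
static int repack_rows_sketch(BLOCK_XN * dst, const BLOCK * src, int64_t nrow, int64_t nblocks) {
    if (nrow % NROWS != 0) {
        return -1; // same bail-out as the repack_* functions above
    }
    for (int64_t b = 0; b < nrow; b += NROWS) {
        for (int64_t x = 0; x < nblocks; x++) {
            BLOCK tmp[NROWS];
            for (int i = 0; i < NROWS; i++) {
                tmp[i] = src[x + i * nblocks]; // x-th block of row (b + i)
            }
            *dst++ = make_interleaved(tmp);    // placeholder for make_block_*()
        }
        src += NROWS * nblocks;
    }
    return 0;
}
#endif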
template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_K_to_q4_K_8_bl(t, 8, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_K_to_q4_K_8_bl(t, 4, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q2_K_to_q2_K_8_bl(t, 8, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size); } // TODO: needs to be revisited //template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { // return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size); //} template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_iq4_nl_to_iq4_nl_8_bl(t, 8, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q8_0_to_q8_0_4_bl(t, 4, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q8_0_to_q8_0_4_bl(t, 8, data, data_size); } // gemv template void gemv(int, float *, size_t, const void *, const void *, int, int); template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q4_K_8x4_q8_K(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q2_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_iq4_nl_8x8_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q8_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemv_q8_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); } // gemm template void gemm(int, float *, size_t, const void *, const void *, int, int); template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void 
gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q4_K_8x4_q8_K(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q2_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_iq4_nl_8x8_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q8_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); } template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { ggml_gemm_q8_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); } class tensor_traits_base : public ggml::cpu::tensor_traits { public: virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0; }; template class tensor_traits : public tensor_traits_base { bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { // not realy a GGML_TYPE_Q8_0 but same size. switch (op->op) { case GGML_OP_MUL_MAT: { size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); return true; } case GGML_OP_MUL_MAT_ID: { size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); size = GGML_PAD(size, sizeof(int64_t)); // + padding for next bloc. 
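// Work-buffer layout for MUL_MAT_ID (consumed by forward_mul_mat_id below):
//   [ src1 converted to PARAM_TYPE, padded to sizeof(int64_t)  ]
//   [ matrix_row_counts : n_as (= ne02) int64_t entries        ]
//   [ matrix_rows       : n_as * ne12 mmid_row_mapping entries ]
// Each mmid_row_mapping is two int32_t, i.e. one int64_t, so the
// n_as * (ne12 + 1) * sizeof(int64_t) added below covers both the per-expert
// row counts and the row-mapping table.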
const int64_t ne02 = op->src[0]->ne[2]; // n_as, n_expert const int64_t ne12 = op->src[1]->ne[2]; // n_tokens const size_t sizeof_mmid_row_mapping = sizeof(int64_t); size += sizeof_mmid_row_mapping*ne02*(ne12 + 1); return true; } default: // GGML_ABORT("fatal error"); break; } return false; } bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { switch (op->op) { case GGML_OP_MUL_MAT: forward_mul_mat(params, op); return true; case GGML_OP_MUL_MAT_ID: forward_mul_mat_id(params, op); return true; default: // GGML_ABORT("fatal error"); break; } return false; } void forward_mul_mat_one_chunk(ggml_compute_params * params, ggml_tensor * op, int64_t src0_start, int64_t src0_end, int64_t src1_start, int64_t src1_end) { const ggml_tensor * src0 = op->src[0]; const ggml_tensor * src1 = op->src[1]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); GGML_ASSERT(ne03 == 1 && ne13 == 1); GGML_ASSERT(ne12 % ne02 == 0); const int64_t r2 = ne12 / ne02; const int64_t i12 = src1_start / ne1; const int64_t i11 = src1_start - i12 * ne1; // Determine batch index const int64_t i02 = i12 / r2; const int64_t i1 = i11; const int64_t i2 = i12; const char * src0_ptr = (const char *) src0->data + i02 * nb02; const char * src1_ptr = (const char *) params->wdata + (i11 + i12 * ne11) * src1_col_stride; char * dst_ptr = ((char *) dst->data + (i1 * nb1 + i2 * nb2)); const int64_t nrows = src1_end - src1_start; const int64_t ncols = src0_end - src0_start; GGML_ASSERT(src1_ptr + src1_col_stride * nrows <= (const char *) params->wdata + params->wsize); // If there are more than three rows in src1, use gemm; otherwise, use gemv. if (nrows > 3) { gemm(ne00, (float *) (dst_ptr) + src0_start, nb1 / nb0, src0_ptr + src0_start * nb01, src1_ptr, nrows - (nrows % 4), ncols); } for (int iter = nrows - (nrows % 4); iter < nrows; iter++) { gemv(ne00, (float *) (dst_ptr + (iter * nb1)) + src0_start, ne01, src0_ptr + src0_start * nb01, src1_ptr + (src1_col_stride * iter), 1 /* nrows */, ncols); } } void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) { const ggml_tensor * src0 = op->src[0]; const ggml_tensor * src1 = op->src[1]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; GGML_ASSERT(ne0 == ne01); GGML_ASSERT(ne1 == ne11); GGML_ASSERT(ne2 == ne12); GGML_ASSERT(ne3 == ne13); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); // TODO: General batched mul mat for 4D tensors // Currently only supports 3D tensors GGML_ASSERT(ne03 == 1); GGML_ASSERT(ne13 == 1); GGML_ASSERT(ne3 == 1); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2); char * wdata = static_cast(params->wdata); const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); const size_t nbw2 = nbw1 * ne11; assert(params->wsize >= nbw2 * ne12); const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; // INFO: Quantization is done in planes to avoid extra complexity in chunking. // Flattening dimensions not multiple of INTER_SIZE would require extra handling depending on how // the planes are broadcast. 
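// Convert src1 (F32) into the PARAM_TYPE work buffer one ne12 plane at a
// time: groups of 4 rows go through ggml_quantize_mat_t, the interleaved
// 4-row quantizer, and the remaining ne11 % 4 rows fall back to the plain
// from_float conversion. Rows are strided across threads, e.g. with ne11 = 10
// and nth = 4: thread 0 packs rows 0-3, thread 1 packs rows 4-7, then
// threads 0 and 1 convert rows 8 and 9 individually.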
for (int64_t i12 = 0; i12 < ne12; i12++) { char * data_ptr = (char *) src1->data + i12 * nb12; char * wdata_ptr = wdata + i12 * nbw2; for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { ggml_quantize_mat_t((float *) (data_ptr + i11 * nb11), (void *) (wdata_ptr + i11 * nbw1), 4, ne10); } const int64_t i11_processed = ne11 - ne11 % 4; for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { from_float((float *) (data_ptr + i11 * nb11), (void *) (wdata_ptr + i11 * nbw1), ne10); } } // disable for NUMA const bool disable_chunking = ggml_is_numa(); // 4x chunks per thread const int64_t nr0 = ggml_nrows(op->src[0]); int nth_scaled = nth * 4; int64_t chunk_size0 = (nr0 + nth_scaled - 1) / nth_scaled; int64_t nchunk0 = (nr0 + chunk_size0 - 1) / chunk_size0; // src1 is chunked only by full planes. // When we flatten we need to address dimensions not multiple of the q8 INTER_SIZE // to route them thorugh GEMV. // nchunk1 = ne12 also avoids messing the chunking for models with no 3d tensors // to avoid affecting their performance int64_t nchunk1 = ne12; // Ensure minimum chunk size to avoid alignment issues with high thread counts // Minimum chunk size should be at least NB_COLS to prevent overlapping chunks after alignment const int64_t min_chunk_size = NB_COLS; if (nchunk0 > 0 && (nr0 / nchunk0) < min_chunk_size && nr0 >= min_chunk_size) { nchunk0 = (nr0 + min_chunk_size - 1) / min_chunk_size; } int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; // Only increase nchunk0 to nth if it won't make chunks too small if (nth == 1 || ((nchunk0 < nth || disable_chunking) && (nr0 + nth - 1) / nth >= min_chunk_size)) { nchunk0 = nth; dr0 = (nr0 + nchunk0 - 1) / nchunk0; } // Ensure nchunk doesn't exceed the number of rows divided by minimum chunk size // This prevents creating too many tiny chunks that could overlap after alignment const int64_t max_nchunk = (nr0 + min_chunk_size - 1) / min_chunk_size; nchunk0 = MIN(nchunk0, max_nchunk); if (ith == 0) { // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. ggml_threadpool_chunk_set(params->threadpool, nth); } ggml_barrier(params->threadpool); // The first chunk comes from our thread_id, the rest will get auto-assigned. int current_chunk = ith; while (current_chunk < nchunk0 * nchunk1) { const int64_t ith0 = current_chunk % nchunk0; const int64_t ith1 = current_chunk / nchunk0; int64_t src0_start = dr0 * ith0; int64_t src0_end = MIN(src0_start + dr0, nr0); // full-plane range for src1 int64_t src1_start = ith1 * ne11; int64_t src1_end = (ith1 + 1) * ne11; // Align boundaries to NB_COLS - round up to ensure all data is included // The chunk size limiting above ensures chunks are large enough to prevent overlaps src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start; src0_end = (src0_end % NB_COLS) ? 
src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; src0_end = MIN(src0_end, ne01); // Make sure current plane is the last one before exiting if (src0_start >= src0_end) { current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); continue; } forward_mul_mat_one_chunk(params, dst, src0_start, src0_end, src1_start, src1_end); current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); } } void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) { const ggml_tensor * src0 = op->src[0]; const ggml_tensor * src1 = op->src[1]; const ggml_tensor * ids = op->src[2]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == ggml_type_size(src0->type)); GGML_ASSERT(nb10 == ggml_type_size(src1->type)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); GGML_ASSERT(nb0 <= nb1); GGML_ASSERT(nb1 <= nb2); GGML_ASSERT(nb2 <= nb3); GGML_ASSERT(ne03 == 1); GGML_ASSERT(ne13 == 1); GGML_ASSERT(ne3 == 1); GGML_ASSERT(src1->type == GGML_TYPE_F32); // row groups const int n_ids = ids->ne[0]; // n_expert_used const int n_as = ne02; // n_expert const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); const size_t nbw2 = nbw1*ne11; const size_t nbw3 = nbw2*ne12; struct mmid_row_mapping { int32_t i1; int32_t i2; }; GGML_ASSERT(params->wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as*(ne12 + 1)*sizeof(mmid_row_mapping)) ); auto * wdata = (char *)params->wdata; auto * wdata_src1_end = (char *)wdata + GGML_PAD(nbw3, sizeof(int64_t)); // total of [n_as][ne12 + 1] elemets of type mmid_row_mapping (2*int32_t = int64_t) auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] // src1: float32 => param type for (int64_t i12 = 0; i12 < ne12; ++i12) { for (int64_t i11 = ith; i11 < ne11; i11 += nth) { from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11), (void *) (wdata + i12 * nbw2 + i11 * nbw1), ne10); } } #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)] if (ith == 0) { // initialize matrix_row_counts memset(matrix_row_counts, 0, n_as * sizeof(int64_t)); // group rows by src0 matrix for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { for (int32_t id = 0; id < n_ids; ++id) { const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); GGML_ASSERT(i02 >= 0 && i02 < n_as); MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 }; matrix_row_counts[i02] += 1; } } } ggml_barrier(params->threadpool); // compute each matrix multiplication in sequence for (int cur_a = 0; cur_a < n_as; ++cur_a) { const int64_t cne1 = matrix_row_counts[cur_a]; if (cne1 == 0) { continue; } const auto * src0_cur = (const char *) src0->data + cur_a*nb02; //const int64_t nr0 = ne01; // src0 rows const int64_t nr1 = cne1; // src1 rows int64_t src0_cur_start = (ith * ne01) / nth; int64_t src0_cur_end = ((ith + 1) * ne01) / nth; // Align boundaries to NB_COLS - round up to ensure all data is included src0_cur_start = (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start; src0_cur_end = (src0_cur_end % NB_COLS) ? 
src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end; if (src0_cur_end > ne01) { src0_cur_end = ne01; } if (src0_cur_start >= src0_cur_end) { return; } for (int ir1 = 0; ir1 < nr1; ir1++) { struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1); const int id = row_mapping.i1; // selected expert index const int64_t i11 = id % ne11; const int64_t i12 = row_mapping.i2; // row index in src1 const int64_t i1 = id; // selected expert index const int64_t i2 = i12; // row const auto * src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2); gemv(ne00, (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01, src0_cur + src0_cur_start * nb01, src1_col, 1, src0_cur_end - src0_cur_start); } } #undef MMID_MATRIX_ROW } int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type), (int) NB_COLS, (int) INTER_SIZE); return ggml::cpu::repack::repack(t, data, data_size); } }; } // namespace ggml::cpu::repack static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(const struct ggml_tensor * cur) { // instance for Q4 static const ggml::cpu::repack::tensor_traits q4_0_4x4_q8_0; static const ggml::cpu::repack::tensor_traits q4_0_4x8_q8_0; static const ggml::cpu::repack::tensor_traits q4_0_8x8_q8_0; // instance for Q4_K static const ggml::cpu::repack::tensor_traits q4_K_8x4_q8_K; static const ggml::cpu::repack::tensor_traits q4_K_8x8_q8_K; // instance for Q2 static const ggml::cpu::repack::tensor_traits q2_K_8x8_q8_K; // instance for IQ4 static const ggml::cpu::repack::tensor_traits iq4_nl_4x4_q8_0; static const ggml::cpu::repack::tensor_traits iq4_nl_8x8_q8_0; // instance for Q8_0 static const ggml::cpu::repack::tensor_traits q8_0_4x4_q8_0; static const ggml::cpu::repack::tensor_traits q8_0_4x8_q8_0; if (cur->type == GGML_TYPE_Q4_0) { if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0) || (ggml_cpu_has_riscv_v() && (ggml_cpu_get_rvv_vlen() >= QK4_0))) { if (cur->ne[1] % 8 == 0) { return &q4_0_8x8_q8_0; } } if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { if (cur->ne[1] % 4 == 0) { return &q4_0_4x8_q8_0; } } if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 4 == 0) { return &q4_0_4x4_q8_0; } } } else if (cur->type == GGML_TYPE_Q4_K) { if (ggml_cpu_has_avx2()) { if (cur->ne[1] % 8 == 0) { return &q4_K_8x8_q8_K; } } if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { if (cur->ne[1] % 8 == 0) { return &q4_K_8x8_q8_K; } } if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 8 == 0) { return &q4_K_8x4_q8_K; } } } else if (cur->type == GGML_TYPE_Q2_K) { if (ggml_cpu_has_avx512()) { if (cur->ne[1] % 8 == 0) { return &q2_K_8x8_q8_K; } } } else if (cur->type == GGML_TYPE_IQ4_NL) { if (ggml_cpu_has_avx2()) { if (cur->ne[1] % 8 == 0) { return &iq4_nl_8x8_q8_0; } } if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 4 == 0) { return &iq4_nl_4x4_q8_0; } } } else if (cur->type == GGML_TYPE_Q8_0) { if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { if (cur->ne[1] % 4 == 0) { return &q8_0_4x8_q8_0; } } if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 4 == 0) { return &q8_0_4x4_q8_0; } } } return nullptr; } static enum ggml_status ggml_backend_cpu_repack_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { tensor->extra = (void *) 
const_cast(ggml_repack_get_optimal_repack_type(tensor)); GGML_UNUSED(buffer); return GGML_STATUS_SUCCESS; } static void ggml_backend_cpu_repack_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor->extra; auto OK = tensor_traits->repack(tensor, data, size); GGML_ASSERT(OK == 0); GGML_UNUSED(buffer); } static const char * ggml_backend_cpu_repack_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "CPU_REPACK"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_cpu_repack_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); if (buffer == nullptr) { return nullptr; } buffer->buft = buft; buffer->iface.init_tensor = ggml_backend_cpu_repack_buffer_init_tensor; buffer->iface.set_tensor = ggml_backend_cpu_repack_buffer_set_tensor; buffer->iface.get_tensor = nullptr; buffer->iface.cpy_tensor = nullptr; return buffer; } static size_t ggml_backend_cpu_repack_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return TENSOR_ALIGNMENT; GGML_UNUSED(buft); } namespace ggml::cpu::repack { class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { if ( op->op == GGML_OP_MUL_MAT && op->src[0]->buffer && (ggml_n_dims(op->src[0]) == 2) && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type() && ggml_repack_get_optimal_repack_type(op->src[0]) ) { if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { return false; } if (op->src[1]->type == GGML_TYPE_F32) { return true; } //if (op->src[1]->type == GGML_TYPE_Q8_0) { // return true; //} // may be possible if Q8_0 packed... 
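// src1 must be F32 here because forward_mul_mat() quantizes it on the fly
// into the PARAM_TYPE work buffer; accepting a pre-quantized Q8_0 src1 (the
// branch commented out above) would additionally require repacking it into
// the interleaved q8 layout that the gemm/gemv kernels expect.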
} else if (op->op == GGML_OP_MUL_MAT_ID && op->src[0]->buffer && (ggml_n_dims(op->src[0]) == 3) && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type() && ggml_repack_get_optimal_repack_type(op->src[0]) ) { if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { return false; } if (op->src[1]->type == GGML_TYPE_F32) { return true; } //if (op->src[1]->type == GGML_TYPE_Q8_0) { // return true; //} } return false; } ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) { if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type()) { return (ggml::cpu::tensor_traits *) op->src[0]->extra; } } return nullptr; } }; } // namespace ggml::cpu::repack ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_repack = { /* .iface = */ { /* .get_name = */ ggml_backend_cpu_repack_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_repack_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_repack_buffer_type_get_alignment, /* .get_max_size = */ nullptr, // defaults to SIZE_MAX /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes /* .is_host = */ nullptr, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ new ggml::cpu::repack::extra_buffer_type(), }; return &ggml_backend_cpu_buffer_type_repack; } ggml-org-ggml-3678254/src/ggml-cpu/repack.h000066400000000000000000000233161512524704700203110ustar00rootroot00000000000000#pragma once #define GGML_COMMON_DECL_CPP #include "ggml-common.h" #include "traits.h" #include "ggml.h" // GGML internal header ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void); template constexpr int QK_0() { if constexpr (K == 4) { return QK4_0; } if constexpr (K == 8) { return QK8_0; } return -1; } template struct block { ggml_half d[N]; // deltas for N qK_0 blocks int8_t qs[(QK_0() * N * K) / 8]; // quants for N qK_0 blocks }; // control size static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding"); static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> size/padding"); static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding"); static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding"); using block_q4_0x4 = block<4, 4>; using block_q4_0x8 = block<4, 8>; using block_q8_0x4 = block<8, 4>; using block_q8_0x8 = block<8, 8>; struct block_q4_Kx8 { ggml_half d[8]; // super-block scale for quantized scales ggml_half dmin[8]; // super-block scale for quantized mins uint8_t scales[96]; // scales and mins, quantized with 6 bits uint8_t qs[1024]; // 4--bit quants }; static_assert(sizeof(block_q4_Kx8) == sizeof(ggml_half) * 16 + K_SCALE_SIZE * 8 + QK_K * 4, "wrong q4_K block size/padding"); struct block_q2_Kx8 { ggml_half d[8]; // super-block scale for quantized scales ggml_half dmin[8]; // super-block scale for quantized mins uint8_t scales[128]; // scales and mins, quantized with 4 bits uint8_t qs[512]; // 2--bit quants }; static_assert(sizeof(block_q2_Kx8) == sizeof(ggml_half) * 16 + QK_K/2 + QK_K * 2, "wrong q2_K block size/padding"); struct block_q8_Kx4 { float d[4]; // delta int8_t qs[QK_K * 4]; // quants int16_t bsums[QK_K / 4]; // sum of quants in groups of 16 }; 
static_assert(sizeof(block_q8_Kx4) == sizeof(float) * 4 + QK_K * 4 + (QK_K / 4) * sizeof(int16_t), "wrong q8_K block size/padding"); struct block_iq4_nlx4 { ggml_half d[4]; // deltas for 4 iq4_nl blocks uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks }; static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding"); struct block_iq4_nlx8 { ggml_half d[8]; // deltas for 8 iq4_nl blocks uint8_t qs[QK4_NL * 4]; // nibbles / quants for 8 iq4_nl blocks }; static_assert(sizeof(block_iq4_nlx8) == 8 * sizeof(ggml_half) + QK4_NL * 4, "wrong iq4_nlx8 block size/padding"); #if defined(__cplusplus) extern "C" { #endif void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_K_8x4_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_iq4_nl_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_K_8x4_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q8_0_4x4_q8_0(int n, float * 
GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q8_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q8_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q8_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); // Native implementations void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_K_8x4_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_iq4_nl_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_K_8x4_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, 
int nr, int nc); void ggml_gemv_q8_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemv_q8_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q8_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q8_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); #if defined(__cplusplus) } // extern "C" #endif ggml-org-ggml-3678254/src/ggml-cpu/simd-mappings.h000066400000000000000000001402671512524704700216210ustar00rootroot00000000000000#pragma once #include "ggml-cpu-impl.h" #ifdef __ARM_FEATURE_SVE #include #endif // __ARM_FEATURE_SVE #if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__) // if YCM cannot find , make a symbolic link to it, for example: // // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ // #include #endif #if defined(__riscv_v_intrinsic) #include #endif #ifdef __cplusplus extern "C" { #endif // // simd mappings // // FP16 to FP32 conversion // 16-bit float // on Arm, we use __fp16 // on x86, we use uint16_t // // for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616 // for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843 // #if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__) #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) neon_compute_fp16_to_fp32(x) #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) neon_compute_fp32_to_fp16(x) #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) static inline float neon_compute_fp16_to_fp32(ggml_fp16_t h) { __fp16 tmp; memcpy(&tmp, &h, sizeof(ggml_fp16_t)); return (float)tmp; } static inline ggml_fp16_t neon_compute_fp32_to_fp16(float f) { ggml_fp16_t res; __fp16 tmp = f; memcpy(&res, &tmp, sizeof(ggml_fp16_t)); return res; } #elif defined(__F16C__) #ifdef _MSC_VER #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) #else #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) #endif #elif defined(__POWER9_VECTOR__) #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) power_compute_fp16_to_fp32(x) #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) power_compute_fp32_to_fp16(x) /* the inline asm below is about 12% faster than the lookup method */ #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x) static inline float power_compute_fp16_to_fp32(ggml_fp16_t h) { float f; double d; __asm__( "mtfprd %0,%2\n" "xscvhpdp %0,%0\n" "frsp %1,%0\n" : /* temp */ "=d"(d), /* out */ "=f"(f): /* in */ "r"(h)); return f; } static inline ggml_fp16_t power_compute_fp32_to_fp16(float f) { double d; ggml_fp16_t r; __asm__( /* xscvdphp can work on double or single precision */ "xscvdphp %0,%2\n" "mffprd %1,%0\n" : /* temp */ "=d"(d), /* out */ "=r"(r): /* in */ "f"(f)); return r; } #elif defined(__riscv) && defined(__riscv_zfhmin) static inline float riscv_compute_fp16_to_fp32(ggml_fp16_t h) { _Float16 hf; memcpy(&hf, &h, 
sizeof(ggml_fp16_t)); return hf; } static inline ggml_fp16_t riscv_compute_fp32_to_fp16(float f) { ggml_fp16_t res; _Float16 hf = (_Float16)f; memcpy(&res, &hf, sizeof(ggml_fp16_t)); return res; } #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) riscv_compute_fp16_to_fp32(x) #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) riscv_compute_fp32_to_fp16(x) #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x) #endif // precomputed f32 table for f16 (256 KB) // defined in ggml-cpu.c, initialized in ggml_cpu_init() extern float ggml_table_f32_f16[1 << 16]; // On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, // so we define GGML_CPU_FP16_TO_FP32 and GGML_CPU_FP32_TO_FP16 elsewhere for NEON. // This is also true for POWER9. #if !defined(GGML_CPU_FP16_TO_FP32) inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { uint16_t s; memcpy(&s, &f, sizeof(uint16_t)); return ggml_table_f32_f16[s]; } #define GGML_CPU_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) #endif #if !defined(GGML_CPU_FP32_TO_FP16) #define GGML_CPU_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) #endif // we define a common set of C macros which map to specific intrinsics based on the current architecture // we then implement the fundamental computation operations below using only these macros // adding support for new architectures requires to define the corresponding SIMD macros // // GGML_F32_STEP / GGML_F16_STEP // number of elements to process in a single step // // GGML_F32_EPR / GGML_F16_EPR // number of elements to fit in a single register // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_FMA) #define GGML_SIMD // F32 SVE #define GGML_F32_EPR 8 #define DEFAULT_PG svptrue_b32() #define GGML_F32xt svfloat32_t #define GGML_F32xt_ZERO svdup_n_f32(0.0f) #define GGML_F32xt_SET1(x) svdup_n_f32(x) #define GGML_F32xt_LOAD_IMPL(pg, a) svld1_f32(pg, a) #define GGML_F32xt_LOAD(a) GGML_F32xt_LOAD_IMPL(DEFAULT_PG, a) #define GGML_F32xt_STORE_IMPL(pg, a, b) svst1_f32(pg, a, b) #define GGML_F32xt_STORE(a, b) GGML_F32xt_STORE_IMPL(DEFAULT_PG, a, b) #define GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, b, c, a) #define GGML_F32xt_FMA(a, b, c) GGML_F32xt_FMA_IMPL(DEFAULT_PG, a, b, c) #define GGML_F32xt_ADD_IMPL(pg, a, b) svadd_f32_m(pg, a, b) #define GGML_F32xt_ADD(a, b) GGML_F32xt_ADD_IMPL(DEFAULT_PG, a, b) #define GGML_F32xt_MUL_IMPL(pg, a, b) svmul_f32_m(pg, a, b) #define GGML_F32xt_MUL(a, b) GGML_F32xt_MUL_IMPL(DEFAULT_PG, a, b) #define GGML_F32xt_REDUCE_ONE_IMPL(pg, a) svaddv(pg, a) #define GGML_F32xt_REDUCE_ONE(a) GGML_F32xt_REDUCE_ONE_IMPL(DEFAULT_PG, a) #define GGML_F32xt_REDUCE_IMPL(pg, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \ { \ sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum2); \ sum3 = svadd_f32_m(DEFAULT_PG, sum3, sum4); \ sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum6); \ sum7 = svadd_f32_m(DEFAULT_PG, sum7, sum8); \ sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum3); \ sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum7); \ sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum5); \ (res) = (ggml_float) GGML_F32xt_REDUCE_ONE(sum1); \ } #define GGML_F32xt_REDUCE(res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \ GGML_F32xt_REDUCE_IMPL(DEFAULT_PG, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) #define GGML_F32_VEC GGML_F32xt #define GGML_F32_VEC_ZERO GGML_F32xt_ZERO #define GGML_F32_VEC_SET1 GGML_F32xt_SET1 #define GGML_F32_VEC_LOAD GGML_F32xt_LOAD #define GGML_F32_VEC_STORE GGML_F32xt_STORE #define GGML_F32_VEC_FMA 
GGML_F32xt_FMA #define GGML_F32_VEC_ADD GGML_F32xt_ADD #define GGML_F32_VEC_MUL GGML_F32xt_MUL #define GGML_F32_VEC_REDUCE GGML_F32xt_REDUCE // F16 SVE #define DEFAULT_PG32 svptrue_b32() #define DEFAULT_PG16 svptrue_b16() #define GGML_F32Cxt svfloat16_t #define GGML_F32Cxt_ZERO svdup_n_f16(0.0f) #define GGML_F32Cxt_SET1(x) svdup_n_f16(x) #define GGML_F32Cxt_LOAD(p) svld1_f16(DEFAULT_PG16, (const __fp16 *)(p)) #define GGML_F32Cxt_STORE(dst_ptr, src_vec) svst1_f16(DEFAULT_PG16, (__fp16 *)(dst_ptr), (src_vec)) #define GGML_F32Cxt_FMA_IMPL(pg, a, b, c) svmad_f16_x(pg, b, c, a) #define GGML_F32Cxt_FMA(a, b, c) GGML_F32Cxt_FMA_IMPL(DEFAULT_PG16, a, b, c) #define GGML_F32Cxt_ADD_IMPL(pg, a, b) svadd_f16_x(pg, a, b) #define GGML_F32Cxt_ADD(a, b) GGML_F32Cxt_ADD_IMPL(DEFAULT_PG16, a, b) #define GGML_F32Cxt_MUL_IMPL(pg, a, b) svmul_f16_x(pg, a, b) #define GGML_F32Cxt_MUL(a, b) GGML_F32Cxt_MUL_IMPL(DEFAULT_PG16, a, b) #define GGML_F32Cxt_REDUCE GGML_F16xt_REDUCE_MIXED #define GGML_F16x_VEC GGML_F32Cxt #define GGML_F16x_VEC_ZERO GGML_F32Cxt_ZERO #define GGML_F16x_VEC_SET1 GGML_F32Cxt_SET1 #define GGML_F16x_VEC_LOAD(p, i) GGML_F32Cxt_LOAD(p) #define GGML_F16x_VEC_STORE(p, r, i) GGML_F32Cxt_STORE((__fp16 *)(p), r) #define GGML_F16x_VEC_FMA GGML_F32Cxt_FMA #define GGML_F16x_VEC_ADD GGML_F32Cxt_ADD #define GGML_F16x_VEC_MUL GGML_F32Cxt_MUL #define GGML_F16x_VEC_REDUCE GGML_F32Cxt_REDUCE #define GGML_F16xt_REDUCE_ONE_IMPL(pg, a) svaddv_f16(pg, a) #define GGML_F16xt_REDUCE_ONE(a) GGML_F16xt_REDUCE_ONE_IMPL(DEFAULT_PG16, a) #define GGML_F16xt_REDUCE_MIXED_IMPL(pg16, res, sum1, sum2, sum3, sum4) \ { \ sum1 = svadd_f16_x(pg16, sum1, sum2); \ sum3 = svadd_f16_x(pg16, sum3, sum4); \ sum1 = svadd_f16_x(pg16, sum1, sum3); \ __fp16 sum_f16 = svaddv_f16(pg16, sum1); \ (res) = (ggml_float) sum_f16; \ } #define GGML_F16xt_REDUCE_MIXED(res, sum1, sum2, sum3, sum4) \ GGML_F16xt_REDUCE_MIXED_IMPL(DEFAULT_PG16, res, sum1, sum2, sum3, sum4) // F16 NEON #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #define GGML_F16_STEP 32 #define GGML_F16_EPR 8 #define GGML_F16x8 float16x8_t #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) #define GGML_F16x8_SET1(x) vdupq_n_f16(x) #define GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x)) #define GGML_F16x8_STORE vst1q_f16 #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) #define GGML_F16x8_ADD vaddq_f16 #define GGML_F16x8_MUL vmulq_f16 #define GGML_F16x8_REDUCE(res, x) \ do { \ int offset = GGML_F16_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ } \ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \ const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \ (res) = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ } while (0) #define GGML_F16_VEC GGML_F16x8 #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((__fp16 *)(p), (r)[i]) #define GGML_F16_VEC_FMA GGML_F16x8_FMA #define GGML_F16_VEC_ADD GGML_F16x8_ADD #define GGML_F16_VEC_MUL GGML_F16x8_MUL #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE #else // if FP16 vector arithmetic is not supported, we use FP32 instead // and take advantage of the vcvt_ functions to convert to/from FP16 #define GGML_F16_STEP 16 #define GGML_F16_EPR 4 #define GGML_F32Cx4 
float32x4_t #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x))) #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) #define GGML_F32Cx4_ADD vaddq_f32 #define GGML_F32Cx4_MUL vmulq_f32 #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE #define GGML_F16_VEC GGML_F32Cx4 #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((__fp16 *)(p), r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE #endif #elif defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) #define GGML_SIMD // F32 NEON #define GGML_F32_STEP 16 #define GGML_F32_EPR 4 #define GGML_F32x4 float32x4_t #define GGML_F32x4_ZERO vdupq_n_f32(0.0f) #define GGML_F32x4_SET1(x) vdupq_n_f32(x) #define GGML_F32x4_LOAD vld1q_f32 #define GGML_F32x4_STORE vst1q_f32 #define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) #define GGML_F32x4_ADD vaddq_f32 #define GGML_F32x4_MUL vmulq_f32 #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) #define GGML_F32x4_REDUCE(res, x) \ { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \ } \ (res) = (ggml_float) GGML_F32x4_REDUCE_ONE((x)[0]); \ } #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE // F16 NEON #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #define GGML_F16_STEP 32 #define GGML_F16_EPR 8 #define GGML_F16x8 float16x8_t #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) #define GGML_F16x8_SET1(x) vdupq_n_f16(x) #define GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x)) #define GGML_F16x8_STORE vst1q_f16 #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) #define GGML_F16x8_ADD vaddq_f16 #define GGML_F16x8_MUL vmulq_f16 #define GGML_F16x8_REDUCE(res, x) \ do { \ int offset = GGML_F16_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ } \ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \ const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \ (res) = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ } while (0) #define GGML_F16_VEC GGML_F16x8 #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((__fp16 *)(p), (r)[i]) #define GGML_F16_VEC_FMA GGML_F16x8_FMA #define GGML_F16_VEC_ADD GGML_F16x8_ADD #define GGML_F16_VEC_MUL GGML_F16x8_MUL #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE #else // if FP16 vector arithmetic is not supported, we use FP32 
instead // and take advantage of the vcvt_ functions to convert to/from FP16 #define GGML_F16_STEP 16 #define GGML_F16_EPR 4 #define GGML_F32Cx4 float32x4_t #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x))) #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) #define GGML_F32Cx4_ADD vaddq_f32 #define GGML_F32Cx4_MUL vmulq_f32 #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE #define GGML_F16_VEC GGML_F32Cx4 #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((__fp16 *)(p), r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE #endif #elif defined(__AVX512F__) #define GGML_SIMD // F32 AVX512 #define GGML_F32_STEP 64 #define GGML_F32_EPR 16 #define GGML_F32x16 __m512 #define GGML_F32x16_ZERO _mm512_setzero_ps() #define GGML_F32x16_SET1(x) _mm512_set1_ps(x) #define GGML_F32x16_LOAD _mm512_loadu_ps #define GGML_F32x16_STORE _mm512_storeu_ps // _mm512_fmadd_ps is defined in AVX512F so no guard is required #define GGML_F32x16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a) #define GGML_F32x16_ADD _mm512_add_ps #define GGML_F32x16_MUL _mm512_mul_ps #define GGML_F32x16_REDUCE(res, x) \ do { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm512_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm512_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm512_add_ps(x[i], x[offset+i]); \ } \ res = (ggml_float) _mm512_reduce_add_ps(x[0]); \ } while (0) // TODO: is this optimal ? 
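// GGML_F32x16_REDUCE above folds the GGML_F32_ARR (= GGML_F32_STEP /
// GGML_F32_EPR) partial-sum registers pairwise, each round adding the upper
// half of the array onto the lower half, and finishes with one horizontal add
// of the surviving register. A scalar sketch of the same tree reduction
// (ggml_f32_reduce_sketch is a hypothetical helper, kept out of the build):
#if 0
static inline float ggml_f32_reduce_sketch(float * x, int n_acc /* GGML_F32_ARR */) {
    for (int offset = n_acc >> 1; offset > 0; offset >>= 1) {
        for (int i = 0; i < offset; ++i) {
            x[i] += x[offset + i]; // fold the upper half into the lower half
        }
    }
    return x[0]; // the real macro ends with _mm512_reduce_add_ps on x[0]
}
#endif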
#define GGML_F32_VEC GGML_F32x16 #define GGML_F32_VEC_ZERO GGML_F32x16_ZERO #define GGML_F32_VEC_SET1 GGML_F32x16_SET1 #define GGML_F32_VEC_LOAD GGML_F32x16_LOAD #define GGML_F32_VEC_STORE GGML_F32x16_STORE #define GGML_F32_VEC_FMA GGML_F32x16_FMA #define GGML_F32_VEC_ADD GGML_F32x16_ADD #define GGML_F32_VEC_MUL GGML_F32x16_MUL #define GGML_F32_VEC_REDUCE GGML_F32x16_REDUCE // F16 AVX512 // F16 AVX #define GGML_F16_STEP 64 #define GGML_F16_EPR 16 // AVX512 has FP16 extension (AVX512_FP16) but I don't have it on my machine so I use FP32 instead #define GGML_F32Cx16 __m512 #define GGML_F32Cx16_ZERO _mm512_setzero_ps() #define GGML_F32Cx16_SET1(x) _mm512_set1_ps(x) // unlike _mm256_cvt intrinsics that require F16C, _mm512_cvt is defined in AVX512F // so F16C guard isn't required #define GGML_F32Cx16_LOAD(x) _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(x))) #define GGML_F32Cx16_STORE(x, y) _mm256_storeu_si256((__m256i *)(x), _mm512_cvtps_ph(y, 0)) #define GGML_F32Cx16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a) #define GGML_F32Cx16_ADD _mm512_add_ps #define GGML_F32Cx16_MUL _mm512_mul_ps #define GGML_F32Cx16_REDUCE(res, x) \ do { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm512_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm512_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm512_add_ps(x[i], x[offset+i]); \ } \ res = (ggml_float) _mm512_reduce_add_ps(x[0]); \ } while (0) #define GGML_F16_VEC GGML_F32Cx16 #define GGML_F16_VEC_ZERO GGML_F32Cx16_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx16_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx16_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx16_STORE(p, r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx16_FMA #define GGML_F16_VEC_ADD GGML_F32Cx16_ADD #define GGML_F16_VEC_MUL GGML_F32Cx16_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx16_REDUCE #elif defined(__AVX__) #define GGML_SIMD // F32 AVX #define GGML_F32_STEP 32 #define GGML_F32_EPR 8 #define GGML_F32x8 __m256 #define GGML_F32x8_ZERO _mm256_setzero_ps() #define GGML_F32x8_SET1(x) _mm256_set1_ps(x) #define GGML_F32x8_LOAD _mm256_loadu_ps #define GGML_F32x8_STORE _mm256_storeu_ps #if defined(__FMA__) #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) #else #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) #endif #define GGML_F32x8_ADD _mm256_add_ps #define GGML_F32x8_MUL _mm256_mul_ps #define GGML_F32x8_REDUCE(res, x) \ do { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm256_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm256_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm256_add_ps(x[i], x[offset+i]); \ } \ const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \ _mm256_extractf128_ps(x[0], 1)); \ const __m128 t1 = _mm_hadd_ps(t0, t0); \ res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \ } while (0) // TODO: is this optimal ? 
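// For 256-bit AVX the reduction above ends with an explicit horizontal step:
// the upper 128-bit lane of x[0] is added onto the lower one via
// _mm256_extractf128_ps, then two _mm_hadd_ps rounds collapse the remaining
// four floats into the scalar result.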
#define GGML_F32_VEC GGML_F32x8 #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO #define GGML_F32_VEC_SET1 GGML_F32x8_SET1 #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD #define GGML_F32_VEC_STORE GGML_F32x8_STORE #define GGML_F32_VEC_FMA GGML_F32x8_FMA #define GGML_F32_VEC_ADD GGML_F32x8_ADD #define GGML_F32_VEC_MUL GGML_F32x8_MUL #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE // F16 AVX #define GGML_F16_STEP 32 #define GGML_F16_EPR 8 // F16 arithmetic is not supported by AVX, so we use F32 instead #define GGML_F32Cx8 __m256 #define GGML_F32Cx8_ZERO _mm256_setzero_ps() #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) #if defined(__F16C__) // the _mm256_cvt intrinsics require F16C #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x))) #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) #else static inline __m256 __avx_f32cx8_load(const ggml_fp16_t * x) { float tmp[8]; for (int i = 0; i < 8; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); } static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { float arr[8]; _mm256_storeu_ps(arr, y); for (int i = 0; i < 8; i++) x[i] = GGML_CPU_FP32_TO_FP16(arr[i]); } #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) #endif #define GGML_F32Cx8_FMA GGML_F32x8_FMA #define GGML_F32Cx8_ADD _mm256_add_ps #define GGML_F32Cx8_MUL _mm256_mul_ps #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE #define GGML_F16_VEC GGML_F32Cx8 #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE #elif defined(__POWER9_VECTOR__) #define GGML_SIMD // F32 POWER9 #define GGML_F32_STEP 32 #define GGML_F32_EPR 4 #define GGML_F32x4 vector float #define GGML_F32x4_ZERO {0.0f} #define GGML_F32x4_SET1 vec_splats #define GGML_F32x4_LOAD(p) vec_xl(0, p) #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) #define GGML_F32x4_ADD vec_add #define GGML_F32x4_MUL vec_mul #define GGML_F32x4_REDUCE(res, x) \ { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset+i]); \ } \ res = vec_extract(x[0], 0) + \ vec_extract(x[0], 1) + \ vec_extract(x[0], 2) + \ vec_extract(x[0], 3); \ } #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE // F16 POWER9 #define GGML_F16_STEP GGML_F32_STEP #define GGML_F16_EPR GGML_F32_EPR #define GGML_F16_VEC GGML_F32x4 #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO #define GGML_F16_VEC_SET1 GGML_F32x4_SET1 #define GGML_F16_VEC_FMA GGML_F32x4_FMA #define GGML_F16_VEC_ADD GGML_F32x4_ADD #define GGML_F16_VEC_MUL GGML_F32x4_MUL #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE // Use vec_xl, not vec_ld, in case the load address is not aligned. 
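// The F16 load below pulls in eight ggml_fp16_t values at a time with vec_xl
// and widens one group of four to F32 for even register indices and the
// other group for odd ones (GGML_F16_EPR is 4, so a single 128-bit load
// covers two groups); the matching store runs only on odd i and packs two
// F32 vectors back into eight halves, with GGML_ENDIAN_BYTE selecting the
// operand order for the current endianness.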
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \ vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \ vec_extract_fp32_from_shortl(vec_xl(0, p)) static inline unsigned char ggml_endian_byte(int i) { uint16_t tmp_val = 1; return ((unsigned char *)&tmp_val)[i]; } #define GGML_ENDIAN_BYTE(i) ggml_endian_byte(i) #define GGML_F16_VEC_STORE(p, r, i) \ if (i & 0x1) \ vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \ r[i - GGML_ENDIAN_BYTE(0)]), \ 0, p - GGML_F16_EPR) #elif defined(__wasm_simd128__) #define GGML_SIMD // F32 WASM #define GGML_F32_STEP 16 #define GGML_F32_EPR 4 #define GGML_F32x4 v128_t #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x) #define GGML_F32x4_LOAD wasm_v128_load #define GGML_F32x4_STORE wasm_v128_store #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) #define GGML_F32x4_ADD wasm_f32x4_add #define GGML_F32x4_MUL wasm_f32x4_mul #define GGML_F32x4_REDUCE(res, x) \ { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ res = wasm_f32x4_extract_lane(x[0], 0) + \ wasm_f32x4_extract_lane(x[0], 1) + \ wasm_f32x4_extract_lane(x[0], 2) + \ wasm_f32x4_extract_lane(x[0], 3); \ } #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE // F16 WASM #define GGML_F16_STEP 16 #define GGML_F16_EPR 4 inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { float tmp[4]; tmp[0] = GGML_CPU_FP16_TO_FP32(p[0]); tmp[1] = GGML_CPU_FP16_TO_FP32(p[1]); tmp[2] = GGML_CPU_FP16_TO_FP32(p[2]); tmp[3] = GGML_CPU_FP16_TO_FP32(p[3]); return wasm_v128_load(tmp); } inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { float tmp[4]; wasm_v128_store(tmp, x); p[0] = GGML_CPU_FP32_TO_FP16(tmp[0]); p[1] = GGML_CPU_FP32_TO_FP16(tmp[1]); p[2] = GGML_CPU_FP32_TO_FP16(tmp[2]); p[3] = GGML_CPU_FP32_TO_FP16(tmp[3]); } #define GGML_F16x4 v128_t #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x) #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) #define GGML_F16x4_FMA GGML_F32x4_FMA #define GGML_F16x4_ADD wasm_f32x4_add #define GGML_F16x4_MUL wasm_f32x4_mul #define GGML_F16x4_REDUCE(res, x) \ { \ int offset = GGML_F16_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ res = (ggml_float) (wasm_f32x4_extract_lane(x[0], 0) + \ wasm_f32x4_extract_lane(x[0], 1) + \ wasm_f32x4_extract_lane(x[0], 2) + \ wasm_f32x4_extract_lane(x[0], 3)); \ } #define GGML_F16_VEC GGML_F16x4 #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO #define GGML_F16_VEC_SET1 GGML_F16x4_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i]) #define GGML_F16_VEC_FMA GGML_F16x4_FMA #define 
GGML_F16_VEC_ADD GGML_F16x4_ADD #define GGML_F16_VEC_MUL GGML_F16x4_MUL #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE #elif defined(__SSE3__) #define GGML_SIMD // F32 SSE #define GGML_F32_STEP 32 #define GGML_F32_EPR 4 #define GGML_F32x4 __m128 #define GGML_F32x4_ZERO _mm_setzero_ps() #define GGML_F32x4_SET1(x) _mm_set1_ps(x) #define GGML_F32x4_LOAD _mm_loadu_ps #define GGML_F32x4_STORE _mm_storeu_ps #if defined(__FMA__) // TODO: Does this work? #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) #else #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) #endif #define GGML_F32x4_ADD _mm_add_ps #define GGML_F32x4_MUL _mm_mul_ps #define GGML_F32x4_REDUCE(res, x) \ { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm_add_ps(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm_add_ps(x[i], x[offset+i]); \ } \ const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \ res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \ } // TODO: is this optimal ? #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE // F16 SSE #define GGML_F16_STEP 32 #define GGML_F16_EPR 4 static inline __m128 __sse_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; tmp[0] = GGML_CPU_FP16_TO_FP32(x[0]); tmp[1] = GGML_CPU_FP16_TO_FP32(x[1]); tmp[2] = GGML_CPU_FP16_TO_FP32(x[2]); tmp[3] = GGML_CPU_FP16_TO_FP32(x[3]); return _mm_loadu_ps(tmp); } static inline void __sse_f16x4_store(ggml_fp16_t * x, __m128 y) { float arr[4]; _mm_storeu_ps(arr, y); x[0] = GGML_CPU_FP32_TO_FP16(arr[0]); x[1] = GGML_CPU_FP32_TO_FP16(arr[1]); x[2] = GGML_CPU_FP32_TO_FP16(arr[2]); x[3] = GGML_CPU_FP32_TO_FP16(arr[3]); } #define GGML_F32Cx4 __m128 #define GGML_F32Cx4_ZERO _mm_setzero_ps() #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x) #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) #define GGML_F32Cx4_FMA GGML_F32x4_FMA #define GGML_F32Cx4_ADD _mm_add_ps #define GGML_F32Cx4_MUL _mm_mul_ps #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE #define GGML_F16_VEC GGML_F32Cx4 #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE #elif defined(__loongarch_asx) #define GGML_SIMD // F32 LASX #define GGML_F32_STEP 32 #define GGML_F32_EPR 8 #define GGML_F32x8 __m256 #define GGML_F32x8_ZERO (__m256)__lasx_xvldi(0) #define GGML_F32x8_SET1(x) (__m256)__lasx_xvreplfr2vr_s((x)) #define GGML_F32x8_LOAD(x) (__m256)__lasx_xvld((x), 0) #define GGML_F32x8_STORE(x,y) __lasx_xvst((y), (x), 0) #define GGML_F32x8_FMA(a, b, c) __lasx_xvfmadd_s(b, c, a) #define GGML_F32x8_ADD __lasx_xvfadd_s #define GGML_F32x8_MUL __lasx_xvfmul_s #define GGML_F32x8_REDUCE(res, x) \ do { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = __lasx_xvfadd_s(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] 
= __lasx_xvfadd_s(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = __lasx_xvfadd_s(x[i], x[offset+i]); \ } \ float *tmp_p = (float *)&x[0]; \ res = tmp_p[0] + tmp_p[1] + tmp_p[2] + tmp_p[3] + tmp_p[4] + tmp_p[5] + tmp_p[6] + tmp_p[7]; \ } while (0) // TODO: is this optimal ? #define GGML_F32_VEC GGML_F32x8 #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO #define GGML_F32_VEC_SET1 GGML_F32x8_SET1 #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD #define GGML_F32_VEC_STORE GGML_F32x8_STORE #define GGML_F32_VEC_FMA GGML_F32x8_FMA #define GGML_F32_VEC_ADD GGML_F32x8_ADD #define GGML_F32_VEC_MUL GGML_F32x8_MUL #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE // F16 LASX #define GGML_F16_STEP 32 #define GGML_F16_EPR 8 // F16 arithmetic is not supported by LASX, so we use F32 instead #define GGML_F32Cx8 __m256 #define GGML_F32Cx8_ZERO (__m256)__lasx_xvldi(0) #define GGML_F32Cx8_SET1(x) (__m256)__lasx_xvreplfr2vr_s((x)) static inline __m256 __lasx_f32cx8_load(const ggml_fp16_t * x) { __m256i a; memcpy(&a, x, sizeof(ggml_fp16_t) * 8); a = __lasx_xvpermi_d(a, 0 | (1 << 4)); return __lasx_xvfcvtl_s_h(a); } static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) { __m256i a = __lasx_xvfcvt_h_s(y, y); a = __lasx_xvpermi_d(a, 0 | (2 << 2)); memcpy(x, &a, sizeof(ggml_fp16_t) * 8); } #define GGML_F32Cx8_LOAD(x) __lasx_f32cx8_load(x) #define GGML_F32Cx8_STORE(x, y) __lasx_f32cx8_store(x, y) #define GGML_F32Cx8_FMA GGML_F32x8_FMA #define GGML_F32Cx8_ADD __lasx_xvfadd_s #define GGML_F32Cx8_MUL __lasx_xvfmul_s #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE #define GGML_F16_VEC GGML_F32Cx8 #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE #elif defined(__loongarch_sx) #define GGML_SIMD // F32 LSX #define GGML_F32_STEP 32 #define GGML_F32_EPR 4 #define GGML_F32x4 __m128 #define GGML_F32x4_ZERO (__m128)__lsx_vldi(0) #define GGML_F32x4_SET1(x) (__m128)__lsx_vreplfr2vr_s((x)) #define GGML_F32x4_LOAD(x) (__m128)__lsx_vld((x), 0) #define GGML_F32x4_STORE(x, y) __lsx_vst(y, x, 0) #define GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a) #define GGML_F32x4_ADD __lsx_vfadd_s #define GGML_F32x4_MUL __lsx_vfmul_s #define GGML_F32x4_REDUCE(res, x) \ { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \ } \ __m128i t0 = __lsx_vpickev_w((__m128i)x[0], (__m128i)x[0]); \ __m128i t1 = __lsx_vpickod_w((__m128i)x[0], (__m128i)x[0]); \ __m128 t2 = __lsx_vfadd_s((__m128)t0, (__m128)t1); \ __m128i t3 = __lsx_vpickev_w((__m128i)t2, (__m128i)t2); \ __m128i t4 = __lsx_vpickod_w((__m128i)t2, (__m128i)t2); \ __m128 t5 = __lsx_vfadd_s((__m128)t3, (__m128)t4); \ res = (ggml_float) ((v4f32)t5)[0]; \ } #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define 
GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE // F16 LSX #define GGML_F16_STEP 32 #define GGML_F16_EPR 4 static inline __m128 __lsx_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; tmp[0] = GGML_CPU_FP16_TO_FP32(x[0]); tmp[1] = GGML_CPU_FP16_TO_FP32(x[1]); tmp[2] = GGML_CPU_FP16_TO_FP32(x[2]); tmp[3] = GGML_CPU_FP16_TO_FP32(x[3]); return (__m128)__lsx_vld(tmp, 0); } static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { float arr[4]; __lsx_vst(y, arr, 0); x[0] = GGML_CPU_FP32_TO_FP16(arr[0]); x[1] = GGML_CPU_FP32_TO_FP16(arr[1]); x[2] = GGML_CPU_FP32_TO_FP16(arr[2]); x[3] = GGML_CPU_FP32_TO_FP16(arr[3]); } #define GGML_F32Cx4 __m128 #define GGML_F32Cx4_ZERO (__m128)__lsx_vldi(0) #define GGML_F32Cx4_SET1(x) (__m128)__lsx_vreplfr2vr_s((x)) #define GGML_F32Cx4_LOAD(x) (__m128)__lsx_f16x4_load(x) #define GGML_F32Cx4_STORE(x, y) __lsx_f16x4_store(x, y) #define GGML_F32Cx4_FMA GGML_F32x4_FMA #define GGML_F32Cx4_ADD __lsx_vfadd_s #define GGML_F32Cx4_MUL __lsx_vfmul_s #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE #define GGML_F16_VEC GGML_F32Cx4 #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE #elif defined(__VXE__) || defined(__VXE2__) #define GGML_SIMD // F32 s390x #define GGML_F32_STEP 32 #define GGML_F32_EPR 4 #define GGML_F32x4 float32x4_t #define GGML_F32x4_ZERO vec_splats(0.0f) #define GGML_F32x4_SET1 vec_splats #define GGML_F32x4_LOAD(p) vec_xl(0, p) #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) #define GGML_F32x4_ADD vec_add #define GGML_F32x4_MUL vec_mul #define GGML_F32x4_REDUCE(res, x) \ { \ int offset = GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset + i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset + i]); \ } \ offset >>= 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset + i]); \ } \ float32x4_t tmp = x[0] + vec_reve(x[0]); \ res = tmp[0] + tmp[1]; \ } #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE // F16 s390x #define GGML_F16_STEP GGML_F32_STEP #define GGML_F16_EPR GGML_F32_EPR static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) { float tmp[4]; for (int i = 0; i < 4; i++) { tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } // note: keep type-cast here to prevent compiler bugs // see: https://github.com/ggml-org/llama.cpp/issues/12846 return vec_xl(0, (const float *)(tmp)); } static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) { float arr[4]; // note: keep type-cast here to prevent compiler bugs // see: https://github.com/ggml-org/llama.cpp/issues/12846 vec_xst(v_y, 0, (float *)(arr)); for (int i = 0; i < 4; i++) { x[i] = GGML_CPU_FP32_TO_FP16(arr[i]); } } #define GGML_F16_VEC GGML_F32x4 #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO #define GGML_F16_VEC_SET1 GGML_F32x4_SET1 #define GGML_F16_VEC_LOAD(p, i) __lzs_f16cx4_load(p) #define GGML_F16_VEC_STORE(p, r, i) 
__lzs_f16cx4_store(p, r[i]) #define GGML_F16_VEC_FMA GGML_F32x4_FMA #define GGML_F16_VEC_ADD GGML_F32x4_ADD #define GGML_F16_VEC_MUL GGML_F32x4_MUL #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE #elif defined(__riscv_v_intrinsic) // compatible with vlen >= 128 #define GGML_SIMD // F32 #define GGML_F32_STEP 16 #define GGML_F32_EPR 4 #define GGML_F32x4 vfloat32m1_t #define GGML_F32x4_ZERO __riscv_vfmv_v_f_f32m1(0.0f, GGML_F32_EPR) #define GGML_F32x4_SET1(x) __riscv_vfmv_v_f_f32m1(x, GGML_F32_EPR) #define GGML_F32x4_LOAD(x) __riscv_vle32_v_f32m1(x, GGML_F32_EPR) #define GGML_F32x4_STORE(b, v) __riscv_vse32_v_f32m1(b, v, GGML_F32_EPR) #define GGML_F32x4_FMA(a, b, c) __riscv_vfmacc_vv_f32m1(a, b, c, GGML_F32_EPR) #define GGML_F32x4_ADD(a, b) __riscv_vfadd_vv_f32m1(a, b, GGML_F32_EPR) #define GGML_F32x4_MUL(a, b) __riscv_vfmul_vv_f32m1(a, b, GGML_F32_EPR) #define GGML_F32_VEC GGML_F32x4 #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO #define GGML_F32_VEC_SET1 GGML_F32x4_SET1 #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD #define GGML_F32_VEC_STORE GGML_F32x4_STORE #define GGML_F32_VEC_FMA GGML_F32x4_FMA #define GGML_F32_VEC_ADD GGML_F32x4_ADD #define GGML_F32_VEC_MUL GGML_F32x4_MUL #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE #endif // GGML_F32_ARR / GGML_F16_ARR // number of registers to use per step #ifdef GGML_SIMD #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) #endif #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/spacemit/000077500000000000000000000000001512524704700204735ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cpu/spacemit/ime.cpp000066400000000000000000001236771512524704700217710ustar00rootroot00000000000000#define GGML_COMMON_IMPL_CPP #define GGML_COMMON_DECL_CPP #include "ime.h" #include "ggml-backend-impl.h" #include "ggml-common.h" #include "ggml-cpu.h" #include "ime_kernels.h" #include "traits.h" #include #include #include #include // for GGML_ASSERT #include #include // clang-format off #if defined(__riscv) #if !defined(__riscv_v) || !defined(__riscv_v_intrinsic) #error "riscv v extension or v_intrinsic not enabled" #else #include #endif #if !defined(__riscv_zfh) #error "riscv zfh extension not enabled" #endif #if defined(RISCV64_SPACEMIT_IME1) #else #error "RISCV64_SPACEMIT_IME1 not defined" #endif #else #error "riscv not enabled in this build" #endif #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" #pragma GCC diagnostic ignored "-Wcast-qual" #pragma GCC diagnostic ignored "-Wunused-parameter" #endif #if defined(RISCV64_SPACEMIT_IME1) #define QGEMM_STRIDEN_THREAD_ALIGN 16 #else #define QGEMM_STRIDEN_THREAD_ALIGN 32 #endif // clang-format on struct qnbitgemm_spacemit_ime_args { const float * a_ptr = nullptr; size_t lda = 0; const std::byte * packed_quant_b_data = nullptr; const float * quant_b_scale = nullptr; const void * quant_b_zp = nullptr; const float * quant_b_blksum = nullptr; const float * bias = nullptr; float * c_ptr = nullptr; size_t ldc = 0; }; constexpr size_t div_round_up(size_t up, size_t down) { return (up + down - 1) / down; } constexpr size_t q8_blk_size(size_t blk_len) { const size_t blk_size = sizeof(float) + blk_len * sizeof(int8_t); // Currently, the strictest alignment requirement of a block is for a float. // Ensure contiguous blocks are suitably aligned. 
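    // For example, with blk_len == QK4_0 (32) a quantized A block is sizeof(float) + 32 = 36 bytes
    // (one float scale followed by 32 int8 quants), which satisfies the float-alignment check below.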
assert(blk_size % alignof(float) == 0); return blk_size; } namespace ggml::cpu::riscv64_spacemit { const int num_ai_cores = std::thread::hardware_concurrency() / 2; } // namespace ggml::cpu::riscv64_spacemit static void sqnbitgemm_spacemit_ime_i8i4(const size_t blk_len, const size_t gemm_k, const qnbitgemm_spacemit_ime_args * gemm_args, void * const per_gemm_ws, const size_t m_start, const size_t m_count, const size_t n_start, const size_t n_count) { constexpr size_t scale_stride = sizeof(uint16_t); constexpr size_t blk_bitwidth = 4; const size_t k_blks = div_round_up(gemm_k, blk_len); const size_t lda = k_blks * q8_blk_size(blk_len); const size_t ldc = gemm_args->ldc; const size_t ldb = k_blks * (blk_len * blk_bitwidth / 8); const std::byte * quant_a_ptr = static_cast(per_gemm_ws) + m_start * lda; const size_t zero_point_stride = gemm_args->quant_b_zp != nullptr ? sizeof(uint8_t) : 0; const size_t packed_b_stride = ldb + k_blks * (scale_stride + zero_point_stride); const std::byte * packed_quant_b_data = gemm_args->packed_quant_b_data + n_start * packed_b_stride; float * c_ptr = gemm_args->c_ptr + m_start * ldc + n_start; size_t count_n = 0; const size_t compute_block_count_n = m_count == 1 ? n_count : 16; for (size_t n = 0; n < n_count; n += count_n) { count_n = std::min(n_count - n, compute_block_count_n); const std::byte * a_row = quant_a_ptr; const std::byte * b_col = packed_quant_b_data + n * packed_b_stride; const std::byte * b_col_zp = (zero_point_stride != 0) ? b_col : nullptr; float * c_blk = c_ptr + n; int32_t rows_remaining = m_count; while (rows_remaining > 0) { const auto rows_handled = sqnbitgemm_spacemit_ime::ime1::gemm_kernel_i8i4( blk_len, a_row, b_col, nullptr, b_col_zp, c_blk, rows_remaining, count_n, gemm_k, k_blks, ldc, nullptr, scale_stride); c_blk += rows_handled * ldc; a_row += rows_handled * lda; rows_remaining -= rows_handled; } } } template constexpr int QK_0() { if constexpr (K == 4) { return QK4_0; } if constexpr (K == 8) { return QK8_0; } return -1; } template struct block { ggml_half d[N]; // deltas for N qK_0 blocks uint8_t qs[(QK_0() * N * K) / 8]; // quants for N qK_0 blocks }; template struct block_with_zp { ggml_half d[N]; // deltas for N qK_1 blocks uint8_t zp[N]; // zero points for N qK_1 blocks uint8_t qs[(QK_0() * N * K) / 8]; // quants for N qK_1 blocks }; // control size static_assert(sizeof(block<4, 16>) == 16 * sizeof(ggml_half) + QK4_0 * 8, "wrong block<4,16> size/padding"); static_assert(sizeof(block_with_zp<4, 16>) == 16 * sizeof(ggml_half) + QK4_0 * 8 + 16 * sizeof(uint8_t), "wrong block_with_zp<4,16> size/padding"); static_assert(sizeof(block<8, 16>) == 16 * sizeof(ggml_half) + QK4_0 * 16, "wrong block<8,16> size/padding"); using block_q4_0x16 = block<4, 16>; using block_q4_1x16 = block_with_zp<4, 16>; using block_q8_0x16 = block<8, 16>; static block_q4_0x16 make_block_q4_0x16(block_q4_0 * in, unsigned int blck_size_interleave) { block_q4_0x16 out; GGML_ASSERT(QK4_0 / blck_size_interleave == 2); for (int i = 0; i < 16; i++) { out.d[i] = in[i].d; } for (int i = 0; i < 16; i++) { // [0, 15], in.d & 0x0F for (int j = 0; j < QK4_0 / 4; j++) { //src [b0 b16] ......... [b8 b24] ......... [b15 b31] //dst [b0 b8] ......... [b7 b15] out.qs[i * QK4_0 / 4 + j] = (in[i].qs[j] & 0x0F) | ((in[i].qs[j + QK4_0 / 4] & 0x0F) << 4); } } for (int i = 0; i < 16; i++) { // [16, 31], in.d & 0xF0 for (int j = 0; j < QK4_0 / 4; j++) { //src [b0 b16] ......... [b8 b24] ......... [b15 b31] //dst [b16 b24] ......... 
[b23 b31] out.qs[4 * QK4_0 + i * QK4_0 / 4 + j] = ((in[i].qs[j] & 0xF0) >> 4) | (in[i].qs[j + QK4_0 / 4] & 0xF0); } } return out; } static block_q4_1x16 make_block_q4_1x16(block_q4_1 * in, unsigned int blck_size_interleave) { block_q4_1x16 out; GGML_ASSERT(QK4_1 / blck_size_interleave == 2); for (int i = 0; i < 16; i++) { float d = GGML_FP16_TO_FP32(in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d); float m = GGML_FP16_TO_FP32(in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.m); float mid = -std::nearbyintf(m / d); mid = std::min(15.0f, std::max(0.0f, mid)); out.d[i] = GGML_FP32_TO_FP16(d); out.zp[i] = static_cast(mid); } for (int i = 0; i < 16; i++) { // [0, 15], in.d & 0x0F for (int j = 0; j < QK4_1 / 4; j++) { //src [b0 b16] ......... [b8 b24] ......... [b15 b31] //dst [b0 b8] ......... [b7 b15] out.qs[i * QK4_1 / 4 + j] = (in[i].qs[j] & 0x0F) | ((in[i].qs[j + QK4_1 / 4] & 0x0F) << 4); } } for (int i = 0; i < 16; i++) { // [16, 31], in.d & 0xF0 for (int j = 0; j < QK4_1 / 4; j++) { //src [b0 b16] ......... [b8 b24] ......... [b15 b31] //dst [b16 b24] ......... [b23 b31] out.qs[4 * QK4_1 + i * QK4_1 / 4 + j] = ((in[i].qs[j] & 0xF0) >> 4) | (in[i].qs[j + QK4_1 / 4] & 0xF0); } } return out; } static int repack_q4_0_to_q4_0_16_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); GGML_ASSERT(interleave_block == 16); constexpr int nrows_interleaved = 16; block_q4_0x16 * dst = (block_q4_0x16 *) t->data; const block_q4_0 * src = (const block_q4_0 *) data; block_q4_0 dst_tmp[16]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK4_0; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % QK4_0 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q4_0x16(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static int repack_q4_1_to_q4_1_16_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_1); GGML_ASSERT(interleave_block == 16); constexpr int nrows_interleaved = 16; block_q4_1x16 * dst = (block_q4_1x16 *) t->data; const block_q4_1 * src = (const block_q4_1 *) data; block_q4_1 dst_tmp[16]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK4_1; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_1)); if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % QK4_1 != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int i = 0; i < nrows_interleaved; i++) { dst_tmp[i] = src[x + i * nblocks]; } *dst++ = make_block_q4_1x16(dst_tmp, interleave_block); } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } static inline void get_scale_min_k4(int j, const uint8_t * GGML_RESTRICT q, uint8_t * GGML_RESTRICT d, uint8_t * GGML_RESTRICT m) { if (j < 4) { *d = q[j] & 63; *m = q[j + 4] & 63; } else { *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4); *m = (q[j + 4] >> 4) | ((q[j - 0] >> 6) << 4); } } static int repack_q4_k_to_q4_1_16_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_K); GGML_ASSERT(interleave_block == 16); GGML_ASSERT(QK_K / QK4_1 == 8); constexpr int nrows_interleaved = 16; 
block_q4_1x16 * dst = (block_q4_1x16 *) t->data; const block_q4_K * src = (const block_q4_K *) data; block_q4_1 dst_tmp[16]; int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK_K; if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % QK_K != 0) { return -1; } for (int b = 0; b < nrow; b += nrows_interleaved) { for (int64_t x = 0; x < nblocks; x++) { for (int j = 0; j < 8; j++) { for (int i = 0; i < nrows_interleaved; i++) { uint8_t sc, m; const float d = GGML_FP16_TO_FP32(src[x + i * nblocks].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d); const float min = GGML_FP16_TO_FP32(src[x + i * nblocks].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin); get_scale_min_k4(j, src[x + i * nblocks].scales, &sc, &m); const float d1 = d * sc; const float m1 = min * m; dst_tmp[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d = GGML_FP32_TO_FP16(d1); dst_tmp[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.m = GGML_FP32_TO_FP16(-m1); // src -> [b0, b32] [b1, b33] ... [b31, b63] // dst -> [b0, b16] [b1, b17] ... [b15, b31] [b32, b48] [b33, b49] ... [b47, b63] const uint8_t * q = src[x + i * nblocks].qs + (j / 2) * QK4_1; if (j % 2 == 0) { for (int ii = 0; ii < 16; ii++) { dst_tmp[i].qs[ii] = (q[ii] & 0x0F) | ((q[ii + 16] & 0x0F) << 4); } } else { for (int ii = 0; ii < 16; ii++) { dst_tmp[i].qs[ii] = ((q[ii] & 0xF0) >> 4) | (q[ii + 16] & 0xF0); } } } *dst++ = make_block_q4_1x16(dst_tmp, interleave_block); } } src += nrows_interleaved * nblocks; } return 0; GGML_UNUSED(data_size); } namespace ggml::cpu::riscv64_spacemit { template int repack(struct ggml_tensor *, const void *, size_t); template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_0_to_q4_0_16_bl(t, 16, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_1_to_q4_1_16_bl(t, 16, data, data_size); } template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { return repack_q4_k_to_q4_1_16_bl(t, 16, data, data_size); } class tensor_traits_base : public ggml::cpu::tensor_traits { public: virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0; }; template class tensor_traits : public tensor_traits_base { bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { switch (op->op) { case GGML_OP_MUL_MAT: size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1])) * 4; size = ((size + QK4_0 - 1) / QK4_0) * (QK4_0 * sizeof(float) + sizeof(float)); return true; default: // GGML_ABORT("fatal error"); break; } return false; } bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { switch (op->op) { case GGML_OP_MUL_MAT: if (op->src[0]->type == GGML_TYPE_Q4_0 || // op->src[0]->type == GGML_TYPE_Q4_1 || // op->src[0]->type == GGML_TYPE_Q4_K) { forward_mul_mat_q4(params, op); return true; } default: // GGML_ABORT("fatal error"); break; } return false; } void forward_mul_mat_q4(ggml_compute_params * params, ggml_tensor * op) { const ggml_tensor * src0 = op->src[0]; const ggml_tensor * src1 = op->src[1]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS int ith = params->ith; int nth = params->nth; [[maybe_unused]] const enum ggml_type type = src0->type; void * w_data = (void *) src0->data; const float * feature = (const float *) src1->data; float * output = (float *) dst->data; const size_t batch_feature = ne12 * ne13; [[maybe_unused]] const size_t batch_weight = ne02 * ne03; const size_t gemm_m = ne11; const size_t gemm_k = ne10; const size_t 
gemm_n = ne01; GGML_ASSERT(batch_weight == 1); const size_t block_count_k = div_round_up(gemm_k, QK4_0); const size_t per_gemm_workspace_size = gemm_m * block_count_k * q8_blk_size(QK4_0); const size_t per_gemm_workspace_stride = div_round_up(per_gemm_workspace_size, alignof(uint64_t)) * alignof(uint64_t); const size_t gemm_workspace_size = batch_feature * per_gemm_workspace_stride; const size_t desired_wsize = gemm_workspace_size + alignof(uint64_t) - 1; if (ith == 0 && params->wsize < desired_wsize) { throw std::runtime_error("wsize less than desired_wsize"); } std::vector qnbitgemm_args(batch_feature); for (size_t i = 0; i < batch_feature; i++) { qnbitgemm_args[i].a_ptr = feature + gemm_m * gemm_k * i; qnbitgemm_args[i].lda = gemm_k; qnbitgemm_args[i].packed_quant_b_data = (const std::byte *) w_data; qnbitgemm_args[i].quant_b_scale = nullptr; if constexpr (std::is_same_v) { qnbitgemm_args[i].quant_b_zp = nullptr; } else { qnbitgemm_args[i].quant_b_zp = w_data; } qnbitgemm_args[i].bias = nullptr; qnbitgemm_args[i].c_ptr = output + gemm_m * gemm_n * i; qnbitgemm_args[i].ldc = gemm_n; } const uintptr_t ws_ptr = reinterpret_cast(params->wdata); void * ws = reinterpret_cast((ws_ptr + alignof(uint64_t) - 1) & (~(alignof(uint64_t) - 1))); const size_t quant_a_stride = block_count_k * q8_blk_size(QK4_0); { constexpr size_t block_size_m = 4; size_t per_gemm_block_count_m = div_round_up(gemm_m, block_size_m); int32_t task_count = batch_feature * per_gemm_block_count_m; int32_t task_per_thread = (task_count + nth - 1) / nth; int32_t start = ith * task_per_thread; int32_t end = std::min((ith + 1) * task_per_thread, task_count); for (int32_t compute_idx = start; compute_idx < end; compute_idx++) { int32_t gemm_idx = compute_idx / per_gemm_block_count_m; int32_t block_idx_in_gemm = compute_idx % per_gemm_block_count_m; int32_t m_idx = block_idx_in_gemm * block_size_m; const qnbitgemm_spacemit_ime_args & data = qnbitgemm_args[gemm_idx]; int32_t rows_tobe_handled = (gemm_m - m_idx) > block_size_m ? 
block_size_m : (gemm_m - m_idx); if (rows_tobe_handled == block_size_m) { const float * a_row_ptr = data.a_ptr + m_idx * data.lda; std::byte * quant_a_row_ptr = static_cast(ws) + gemm_idx * per_gemm_workspace_stride + m_idx * quant_a_stride; sqnbitgemm_spacemit_ime::ime1::quantize_a_4row_i8(QK4_0, a_row_ptr, gemm_k, quant_a_row_ptr); } else { while (rows_tobe_handled) { const float * a_row_ptr = data.a_ptr + m_idx * data.lda; std::byte * quant_a_row_ptr = static_cast(ws) + gemm_idx * per_gemm_workspace_stride + m_idx * quant_a_stride; sqnbitgemm_spacemit_ime::ime1::quantize_a_row_i8(QK4_0, a_row_ptr, gemm_k, quant_a_row_ptr); rows_tobe_handled -= 1; m_idx += 1; } } } } ggml_barrier(params->threadpool); if (ith >= ggml::cpu::riscv64_spacemit::num_ai_cores) { return; } nth = std::min(nth, int{ ggml::cpu::riscv64_spacemit::num_ai_cores }); size_t threads_per_gemm = nth / batch_feature; constexpr size_t gemm_m_stride = 128; size_t nc = gemm_n; const size_t gemm_m_blocked = div_round_up(gemm_m, gemm_m_stride); const size_t max_nc = div_round_up(gemm_n * gemm_m_blocked, threads_per_gemm); if (max_nc < nc) { nc = std::min(nc, div_round_up(max_nc, QGEMM_STRIDEN_THREAD_ALIGN) * QGEMM_STRIDEN_THREAD_ALIGN); } const size_t gemm_n_stride = nc; const size_t thread_count_m = div_round_up(gemm_m, gemm_m_stride); const size_t thread_count_n = div_round_up(gemm_n, gemm_n_stride); threads_per_gemm = thread_count_m * thread_count_n; { int task_count = batch_feature * threads_per_gemm; int task_per_thread = (task_count + nth - 1) / nth; int start = ith * task_per_thread; int end = std::min((ith + 1) * task_per_thread, task_count); for (int compute_idx = start; compute_idx < end; compute_idx++) { const auto gemm_i = compute_idx / threads_per_gemm; const auto blk_i = compute_idx % threads_per_gemm; const auto * data = &qnbitgemm_args[gemm_i]; const auto tid_n = blk_i / thread_count_m; const auto tid_m = blk_i % thread_count_m; const size_t m_start = tid_m * gemm_m_stride; const size_t m_count = std::min(gemm_m - m_start, (size_t) gemm_m_stride); const size_t n_start = tid_n * gemm_n_stride; const size_t n_count = std::min(gemm_n - n_start, (size_t) gemm_n_stride); void * per_gemm_ws = reinterpret_cast(ws) + gemm_i * per_gemm_workspace_stride; sqnbitgemm_spacemit_ime_i8i4(QK4_0, gemm_k, data, per_gemm_ws, m_start, m_count, n_start, n_count); } } } int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type), (int) NB_COLS, (int) INTER_SIZE); return ggml::cpu::riscv64_spacemit::repack(t, data, data_size); } }; class tensor_traits_common : public tensor_traits_base { bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { switch (op->op) { case GGML_OP_NORM: case GGML_OP_RMS_NORM: size = 0; return true; default: // GGML_ABORT("fatal error"); break; } return false; } bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { switch (op->op) { case GGML_OP_NORM: forward_norm_f32(params, op); return true; case GGML_OP_RMS_NORM: forward_rms_norm_f32(params, op); return true; default: // GGML_ABORT("fatal error"); break; } return false; } void forward_norm_f32(ggml_compute_params * params, ggml_tensor * op) { const ggml_tensor * src0 = op->src[0]; ggml_tensor * dst = op; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; 
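        // Rows (ne01*ne02*ne03 of them) are split across threads. Each row is normalized in two RVV
        // passes: the first accumulates the sum and the sum of squares while copying the row to the
        // output, the second applies (x - mean) / sqrt(var + eps). The gamma/beta branches are not
        // taken here since p_gamma_data and p_beta_data stay null for GGML_OP_NORM.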
GGML_TENSOR_UNARY_OP_LOCALS float epsilon; memcpy(&epsilon, dst->op_params, sizeof(float)); GGML_ASSERT(epsilon > 0.0f); auto * input = (float *) src0->data; auto * output = (float *) dst->data; const auto hidden_size = ne00; const auto task_count = ne01 * ne02 * ne03; const auto task_per_thread = (task_count + nth - 1) / nth; const auto task_begin = ith * task_per_thread; const auto task_end = std::min((ith + 1) * task_per_thread, task_count); for (auto task_idx = task_begin; task_idx < task_end; task_idx++) { auto offset = task_idx * hidden_size; auto * p_input = const_cast(input + offset); auto * p_output = output + offset; auto * p_temp_output = p_output; auto * p_gamma_data = (const float *) nullptr; auto * p_beta_data = (const float *) nullptr; size_t gvl = __riscv_vsetvlmax_e32m4(); vfloat32m4_t sum = __riscv_vfmv_v_f_f32m4(0.f, gvl); vfloat32m4_t sum_sq = __riscv_vfmv_v_f_f32m4(0.f, gvl); int64_t length = hidden_size; while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); // load data vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_input, gvl); sum = __riscv_vfadd_vv_f32m4(sum, src_data, gvl); sum_sq = __riscv_vfmacc_vv_f32m4(sum_sq, src_data, src_data, gvl); __riscv_vse32_v_f32m4(p_temp_output, src_data, gvl); p_input += gvl; p_temp_output += gvl; length -= gvl; } gvl = __riscv_vsetvlmax_e32m1(); float mean = 0.f; vfloat32m1_t zero_v = __riscv_vfmv_v_f_f32m1(0.f, gvl); vfloat32m1_t mean_v = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m4_f32m1(sum, 0), __riscv_vget_v_f32m4_f32m1(sum, 1), gvl); mean_v = __riscv_vfadd_vv_f32m1(mean_v, __riscv_vget_v_f32m4_f32m1(sum, 2), gvl); mean_v = __riscv_vfadd_vv_f32m1(mean_v, __riscv_vget_v_f32m4_f32m1(sum, 3), gvl); mean_v = __riscv_vfredusum_vs_f32m1_f32m1(mean_v, zero_v, gvl); mean = __riscv_vfmv_f_s_f32m1_f32(mean_v); mean /= hidden_size; vfloat32m1_t mean_square_v = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m4_f32m1(sum_sq, 0), __riscv_vget_v_f32m4_f32m1(sum_sq, 1), gvl); mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 2), gvl); mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 3), gvl); mean_square_v = __riscv_vfredusum_vs_f32m1_f32m1(mean_square_v, zero_v, gvl); float mean_square = __riscv_vfmv_f_s_f32m1_f32(mean_square_v); mean_square /= hidden_size; mean_square = sqrt(mean_square - mean * mean + epsilon); mean_square = 1.0f / mean_square; length = hidden_size; p_temp_output = p_output; if (p_gamma_data == nullptr && p_beta_data == nullptr) { while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl); src_data = __riscv_vfsub_vf_f32m4(src_data, mean, gvl); src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl); __riscv_vse32_v_f32m4(p_output, src_data, gvl); p_temp_output += gvl; p_output += gvl; length -= gvl; } } else if (p_beta_data == nullptr) { while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl); vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl); src_data = __riscv_vfsub_vf_f32m4(src_data, mean, gvl); src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl); src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl); __riscv_vse32_v_f32m4(p_output, src_data, gvl); p_temp_output += gvl; p_output += gvl; p_gamma_data += gvl; length -= gvl; } } else if (p_gamma_data != nullptr) { while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); vfloat32m4_t src_data = 
__riscv_vle32_v_f32m4(p_temp_output, gvl); vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl); src_data = __riscv_vfsub_vf_f32m4(src_data, mean, gvl); src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl); src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl); vfloat32m4_t beta_data_v = __riscv_vle32_v_f32m4(p_beta_data, gvl); src_data = __riscv_vfadd_vv_f32m4(src_data, beta_data_v, gvl); p_beta_data += gvl; __riscv_vse32_v_f32m4(p_output, src_data, gvl); p_temp_output += gvl; p_output += gvl; p_gamma_data += gvl; length -= gvl; } } } } void forward_rms_norm_f32(ggml_compute_params * params, ggml_tensor * op) { const ggml_tensor * src0 = op->src[0]; ggml_tensor * dst = op; GGML_ASSERT(ggml_are_same_shape(src0, dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS float epsilon; memcpy(&epsilon, dst->op_params, sizeof(float)); GGML_ASSERT(epsilon > 0.0f); auto * input = (float *) src0->data; auto * output = (float *) dst->data; const auto hidden_size = ne00; const auto task_count = ne01 * ne02 * ne03; const auto task_per_thread = (task_count + nth - 1) / nth; const auto task_begin = ith * task_per_thread; const auto task_end = std::min((ith + 1) * task_per_thread, task_count); for (auto task_idx = task_begin; task_idx < task_end; task_idx++) { auto offset = task_idx * hidden_size; auto * p_input = const_cast(input + offset); auto * p_output = output + offset; auto * p_temp_output = p_output; auto * p_gamma_data = (const float *) nullptr; auto * p_beta_data = (const float *) nullptr; size_t gvl = __riscv_vsetvlmax_e32m4(); // vfloat32m4_t sum = __riscv_vfmv_v_f_f32m4(0.f, gvl); vfloat32m4_t sum_sq = __riscv_vfmv_v_f_f32m4(0.f, gvl); int64_t length = hidden_size; while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); // load data vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_input, gvl); sum_sq = __riscv_vfmacc_vv_f32m4(sum_sq, src_data, src_data, gvl); __riscv_vse32_v_f32m4(p_temp_output, src_data, gvl); p_input += gvl; p_temp_output += gvl; length -= gvl; } gvl = __riscv_vsetvlmax_e32m1(); // float mean = 0.f; vfloat32m1_t zero_v = __riscv_vfmv_v_f_f32m1(0.f, gvl); vfloat32m1_t mean_square_v = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m4_f32m1(sum_sq, 0), __riscv_vget_v_f32m4_f32m1(sum_sq, 1), gvl); mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 2), gvl); mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 3), gvl); mean_square_v = __riscv_vfredusum_vs_f32m1_f32m1(mean_square_v, zero_v, gvl); float mean_square = __riscv_vfmv_f_s_f32m1_f32(mean_square_v); mean_square /= hidden_size; mean_square = sqrt(mean_square + epsilon); mean_square = 1.0f / mean_square; length = hidden_size; p_temp_output = p_output; if (p_gamma_data == nullptr && p_beta_data == nullptr) { while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl); src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl); __riscv_vse32_v_f32m4(p_output, src_data, gvl); p_temp_output += gvl; p_output += gvl; length -= gvl; } } else if (p_beta_data == nullptr) { while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl); vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl); src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl); src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, 
gvl); __riscv_vse32_v_f32m4(p_output, src_data, gvl); p_temp_output += gvl; p_output += gvl; p_gamma_data += gvl; length -= gvl; } } else if (p_gamma_data != nullptr) { while (length > 0) { gvl = __riscv_vsetvl_e32m4(length); vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl); vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl); src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl); src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl); vfloat32m4_t beta_data_v = __riscv_vle32_v_f32m4(p_beta_data, gvl); src_data = __riscv_vfadd_vv_f32m4(src_data, beta_data_v, gvl); p_beta_data += gvl; __riscv_vse32_v_f32m4(p_output, src_data, gvl); p_temp_output += gvl; p_output += gvl; p_gamma_data += gvl; length -= gvl; } } } } int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { memcpy(t->data, data, data_size); return 0; } }; static const tensor_traits q4_0_16x8_q8_0; static const tensor_traits q4_1_16x8_q8_0; static const tensor_traits q4_k_16x8_q8_0; static const tensor_traits_common rvv_impl; } // namespace ggml::cpu::riscv64_spacemit static const ggml::cpu::tensor_traits * ggml_riscv64_spacemit_get_optimal_repack_type(const struct ggml_tensor * cur) { if (cur->type == GGML_TYPE_Q4_0) { if (cur->ne[1] % 16 == 0) { return &ggml::cpu::riscv64_spacemit::q4_0_16x8_q8_0; } } else if (cur->type == GGML_TYPE_Q4_1) { if (cur->ne[1] % 16 == 0) { return &ggml::cpu::riscv64_spacemit::q4_1_16x8_q8_0; } } else if (cur->type == GGML_TYPE_Q4_K) { if (cur->ne[1] % 16 == 0) { return &ggml::cpu::riscv64_spacemit::q4_k_16x8_q8_0; } } else if (cur->type == GGML_TYPE_F32) { return &ggml::cpu::riscv64_spacemit::rvv_impl; } return nullptr; } static enum ggml_status ggml_backend_riscv64_spacemit_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { tensor->extra = (void *) const_cast(ggml_riscv64_spacemit_get_optimal_repack_type(tensor)); GGML_UNUSED(buffer); return GGML_STATUS_SUCCESS; } static void ggml_backend_riscv64_spacemit_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); auto tensor_traits = (ggml::cpu::riscv64_spacemit::tensor_traits_base *) tensor->extra; if (tensor_traits) { auto OK = tensor_traits->repack(tensor, data, size); GGML_ASSERT(OK == 0); } GGML_UNUSED(buffer); } static const char * ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "CPU_RISCV64_SPACEMIT"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); if (buffer == nullptr) { return nullptr; } buffer->buft = buft; buffer->iface.init_tensor = ggml_backend_riscv64_spacemit_buffer_init_tensor; buffer->iface.set_tensor = ggml_backend_riscv64_spacemit_buffer_set_tensor; buffer->iface.get_tensor = nullptr; buffer->iface.cpy_tensor = nullptr; return buffer; } static size_t ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 64; GGML_UNUSED(buft); } static size_t ggml_backend_cpu_riscv64_spacemit_nbytes(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) { for (int i = 0; i < GGML_MAX_DIMS; ++i) { if (tensor->ne[i] <= 0) { return 0; } } size_t nbytes; const size_t blck_size = 
ggml_blck_size(tensor->type); if (blck_size == 1) { nbytes = ggml_type_size(tensor->type); for (int i = 0; i < GGML_MAX_DIMS; ++i) { nbytes += (tensor->ne[i] - 1) * tensor->nb[i]; } } else { nbytes = tensor->ne[0] * tensor->nb[0] / blck_size; if (tensor->type == GGML_TYPE_Q4_K) { GGML_ASSERT(nbytes % sizeof(block_q4_K) == 0); nbytes = (nbytes / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8; for (int i = 1; i < GGML_MAX_DIMS; ++i) { nbytes += (tensor->ne[i] - 1) * (tensor->nb[i] / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8; } } else { for (int i = 1; i < GGML_MAX_DIMS; ++i) { nbytes += (tensor->ne[i] - 1) * tensor->nb[i]; } } } GGML_UNUSED(buft); return nbytes; } namespace ggml::cpu::riscv64_spacemit { class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { switch (op->op) { case GGML_OP_MUL_MAT: if (op->src[0]->buffer && (ggml_n_dims(op->src[0]) == 2) && op->src[0]->buffer->buft == ggml_backend_cpu_riscv64_spacemit_buffer_type() && ggml_riscv64_spacemit_get_optimal_repack_type(op->src[0])) { if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { return false; } if (op->src[1]->type == GGML_TYPE_F32) { return true; } } break; case GGML_OP_NORM: case GGML_OP_RMS_NORM: if (op->src[0]->type == GGML_TYPE_F32) { return true; } break; default: // GGML_ABORT("fatal error"); break; } return false; } ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { switch (op->op) { case GGML_OP_MUL_MAT: if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_riscv64_spacemit_buffer_type()) { return (ggml::cpu::tensor_traits *) op->src[0]->extra; } break; case GGML_OP_NORM: case GGML_OP_RMS_NORM: return (ggml::cpu::tensor_traits *) (&ggml::cpu::riscv64_spacemit::rvv_impl); default: // GGML_ABORT("fatal error"); break; } return nullptr; } }; } // namespace ggml::cpu::riscv64_spacemit ggml_backend_buffer_type_t ggml_backend_cpu_riscv64_spacemit_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_riscv64_spacemit = { /* .iface = */ { /* .get_name = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment, /* .get_max_size = */ nullptr, /* .get_alloc_size = */ ggml_backend_cpu_riscv64_spacemit_nbytes, /* .is_host = */ nullptr, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), /* .context = */ new ggml::cpu::riscv64_spacemit::extra_buffer_type(), }; return &ggml_backend_cpu_buffer_type_riscv64_spacemit; } ggml-org-ggml-3678254/src/ggml-cpu/spacemit/ime.h000066400000000000000000000002741512524704700214210ustar00rootroot00000000000000#pragma once #include "ggml-alloc.h" #ifdef __cplusplus extern "C" { #endif ggml_backend_buffer_type_t ggml_backend_cpu_riscv64_spacemit_buffer_type(void); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/spacemit/ime1_kernels.cpp000066400000000000000000005312271512524704700235670ustar00rootroot00000000000000#include "ggml.h" #include "ime_kernels.h" #include #include // clang-format off #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" #pragma GCC diagnostic ignored "-Wcast-qual" #pragma GCC diagnostic ignored "-Wunused-parameter" #endif // clang-format on namespace sqnbitgemm_spacemit_ime { #define QUANTIZEM4ROW_KERNEL \ "vmv.s.x v16, zero \n\t" \ "vfabs.v v8, v0 \n\t" \ 
"vfredmax.vs v16, v8, v16 \n\t" \ "vfmv.f.s f10, v16 \n\t" \ "fmul.s f10, f10, %[RMAXREC] \n\t" \ "fsw f10, (a1) \n\t" \ "fdiv.s f11, %[FONE], f10 \n\t" \ "vfmul.vf v16, v0, f11 \n\t" \ "vfcvt.x.f.v v16, v16 \n\t" \ "vsetvli t0, zero, e16, mf2 \n\t" \ "vnclip.wx v16, v16, zero \n\t" \ "vnclip.wx v17, v17, zero \n\t" \ "vnclip.wx v18, v18, zero \n\t" \ "vnclip.wx v19, v19, zero \n\t" \ "vnclip.wx v20, v20, zero \n\t" \ "vnclip.wx v21, v21, zero \n\t" \ "vnclip.wx v22, v22, zero \n\t" \ "vnclip.wx v23, v23, zero \n\t" \ "vsetvli t0, zero, e8, mf4 \n\t" \ "vnclip.wx v24, v16, zero \n\t" \ "vnclip.wx v25, v17, zero \n\t" \ "vnclip.wx v26, v18, zero \n\t" \ "vnclip.wx v27, v19, zero \n\t" \ "vnclip.wx v28, v20, zero \n\t" \ "vnclip.wx v29, v21, zero \n\t" \ "vnclip.wx v30, v22, zero \n\t" \ "vnclip.wx v31, v23, zero \n\t" #define QUANTIZEM4ROW_STORE \ "addi t1, %[BlkLen], 0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v24, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v25, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v26, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v27, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v28, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v29, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v30, (s1) \n\t" \ "addi s1, s1, 32 \n\t" \ "sub t1, t1, t0 \n\t" \ "vsetvli t0, t1, e8, mf4 \n\t" \ "vse8.v v31, (s1) \n\t" namespace ime1 { void quantize_a_4row_i8(size_t BlkLen, const float * A, size_t CountK, std::byte * QuantA) { constexpr float range_max_reciprocal = 1.0f / ((1 << 7) - 1); const float fone = 1.0f; if (BlkLen == 16 || BlkLen == 32 || BlkLen == 64) { for (size_t row_index = 0; row_index < 4; ++row_index) { const float * SRC = A + row_index * CountK; std::byte * DST = QuantA + row_index * sizeof(float); const size_t offset = (4 - row_index) * 4 + row_index * 8; const size_t stride = 4 * (sizeof(float) + BlkLen); __asm__ volatile( "vsetvli t0, zero, e32, m8 \n\t" "addi t2, %[CountK], 0 \n\t" "addi a1, %[DST], 0 \n\t" "blt t2, %[BlkLen], TAIL%= \n\t" "LOOP%=: \n\t" "vsetvli t0, %[BlkLen], e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "sub t2, t2, t0 \n\t" "slli t1, t0, 2 \n\t" "add %[SRC], %[SRC], t1 \n\t" "add s1, a1, %[OFFSET] \n\t" QUANTIZEM4ROW_KERNEL QUANTIZEM4ROW_STORE "add a1, a1, %[STRIDE] \n\t" "bge t2, %[BlkLen], LOOP%= \n\t" "TAIL%=: \n\t" "blez t2, QUIT%= \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v24, v24, v24 \n\t" "vsetvli t0, t2, e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "add s1, a1, %[OFFSET] \n\t" QUANTIZEM4ROW_KERNEL "addi t3, %[BlkLen], 0 \n\t" "addi s2, s1, 0 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vxor.vv v8, v8, v8 \n\t" "SET_ZERO%=: \n\t" "vse8.v v8, (s2) \n\t" "addi s2, s2, 32 \n\t" "addi t3, t3, -8 \n\t" "bnez t3, SET_ZERO%= \n\t" QUANTIZEM4ROW_STORE "QUIT%=: \n\t" : [SRC] "+r"(SRC) : [DST] "r"(DST), [BlkLen] "r"(BlkLen), [OFFSET] "r"(offset), [STRIDE] "r"(stride), [CountK] "r"(CountK), [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal) : "cc", "t0", "t1", "t2", "t3", "a1", "s1", "s2", "f10", "f11"); } } else if (BlkLen == 128) { for (size_t row_index = 0; row_index < 4; ++row_index) { const float * SRC = A + row_index * CountK; std::byte * DST = QuantA + row_index * 
sizeof(float); const size_t offset = (4 - row_index) * 4 + row_index * 8; const size_t stride = 4 * (sizeof(float) + BlkLen); __asm__ volatile( "vsetvli t0, zero, e32, m8 \n\t" "li t6, 32 \n\t" "addi t2, %[CountK], 0 \n\t" "addi a1, %[DST], 0 \n\t" "add s1, a1, %[OFFSET] \n\t" "blt t2, %[BlkLen], TAIL%= \n\t" "LOOP%=: \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "addi t2, t2, -128 \n\t" "QUANTIZE%=: \n\t" "add s1, a1, %[OFFSET] \n\t" "vfabs.v v16, v0 \n\t" "vfabs.v v24, v8 \n\t" "vfmax.vv v16, v24, v16 \n\t" "vfredmax.vs v24, v16, v24 \n\t" "vfmv.f.s f10, v24 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (a1) \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vfmul.vf v16, v0, f11 \n\t" "vfmul.vf v24, v8, f11 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v20, v24, zero \n\t" "vsetvli t0, zero, e8, m4 \n\t" "vnclip.wx v16, v16, zero \n\t" "vsetvli t0, zero, e64, m4 \n\t" "vsse64.v v16, (s1), t6 \n\t" "add a1, a1, %[STRIDE] \n\t" "bge t2, %[BlkLen], LOOP%= \n\t" "TAIL%=: \n\t" "blez t2, QUIT%= \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vxor.vv v8, v8, v8 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v24, v24, v24 \n\t" "vsetvli t0, t2, e32, m8 \n\t" "sub t2, t2, t0 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vsetvli t0, t2, e32, m8 \n\t" "vle32.v v8, (%[SRC]) \n\t" "sub t2, t2, t2 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "jal x0, QUANTIZE%= \n\t" "QUIT%=: \n\t" : [SRC] "+r"(SRC) : [DST] "r"(DST), [BlkLen] "r"(BlkLen), [OFFSET] "r"(offset), [STRIDE] "r"(stride), [CountK] "r"(CountK), [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal) : "cc", "t0", "t1", "t2", "t6", "a1", "s1", "s2", "f10", "f11"); } } else if (BlkLen == 256) { for (size_t row_index = 0; row_index < 4; ++row_index) { const float * SRC = A + row_index * CountK; std::byte * DST = QuantA + row_index * sizeof(float); const size_t offset = (4 - row_index) * 4 + row_index * 8; const size_t stride = 4 * (sizeof(float) + BlkLen); __asm__ volatile( "vsetvli t0, zero, e32, m8 \n\t" "li t6, 32 \n\t" "addi t2, %[CountK], 0 \n\t" "addi a1, %[DST], 0 \n\t" "add s1, a1, %[OFFSET] \n\t" "blt t2, %[BlkLen], TAIL%= \n\t" "LOOP%=: \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v16, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v24, (%[SRC]) \n\t" "addi %[SRC], %[SRC], -768 \n\t" "addi t2, t2, -256 \n\t" "vfabs.v v0, v0 \n\t" "vfabs.v v8, v8 \n\t" "vfabs.v v16, v16 \n\t" "vfabs.v v24, v24 \n\t" "vfmax.vv v8, v0, v8 \n\t" "vfmax.vv v24, v24, v16 \n\t" "vfmax.vv v8, v8, v24 \n\t" "vfredmax.vs v24, v8, v24 \n\t" "vfmv.f.s f10, v24 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v16, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v24, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "QUANTIZE%=: \n\t" "add s1, a1, %[OFFSET] \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (a1) \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vfmul.vf v0, v0, f11 \n\t" "vfmul.vf v8, v8, f11 \n\t" "vfmul.vf v16, v16, f11 \n\t" "vfmul.vf v24, v24, f11 \n\t" "vfcvt.x.f.v v0, v0 \n\t" "vfcvt.x.f.v v8, v8 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v0, v0, 
zero \n\t" "vnclip.wx v4, v8, zero \n\t" "vnclip.wx v8, v16, zero \n\t" "vnclip.wx v12, v24, zero \n\t" "vsetvli t0, zero, e8, m4 \n\t" "vnclip.wx v0, v0, zero \n\t" "vnclip.wx v4, v8, zero \n\t" "vsetvli t0, zero, e64, m8 \n\t" "vsse64.v v0, (s1), t6 \n\t" "add a1, a1, %[STRIDE] \n\t" "bge t2, %[BlkLen], LOOP%= \n\t" "TAIL%=: \n\t" "blez t2, QUIT%= \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vxor.vv v8, v8, v8 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v24, v24, v24 \n\t" "addi t1, t2, 0 \n\t" "vsetvli t0, t1, e32, m8 \n\t" "sub t1, t1, t0 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vsetvli t0, t1, e32, m8 \n\t" "sub t1, t1, t0 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vsetvli t0, t1, e32, m8 \n\t" "sub t1, t1, t0 \n\t" "vle32.v v16, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vsetvli t0, t1, e32, m8 \n\t" "vle32.v v24, (%[SRC]) \n\t" "addi %[SRC], %[SRC], -768 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vfabs.v v0, v0 \n\t" "vfabs.v v8, v8 \n\t" "vfabs.v v16, v16 \n\t" "vfabs.v v24, v24 \n\t" "vfmax.vv v8, v0, v8 \n\t" "vfmax.vv v24, v16, v24 \n\t" "vfmax.vv v8, v8, v24 \n\t" "vfredmax.vs v24, v8, v24 \n\t" "vfmv.f.s f10, v24 \n\t" "add s1, a1, %[OFFSET] \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (a1) \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vsetvli t0, zero, e64, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vsse64.v v0, (s1), t6 \n\t" "TAIL_LOOP%=: \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vxor.vv v0, v0, v0 \n\t" "vsetvli t0, t2, e32, m1 \n\t" "sub t2, t2, t0 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 32 \n\t" "vfmul.vf v1, v0, f11 \n\t" "vfcvt.x.f.v v2, v1 \n\t" "vsetvli t0, zero, e16, mf2 \n\t" "vnclip.wx v3, v2, zero \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vnclip.wx v3, v3, zero \n\t" "vse8.v v3, (s1) \n\t" "addi s1, s1, 32 \n\t" "bnez t2, TAIL_LOOP%= \n\t" "QUIT%=: \n\t" : [SRC] "+r"(SRC) : [DST] "r"(DST), [BlkLen] "r"(BlkLen), [OFFSET] "r"(offset), [STRIDE] "r"(stride), [CountK] "r"(CountK), [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal) : "cc", "t0", "t1", "t2", "t6", "a1", "s1", "s2", "f10", "f11"); } } } void quantize_a_row_i8(size_t BlkLen, const float * A, size_t CountK, std::byte * QuantA) { const float * SRC = A; std::byte * DST = QuantA; constexpr float range_max_reciprocal = 1.0f / ((1 << 7) - 1); const float fone = 1.0f; std::byte * QuantA_offset = QuantA + CountK + 4 * ((CountK + BlkLen - 1) / BlkLen); size_t offset = (CountK + BlkLen - 1) / BlkLen * BlkLen - CountK; if (CountK <= BlkLen) { float max_abs_A = 0.0f; for (size_t k = 0; k < CountK; k++) { max_abs_A = std::max(max_abs_A, fabsf(A[k])); } float scale_A = max_abs_A * range_max_reciprocal; ((float *) QuantA)[0] = scale_A; auto * QuantAData_offset = (int8_t *) (QuantA + sizeof(float)); for (size_t k = 0; k < CountK; k++) { QuantAData_offset[k] = (int8_t) std::clamp(roundf(A[k] / scale_A), (float) std::numeric_limits::lowest(), (float) std::numeric_limits::max()); } for (size_t k = CountK; k < BlkLen; k++) { QuantAData_offset[k] = 0; } return; } if (BlkLen != 32 || BlkLen != 64 || BlkLen != 128) { __asm__ volatile( "vsetvli t0, zero, e8, m8 \n\t" "vxor.vv v24, v24, v24 \n\t" "LOOP%=: \n\t" "vsetvli t0, %[CNT], e8, m8 \n\t" "vse8.v v24, (%[DST]) \n\t" "addi %[DST], %[DST], 128 \n\t" "sub %[CNT], %[CNT], t0 \n\t" "bnez %[CNT], LOOP%= \n\t" : [DST] "+r"(QuantA_offset), [CNT] "+r"(offset) : : "cc", "t0"); } if (BlkLen == 16) { float buffer[64] = { 0.0f }; __asm__ volatile( "addi t3, zero, 16*8 \n\t" "addi t2, zero, 16 \n\t" "blt 
%[K], t3, LOOP_K%= \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_MAIN%=: \n\t" "vsetvli t1, zero, e32, m2 \n\t" "addi %[K], %[K], -128 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v2, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v4, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v6, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v10, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v12, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "vle32.v v14, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "addi a1, %[BUFFER], 0 \n\t" "vfabs.v v16, v0 \n\t" "vfabs.v v18, v2 \n\t" "vfabs.v v20, v4 \n\t" "vfabs.v v22, v6 \n\t" "vfabs.v v24, v8 \n\t" "vfabs.v v26, v10 \n\t" "vfabs.v v28, v12 \n\t" "vfabs.v v30, v14 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v16, v16, v17 \n\t" "vfmax.vv v18, v18, v19 \n\t" "vfmax.vv v20, v20, v21 \n\t" "vfmax.vv v22, v22, v23 \n\t" "vfmax.vv v24, v24, v25 \n\t" "vfmax.vv v26, v26, v27 \n\t" "vfmax.vv v28, v28, v29 \n\t" "vfmax.vv v30, v30, v31 \n\t" "vse32.v v16, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v18, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v20, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v22, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v24, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v26, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v28, (a1) \n\t" "addi a1, a1, 32 \n\t" "vse32.v v30, (a1) \n\t" "addi a1, %[BUFFER], 0 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f10, f3, f7 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f10, %[FONE], f10 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f11, f3, f7 \n\t" "fmul.s f11, f11, %[RMAXREC] \n\t" "fsw f11, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f11, %[FONE], f11 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f12, f3, f7 \n\t" "fmul.s f12, f12, %[RMAXREC] \n\t" "fsw f12, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f12, %[FONE], f12 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f13, f3, f7 \n\t" "fmul.s f13, f13, %[RMAXREC] \n\t" "fsw f13, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f13, %[FONE], f13 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw 
f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f14, f3, f7 \n\t" "fmul.s f14, f14, %[RMAXREC] \n\t" "fsw f14, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f14, %[FONE], f14 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f15, f3, f7 \n\t" "fmul.s f15, f15, %[RMAXREC] \n\t" "fsw f15, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f15, %[FONE], f15 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f16, f3, f7 \n\t" "fmul.s f16, f16, %[RMAXREC] \n\t" "fsw f16, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "fdiv.s f16, %[FONE], f16 \n\t" "flw f0, (a1) \n\t" "flw f1, 4(a1) \n\t" "flw f2, 8(a1) \n\t" "flw f3, 12(a1) \n\t" "flw f4, 16(a1) \n\t" "flw f5, 20(a1) \n\t" "flw f6, 24(a1) \n\t" "flw f7, 28(a1) \n\t" "addi a1, a1, 32 \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f17, f3, f7 \n\t" "fmul.s f17, f17, %[RMAXREC] \n\t" "fsw f17, (%[DST]) \n\t" "addi %[DST], %[DST], -136 \n\t" "fdiv.s f17, %[FONE], f17 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmul.vf v16, v0, f10 \n\t" "vfmul.vf v18, v2, f11 \n\t" "vfmul.vf v20, v4, f12 \n\t" "vfmul.vf v22, v6, f13 \n\t" "vfmul.vf v24, v8, f14 \n\t" "vfmul.vf v26, v10, f15 \n\t" "vfmul.vf v28, v12, f16 \n\t" "vfmul.vf v30, v14, f17 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v18, v18 \n\t" "vfcvt.x.f.v v20, v20 \n\t" "vfcvt.x.f.v v22, v22 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vfcvt.x.f.v v26, v26 \n\t" "vfcvt.x.f.v v28, v28 \n\t" "vfcvt.x.f.v v30, v30 \n\t" "vsetvli t0, zero, e16, m1 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v18, v18, zero \n\t" "vnclip.wx v20, v20, zero \n\t" "vnclip.wx v22, v22, zero \n\t" "vnclip.wx v24, v24, zero \n\t" "vnclip.wx v26, v26, zero \n\t" "vnclip.wx v28, v28, zero \n\t" "vnclip.wx v30, v30, zero \n\t" "vsetvli t0, t1, e8, mf2 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v18, v18, zero \n\t" "vnclip.wx v20, v20, zero \n\t" "vnclip.wx v22, v22, zero \n\t" "vnclip.wx v24, v24, zero \n\t" "vnclip.wx v26, v26, zero \n\t" "vnclip.wx v28, v28, zero \n\t" "vnclip.wx v30, v30, zero \n\t" "vse8.v v16, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v18, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v20, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v22, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v24, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v26, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v28, (%[DST]) \n\t" "addi %[DST], %[DST], 20 \n\t" "vse8.v v30, (%[DST]) \n\t" "addi %[DST], %[DST], 16 \n\t" "bge %[K], t3, LOOP_MAIN%= \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_K%=: \n\t" "vsetvli t1, %[K], e32, m2 \n\t" "vle32.v v0, 
(%[SRC]) \n\t" "addi %[SRC], %[SRC], 64 \n\t" "sub %[K], %[K], t1 \n\t" "vfabs.v v16, v0 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v16, v16, v17 \n\t" "vse32.v v16, (%[BUFFER]) \n\t" "flw f0, (%[BUFFER]) \n\t" "flw f1, 4(%[BUFFER]) \n\t" "flw f2, 8(%[BUFFER]) \n\t" "flw f3, 12(%[BUFFER]) \n\t" "flw f4, 16(%[BUFFER]) \n\t" "flw f5, 20(%[BUFFER]) \n\t" "flw f6, 24(%[BUFFER]) \n\t" "flw f7, 28(%[BUFFER]) \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f10, f3, f7 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (%[DST]) \n\t" "addi %[DST], %[DST], 4 \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmul.vf v16, v0, f11 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vsetvli t0, zero, e16, m1 \n\t" "vnclip.wx v16, v16, zero \n\t" "vsetvli t0, t1, e8, mf2 \n\t" "vnclip.wx v16, v16, zero \n\t" "vse8.v v16, (%[DST]) \n\t" "addi %[DST], %[DST], 16 \n\t" "bge %[K], t2, LOOP_K%= \n\t" "TAIL%=: \n\t" "blez %[K], END%= \n\t" "vsetvli t0, t3, e32, m2 \n\t" "vxor.vv v16, v16, v16 \n\t" "jal x0, LOOP_K%= \n\t" "END%=: \n\t" : [SRC] "+r"(SRC), [DST] "+r"(DST), [K] "+r"(CountK) : [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal), [BUFFER] "r"(buffer) : "cc", "t3", "t2", "t1", "t0", "a1", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17"); } else if (BlkLen == 32) { __asm__ volatile( "addi t3, zero, 32*4 \n\t" "addi t2, zero, 32 \n\t" "addi a1, %[SRC], 0 \n\t" "addi a2, %[SRC], 128 \n\t" "addi a3, %[SRC], 256 \n\t" "addi a4, %[SRC], 384 \n\t" "addi s1, %[DST], 0 \n\t" "addi s2, %[DST], 36 \n\t" "addi s3, %[DST], 72 \n\t" "addi s4, %[DST], 108 \n\t" "blt %[K], t3, LOOP_K%= \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_MAIN%=: \n\t" "vsetvli t1, zero, e32, m4 \n\t" "addi %[K], %[K], -128 \n\t" "vle32.v v0, (a1) \n\t" "addi a1, a1, 512 \n\t" "vle32.v v4, (a2) \n\t" "addi a2, a2, 512 \n\t" "vle32.v v8, (a3) \n\t" "addi a3, a3, 512 \n\t" "vle32.v v12, (a4) \n\t" "addi a4, a4, 512 \n\t" "vfabs.v v16, v0 \n\t" "vfabs.v v20, v4 \n\t" "vfabs.v v24, v8 \n\t" "vfabs.v v28, v12 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v16, v16, v18 \n\t" "vfmax.vv v20, v20, v22 \n\t" "vfmax.vv v24, v24, v26 \n\t" "vfmax.vv v28, v28, v30 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v16, v16, v17 \n\t" "vfmax.vv v20, v20, v21 \n\t" "vfmax.vv v24, v24, v25 \n\t" "vfmax.vv v28, v28, v29 \n\t" "vfredmax.vs v17, v16, v17 \n\t" "vfredmax.vs v21, v20, v21 \n\t" "vfredmax.vs v25, v24, v25 \n\t" "vfredmax.vs v29, v28, v29 \n\t" "vfmv.f.s f10, v17 \n\t" "vfmv.f.s f11, v21 \n\t" "vfmv.f.s f12, v25 \n\t" "vfmv.f.s f13, v29 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fmul.s f11, f11, %[RMAXREC] \n\t" "fmul.s f12, f12, %[RMAXREC] \n\t" "fmul.s f13, f13, %[RMAXREC] \n\t" "fsw f10, (s1) \n\t" "addi s1, s1, 4 \n\t" "fsw f11, (s2) \n\t" "addi s2, s2, 4 \n\t" "fsw f12, (s3) \n\t" "addi s3, s3, 4 \n\t" "fsw f13, (s4) \n\t" "addi s4, s4, 4 \n\t" "fdiv.s f10, %[FONE], f10 \n\t" "fdiv.s f11, %[FONE], f11 \n\t" "fdiv.s f12, %[FONE], f12 \n\t" "fdiv.s f13, %[FONE], f13 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vfmul.vf v16, v0, f10 \n\t" "vfmul.vf v20, v4, f11 \n\t" "vfmul.vf v24, v8, f12 \n\t" "vfmul.vf v28, v12, f13 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v20, v20 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vfcvt.x.f.v v28, v28 \n\t" "vsetvli t0, zero, e16, m2 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v20, v20, zero \n\t" "vnclip.wx v24, v24, zero \n\t" "vnclip.wx 
v28, v28, zero \n\t" "vsetvli t0, t1, e8, m1 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v20, v20, zero \n\t" "vnclip.wx v24, v24, zero \n\t" "vnclip.wx v28, v28, zero \n\t" "vse8.v v16, (s1) \n\t" "addi s1, s1, 140 \n\t" "vse8.v v20, (s2) \n\t" "addi s2, s2, 140 \n\t" "vse8.v v24, (s3) \n\t" "addi s3, s3, 140 \n\t" "vse8.v v28, (s4) \n\t" "addi s4, s4, 140 \n\t" "bge %[K], t3, LOOP_MAIN%= \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_K%=: \n\t" "vsetvli t1, %[K], e32, m4 \n\t" "vle32.v v0, (a1) \n\t" "addi a1, a1, 128 \n\t" "sub %[K], %[K], t1 \n\t" "vfabs.v v16, v0 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v16, v16, v18 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v16, v16, v17 \n\t" "vfredmax.vs v17, v16, v17 \n\t" "vfmv.f.s f10, v17 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (s1) \n\t" "addi s1, s1, 4 \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vfmul.vf v16, v0, f11 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vsetvli t0, zero, e16, m2 \n\t" "vnclip.wx v16, v16, zero \n\t" "vsetvli t0, zero, e8, m1 \n\t" "vnclip.wx v16, v16, zero \n\t" "vse8.v v16, (s1) \n\t" "addi s1, s1, 32 \n\t" "bge %[K], t2, LOOP_K%= \n\t" "TAIL%=: \n\t" "blez %[K], END%= \n\t" "vsetvli t0, t3, e32, m4 \n\t" "vxor.vv v0, v0, v0 \n\t" "vxor.vv v16, v16, v16 \n\t" "jal x0, LOOP_K%= \n\t" "END%=: \n\t" : [K] "+r"(CountK) : [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal), [SRC] "r"(SRC), [DST] "r"(DST) : "cc", "t3", "t2", "t1", "t0", "a1", "a2", "a3", "a4", "s1", "s2", "s3", "s4", "f10", "f11", "f12", "f13"); } else if (BlkLen == 64) { __asm__ volatile( "addi t3, zero, 64*2 \n\t" "addi t2, zero, 64 \n\t" "addi a1, %[SRC], 0 \n\t" "addi a2, %[SRC], 256 \n\t" "addi s1, %[DST], 0 \n\t" "addi s2, %[DST], 68 \n\t" "blt %[K], t3, LOOP_K%= \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_MAIN%=: \n\t" "vsetvli t1, zero, e32, m8 \n\t" "addi %[K], %[K], -128 \n\t" "vle32.v v0, (a1) \n\t" "addi a1, a1, 512 \n\t" "vle32.v v8, (a2) \n\t" "addi a2, a2, 512 \n\t" "vfabs.v v16, v0 \n\t" "vfabs.v v24, v8 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vfmax.vv v16, v16, v20 \n\t" "vfmax.vv v24, v24, v28 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v16, v16, v18 \n\t" "vfmax.vv v24, v24, v26 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v16, v16, v17 \n\t" "vfmax.vv v24, v24, v25 \n\t" "vfredmax.vs v17, v16, v17 \n\t" "vfredmax.vs v25, v24, v25 \n\t" "vfmv.f.s f10, v17 \n\t" "vfmv.f.s f11, v25 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fmul.s f11, f11, %[RMAXREC] \n\t" "fsw f10, (s1) \n\t" "addi s1, s1, 4 \n\t" "fsw f11, (s2) \n\t" "addi s2, s2, 4 \n\t" "fdiv.s f10, %[FONE], f10 \n\t" "fdiv.s f11, %[FONE], f11 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vfmul.vf v16, v0, f10 \n\t" "vfmul.vf v24, v8, f11 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v24, v24, zero \n\t" "vsetvli t0, t1, e8, m2 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v24, v24, zero \n\t" "vse8.v v16, (s1) \n\t" "addi s1, s1, 132 \n\t" "vse8.v v24, (s2) \n\t" "addi s2, s2, 132 \n\t" "bge %[K], t3, LOOP_MAIN%= \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_K%=: \n\t" "vsetvli t1, %[K], e32, m8 \n\t" "vle32.v v0, (a1) \n\t" "addi a1, a1, 256 \n\t" "sub %[K], %[K], t1 \n\t" "vfabs.v v16, v0 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vfmax.vv v16, v16, v20 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v16, v16, v18 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v16, v16, v17 \n\t" "vfredmax.vs v17, v16, v17 \n\t" "vfmv.f.s f10, v17 \n\t" "fmul.s f10, 
f10, %[RMAXREC] \n\t" "fsw f10, (s1) \n\t" "addi s1, s1, 4 \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vfmul.vf v16, v0, f11 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v16, v16, zero \n\t" "vsetvli t0, zero, e8, m2 \n\t" "vnclip.wx v16, v16, zero \n\t" "vse8.v v16, (s1) \n\t" "addi s1, s1, 64 \n\t" "bge %[K], t2, LOOP_K%= \n\t" "TAIL%=: \n\t" "blez %[K], END%= \n\t" "vsetvli t0, t3, e32, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vxor.vv v16, v16, v16 \n\t" "jal x0, LOOP_K%= \n\t" "END%=: \n\t" : [K] "+r"(CountK) : [SRC] "r"(SRC), [DST] "r"(DST), [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal) : "cc", "t3", "t2", "t1", "t0", "a1", "a2", "s1", "s2", "f10", "f11"); } else if (BlkLen == 128) { __asm__ volatile( "addi t2, zero, 128 \n\t" "addi a1, %[SRC], 0 \n\t" "addi a2, %[SRC], 256 \n\t" "blt %[K], t2, TAIL%= \n\t" "LOOP_K%=: \n\t" "vsetvli t1, zero, e32, m8 \n\t" "vle32.v v0, (a1) \n\t" "addi a1, a1, 512 \n\t" "vle32.v v8, (a2) \n\t" "addi a2, a2, 512 \n\t" "sub %[K], %[K], t2 \n\t" "QUANT%=: \n\t" "vfabs.v v16, v0 \n\t" "vfabs.v v24, v8 \n\t" "vfmax.vv v24, v16, v24 \n\t" "vsetvli t1, zero, e32, m4 \n\t" "vfmax.vv v28, v24, v28 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v30, v28, v30 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v30, v30, v31 \n\t" "vfredmax.vs v31, v30, v31 \n\t" "vfmv.f.s f10, v31 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (%[DST]) \n\t" "addi %[DST], %[DST], 4 \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vfmul.vf v16, v0, f11 \n\t" "vfmul.vf v24, v8, f11 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v16, v16, zero \n\t" "vnclip.wx v20, v24, zero \n\t" "vsetvli t0, zero, e8, m4 \n\t" "vnclip.wx v16, v16, zero \n\t" "vse8.v v16, (%[DST]) \n\t" "addi %[DST], %[DST], 128 \n\t" "bge %[K], t2, LOOP_K%= \n\t" "TAIL%=: \n\t" "blez %[K], END%= \n\t" "vsetvli t1, zero, e32, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vxor.vv v8, v8, v8 \n\t" "vsetvli t0, %[K], e32, m8 \n\t" "vle32.v v0, (a1) \n\t" "sub %[K], %[K], t0 \n\t" "vsetvli t0, %[K], e32, m8 \n\t" "vle32.v v8, (a2) \n\t" "sub %[K], %[K], t0 \n\t" "vsetvli t1, zero, e32, m8 \n\t" "jal x0, QUANT%= \n\t" "END%=: \n\t" : [DST] "+r"(DST), [K] "+r"(CountK) : [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal), [SRC] "r"(SRC) : "cc", "t2", "t1", "t0", "a1", "a2", "f10", "f11"); } else { float buffer[8] = { 0.0f }; size_t cnt = BlkLen / 256; __asm__ volatile( "slli t3, %[BLK], 2 \n\t" "blt %[K], %[BLK], LOOP_TAIL%= \n\t" "LOOP_MAIN%=: \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vxor.vv v31, v31, v31 \n\t" "vse32.v v31, (%[BUFFER]) \n\t" "addi t6, %[CNT], 0 \n\t" "LOOP_CMP%=: \n\t" "addi t6, t6, -1 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v16, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v24, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vfabs.v v0, v0 \n\t" "vfabs.v v8, v8 \n\t" "vfabs.v v16, v16 \n\t" "vfabs.v v24, v24 \n\t" "vfmax.vv v8, v0, v8 \n\t" "vfmax.vv v16, v16, v24 \n\t" "vfmax.vv v0, v0, v16 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vfmax.vv v0, v0, v4 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v0, v0, v2 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v0, v0, v1 \n\t" "vle32.v v30, (%[BUFFER]) \n\t" "vfmax.vv v31, v30, v0 \n\t" "vse32.v v31, (%[BUFFER]) \n\t" "bnez t6, LOOP_CMP%= \n\t" "sub %[SRC], %[SRC], t3 \n\t" "addi 
t6, %[CNT], 0 \n\t" "flw f0, (%[BUFFER]) \n\t" "flw f1, 4(%[BUFFER]) \n\t" "flw f2, 8(%[BUFFER]) \n\t" "flw f3, 12(%[BUFFER]) \n\t" "flw f4, 16(%[BUFFER]) \n\t" "flw f5, 20(%[BUFFER]) \n\t" "flw f6, 24(%[BUFFER]) \n\t" "flw f7, 28(%[BUFFER]) \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f10, f3, f7 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (%[DST]) \n\t" "addi %[DST], %[DST], 4 \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "addi t6, %[CNT], 0 \n\t" "LOOP_QUANT%=: \n\t" "addi t6, t6, -1 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v8, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v16, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vle32.v v24, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vfmul.vf v0, v0, f11 \n\t" "vfmul.vf v8, v8, f11 \n\t" "vfmul.vf v16, v16, f11 \n\t" "vfmul.vf v24, v24, f11 \n\t" "vfcvt.x.f.v v0, v0 \n\t" "vfcvt.x.f.v v8, v8 \n\t" "vfcvt.x.f.v v16, v16 \n\t" "vfcvt.x.f.v v24, v24 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v0, v0, zero \n\t" "vnclip.wx v4, v8, zero \n\t" "vnclip.wx v8, v16, zero \n\t" "vnclip.wx v12, v24, zero \n\t" "vsetvli t0, zero, e8, m4 \n\t" "vnclip.wx v0, v0, zero \n\t" "vnclip.wx v4, v8, zero \n\t" "vse8.v v0, (%[DST]) \n\t" "addi %[DST], %[DST], 128 \n\t" "vse8.v v4, (%[DST]) \n\t" "addi %[DST], %[DST], 128 \n\t" "bnez t6, LOOP_QUANT%= \n\t" "sub %[K], %[K], %[BLK] \n\t" "bge %[K], %[BLK], LOOP_MAIN%= \n\t" "blez %[K], END%= \n\t" "LOOP_TAIL%=: \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vxor.vv v31, v31, v31 \n\t" "vse32.v v31, (%[BUFFER]) \n\t" "addi t6, %[K], 0 \n\t" "addi s1, %[SRC], 0 \n\t" "TAIL_CMP%=: \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vsetvli t0, t6, e32, m8 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi %[SRC], %[SRC], 256 \n\t" "sub t6, t6, t0 \n\t" "vfabs.v v0, v0 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vfmax.vv v0, v0, v4 \n\t" "vsetvli t0, zero, e32, m2 \n\t" "vfmax.vv v0, v0, v2 \n\t" "vsetvli t0, zero, e32, m1 \n\t" "vfmax.vv v0, v0, v1 \n\t" "vle32.v v30, (%[BUFFER]) \n\t" "vfmax.vv v31, v30, v0 \n\t" "vse32.v v31, (%[BUFFER]) \n\t" "bnez t6, TAIL_CMP%= \n\t" "addi t6, %[K], 0 \n\t" "flw f0, (%[BUFFER]) \n\t" "flw f1, 4(%[BUFFER]) \n\t" "flw f2, 8(%[BUFFER]) \n\t" "flw f3, 12(%[BUFFER]) \n\t" "flw f4, 16(%[BUFFER]) \n\t" "flw f5, 20(%[BUFFER]) \n\t" "flw f6, 24(%[BUFFER]) \n\t" "flw f7, 28(%[BUFFER]) \n\t" "fmax.s f1, f0, f1 \n\t" "fmax.s f3, f2, f3 \n\t" "fmax.s f5, f4, f5 \n\t" "fmax.s f7, f6, f7 \n\t" "fmax.s f3, f1, f3 \n\t" "fmax.s f7, f5, f7 \n\t" "fmax.s f10, f3, f7 \n\t" "fmul.s f10, f10, %[RMAXREC] \n\t" "fsw f10, (%[DST]) \n\t" "addi %[DST], %[DST], 4 \n\t" "fdiv.s f11, %[FONE], f10 \n\t" "addi t6, %[K], 0 \n\t" "TAIL_QUANT%=: \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v0, v0, v0 \n\t" "vsetvli t1, t6, e32, m8 \n\t" "vle32.v v0, (s1) \n\t" "addi s1, s1, 256 \n\t" "sub t6, t6, t1 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vfmul.vf v0, v0, f11 \n\t" "vfcvt.x.f.v v0, v0 \n\t" "vsetvli t0, zero, e16, m4 \n\t" "vnclip.wx v0, v0, zero \n\t" "vsetvli t0, t1, e8, m2 \n\t" "vnclip.wx v0, v0, zero \n\t" "vse8.v v0, (%[DST]) \n\t" "addi %[DST], %[DST], 64 \n\t" "bnez t6, TAIL_QUANT%= \n\t" "END%=: \n\t" : [SRC] "+r"(SRC), [DST] "+r"(DST), [K] "+r"(CountK) : [FONE] "f"(fone), [RMAXREC] "f"(range_max_reciprocal), [BLK] "r"(BlkLen), [BUFFER] "r"(buffer), [CNT] "r"(cnt) : "cc", 
"t1", "t0", "t6", "s1", "f0", "f1", "f2", "f3", "f4", "f5", "f6"); } } } // namespace ime1 namespace { #define SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 \ "vmadot v16, v14, v0 \n\t" \ "vmadot v18, v14, v1 \n\t" \ "vmadot v20, v14, v2 \n\t" \ "vmadot v22, v14, v3 \n\t" \ "vmadot v16, v15, v4 \n\t" \ "vmadot v18, v15, v5 \n\t" \ "vmadot v20, v15, v6 \n\t" \ "vmadot v22, v15, v7 \n\t" #define SQ4BIT_KERNEL_ACC_1X4X4 \ "vfcvt.f.x.v v16, v16 \n\t" \ "vfcvt.f.x.v v18, v18 \n\t" \ "vfcvt.f.x.v v20, v20 \n\t" \ "vfcvt.f.x.v v22, v22 \n\t" \ "addi s2, s1, 16 \n\t" \ "addi s3, s1, 32 \n\t" \ "addi s4, s1, 48 \n\t" \ "addi s6, s5, 12 \n\t" \ "vfmacc.vv v28, v16, v24 \n\t" \ "vfmacc.vv v29, v18, v25 \n\t" \ "vfmacc.vv v30, v20, v26 \n\t" \ "vfmacc.vv v31, v22, v27 \n\t" #define SQ4BIT_KERNEL_ACC_F16_1X4X4 \ "vfcvt.f.x.v v16, v16 \n\t" \ "vfcvt.f.x.v v18, v18 \n\t" \ "vfcvt.f.x.v v20, v20 \n\t" \ "vfcvt.f.x.v v22, v22 \n\t" \ "addi s2, s1, 8 \n\t" \ "addi s3, s1, 16 \n\t" \ "addi s4, s1, 24 \n\t" \ "addi s6, s5, 12 \n\t" \ "vfmacc.vv v28, v16, v24 \n\t" \ "vfmacc.vv v29, v18, v25 \n\t" \ "vfmacc.vv v30, v20, v26 \n\t" \ "vfmacc.vv v31, v22, v27 \n\t" #define SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 \ "vle8.v v4, (s1) \n\t" \ "addi s1, s1, 128 \n\t" \ "vle8.v v5, (s2) \n\t" \ "addi s2, s2, 128 \n\t" \ "vle8.v v6, (s3) \n\t" \ "addi s3, s3, 128 \n\t" \ "vle8.v v7, (s4) \n\t" \ "addi s4, s4, 128 \n\t" \ "vsetvli t0, zero, e8, mf4 \n\t" \ "vle8.v v14, (s5) \n\t" \ "addi s5, s5, 16 \n\t" \ "vle8.v v15, (s6) \n\t" \ "addi s6, s6, 16 \n\t" \ "addi t5, t5, -1 \n\t" \ "vsetvli t0, zero, e8, m1 \n\t" \ "vand.vi v0, v4, 15 \n\t" \ "vand.vi v1, v5, 15 \n\t" \ "vand.vi v2, v6, 15 \n\t" \ "vand.vi v3, v7, 15 \n\t" \ "vsrl.vi v4, v4, 4 \n\t" \ "vsrl.vi v5, v5, 4 \n\t" \ "vsrl.vi v6, v6, 4 \n\t" \ "vsrl.vi v7, v7, 4 \n\t" #define SQ4BIT_KERNEL_LOAD_ZP_16X1 \ "vsetvli t0, zero, e8, mf2 \n\t" \ "vle8.v v1, (s7) \n\t" \ "vsetvli t0, zero, e8, m1 \n\t" \ "vrgather.vv v8, v1, v13 \n\t" \ "vadd.vi v13, v13, 4 \n\t" \ "vrgather.vv v9, v1, v13 \n\t" \ "vadd.vi v13, v13, 4 \n\t" \ "vrgather.vv v10, v1, v13 \n\t" \ "vadd.vi v13, v13, 4 \n\t" \ "vrgather.vv v11, v1, v13 \n\t" \ "vadd.vi v13, v13, -12 \n\t" // using for M4Kernel #define LOAD_B_16x8x2 \ "vsetvli t0, zero, e8, m1 \n\t" \ "vle8.v v6, (s1) \n\t" \ "addi s1, s1, 32*4 \n\t" \ "vle8.v v7, (s2) \n\t" \ "addi s2, s2, 32*4 \n\t" \ "vle8.v v8, (s3) \n\t" \ "addi s3, s3, 32*4 \n\t" \ "vle8.v v9, (s4) \n\t" \ "addi s4, s4, 32*4 \n\t" \ \ "vand.vi v2, v6, 15 \n\t" \ "vand.vi v3, v7, 15 \n\t" \ "vand.vi v4, v8, 15 \n\t" \ "vand.vi v5, v9, 15 \n\t" \ \ "vsrl.vi v6, v6, 4 \n\t" \ "vsrl.vi v7, v7, 4 \n\t" \ "vsrl.vi v8, v8, 4 \n\t" \ "vsrl.vi v9, v9, 4 \n\t" // [s2|s5, s3, s4, s6] #define LOAD_SCALE_4x16_FP16 \ "addi s2, s5, -8 \n\t" \ "addi s3, s5, 8 \n\t" \ "addi s4, s5, 16 \n\t" \ "addi s6, s5, 24 \n\t" \ "li t1, 0xf0 \n\t" \ "vmv.s.x v0, t1 \n\t" \ "vsetvli t0, zero, e16, mf4 \n\t" \ "vle16.v v9, (s5) \n\t" \ "vle16.v v11, (s3) \n\t" \ "vle16.v v13, (s4) \n\t" \ "vle16.v v15, (s6) \n\t" \ "vsetvli t0, zero, e16, mf2 \n\t" \ "vle16.v v9, (s2), v0.t \n\t" \ "vle16.v v11, (s5), v0.t \n\t" \ "vle16.v v13, (s3), v0.t \n\t" \ "vle16.v v15, (s4), v0.t \n\t" \ "vfwcvt.f.f.v v8, v9 \n\t" \ "vfwcvt.f.f.v v10, v11 \n\t" \ "vfwcvt.f.f.v v12, v13 \n\t" \ "vfwcvt.f.f.v v14, v15 \n\t" \ "vsetvli t0, zero, e32, m1 \n\t" \ "vmv.v.v v9, v8 \n\t" \ "vmv.v.v v11, v10 \n\t" \ "vmv.v.v v13, v12 \n\t" \ "vmv.v.v v15, v14 \n\t" \ "li t1, 0xf0 \n\t" \ "vmv.s.x v0, t1 \n\t" \ "vsetvli t0, zero, e32, mf2 \n\t" \ "vfmul.vf 
v8, v8, f1 \n\t" \ "vfmul.vf v10, v10, f1 \n\t" \ "vfmul.vf v12, v12, f1 \n\t" \ "vfmul.vf v14, v14, f1 \n\t" \ "vfmul.vf v9, v9, f3 \n\t" \ "vfmul.vf v11, v11, f3 \n\t" \ "vfmul.vf v13, v13, f3 \n\t" \ "vfmul.vf v15, v15, f3 \n\t" \ "vsetvli t0, zero, e32, m1 \n\t" \ "vfmul.vf v8, v8, f2, v0.t \n\t" \ "vfmul.vf v10, v10, f2, v0.t \n\t" \ "vfmul.vf v12, v12, f2, v0.t \n\t" \ "vfmul.vf v14, v14, f2, v0.t \n\t" \ "vfmul.vf v9, v9, f4, v0.t \n\t" \ "vfmul.vf v11, v11, f4, v0.t \n\t" \ "vfmul.vf v13, v13, f4, v0.t \n\t" \ "vfmul.vf v15, v15, f4, v0.t \n\t" // [s2|s5, s3, s4, s6] #define LOAD_SCALE_4x16 \ "addi s2, s5, -16 \n\t" \ "addi s3, s5, 16 \n\t" \ "addi s4, s5, 32 \n\t" \ "addi s6, s5, 48 \n\t" \ "li t1, 0xf0 \n\t" \ "vmv.s.x v0, t1 \n\t" \ "vsetvli t0, zero, e32, mf2 \n\t" \ "vle32.v v8, (s5) \n\t" \ "vle32.v v10, (s3) \n\t" \ "vle32.v v12, (s4) \n\t" \ "vle32.v v14, (s6) \n\t" \ "vsetvli t0, zero, e32, m1 \n\t" \ "vle32.v v8, (s2), v0.t \n\t" \ "vle32.v v10, (s5), v0.t \n\t" \ "vle32.v v12, (s3), v0.t \n\t" \ "vle32.v v14, (s4), v0.t \n\t" \ "vmv.v.v v9, v8 \n\t" \ "vmv.v.v v11, v10 \n\t" \ "vmv.v.v v13, v12 \n\t" \ "vmv.v.v v15, v14 \n\t" \ "vsetvli t0, zero, e32, mf2 \n\t" \ "vfmul.vf v8, v8, f1 \n\t" \ "vfmul.vf v10, v10, f1 \n\t" \ "vfmul.vf v12, v12, f1 \n\t" \ "vfmul.vf v14, v14, f1 \n\t" \ "vfmul.vf v9, v9, f3 \n\t" \ "vfmul.vf v11, v11, f3 \n\t" \ "vfmul.vf v13, v13, f3 \n\t" \ "vfmul.vf v15, v15, f3 \n\t" \ "vsetvli t0, zero, e32, m1 \n\t" \ "vfmul.vf v8, v8, f2, v0.t \n\t" \ "vfmul.vf v10, v10, f2, v0.t \n\t" \ "vfmul.vf v12, v12, f2, v0.t \n\t" \ "vfmul.vf v14, v14, f2, v0.t \n\t" \ "vfmul.vf v9, v9, f4, v0.t \n\t" \ "vfmul.vf v11, v11, f4, v0.t \n\t" \ "vfmul.vf v13, v13, f4, v0.t \n\t" \ "vfmul.vf v15, v15, f4, v0.t \n\t" //[s1| BIAS, s2, s3, s4] #define LOAD_BIAS \ "vsetvli t0, zero, e32, mf2 \n\t" \ "li t1, 0xf0 \n\t" \ "vmv.s.x v0, t1 \n\t" \ "addi s1, %[BIAS], -16 \n\t" \ "addi s2, %[BIAS], 16 \n\t" \ "addi s3, %[BIAS], 32 \n\t" \ "addi s4, %[BIAS], 48 \n\t" \ \ "vle32.v v24, (%[BIAS]) \n\t" \ "vle32.v v26, (s2) \n\t" \ "vle32.v v28, (s3) \n\t" \ "vle32.v v30, (s4) \n\t" \ "vsetvli t0, zero, e32, m1 \n\t" \ "vle32.v v24, (s1), v0.t \n\t" \ "vle32.v v26, (%[BIAS]), v0.t \n\t" \ "vle32.v v28, (s2), v0.t \n\t" \ "vle32.v v30, (s3), v0.t \n\t" \ "vmv.v.v v25, v24 \n\t" \ "vmv.v.v v27, v26 \n\t" \ "vmv.v.v v29, v28 \n\t" \ "vmv.v.v v31, v30 \n\t" #define SQ4BIT_KERNEL_COMP_4x16x16 \ "vmadot v16, v10, v2 \n\t" \ "vmadot v18, v10, v3 \n\t" \ "vmadot v20, v10, v4 \n\t" \ "vmadot v22, v10, v5 \n\t" \ "vmadot v16, v11, v6 \n\t" \ "vmadot v18, v11, v7 \n\t" \ "vmadot v20, v11, v8 \n\t" \ "vmadot v22, v11, v9 \n\t" #define SAVE_RESULT_4x16 \ "addi a1, %[C], 0 \n\t" \ "add a2, %[C], %[LDC] \n\t" \ "add a3, a2, %[LDC] \n\t" \ "add a4, a3, %[LDC] \n\t" \ "addi a2, a2, -16 \n\t" \ "addi a4, a4, -16 \n\t" \ "li t1, 0xf0 \n\t" \ "vmv.s.x v0, t1 \n\t" \ "vsetvli t0, zero, e32, mf2 \n\t" \ \ "vse32.v v24, (a1) \n\t" \ "addi a1, a1, 16 \n\t" \ "vse32.v v25, (a3) \n\t" \ "addi a3, a3, 16 \n\t" \ \ "vse32.v v26, (a1) \n\t" \ "addi a1, a1, 16 \n\t" \ "vse32.v v27, (a3) \n\t" \ "addi a3, a3, 16 \n\t" \ \ "vse32.v v28, (a1) \n\t" \ "addi a1, a1, 16 \n\t" \ "vse32.v v29, (a3) \n\t" \ "addi a3, a3, 16 \n\t" \ \ "vse32.v v30, (a1) \n\t" \ "vse32.v v31, (a3) \n\t" \ "vsetvli t0, zero, e32, m1 \n\t" \ \ "vse32.v v24, (a2), v0.t \n\t" \ "addi a2, a2, 16 \n\t" \ "vse32.v v25, (a4), v0.t \n\t" \ "addi a4, a4, 16 \n\t" \ \ "vse32.v v26, (a2), v0.t \n\t" \ "addi a2, a2, 16 \n\t" \ "vse32.v v27, (a4), 
v0.t \n\t" \ "addi a4, a4, 16 \n\t" \ \ "vse32.v v28, (a2), v0.t \n\t" \ "addi a2, a2, 16 \n\t" \ "vse32.v v29, (a4), v0.t \n\t" \ "addi a4, a4, 16 \n\t" \ \ "vse32.v v30, (a2), v0.t \n\t" \ "vse32.v v31, (a4), v0.t \n\t" #define SQ4BIT_KERNEL_LOAD_ZP_16X1_v2 \ "vsetvli t0, zero, e8, mf2 \n\t" \ "vle8.v v11, (s6) \n\t" \ "vsetvli t0, zero, e8, m1 \n\t" \ "vrgather.vv v12, v11, v1 \n\t" \ "vadd.vi v1, v1, 4 \n\t" \ "vrgather.vv v13, v11, v1 \n\t" \ "vadd.vi v1, v1, 4 \n\t" \ "vrgather.vv v14, v11, v1 \n\t" \ "vadd.vi v1, v1, 4 \n\t" \ "vrgather.vv v15, v11, v1 \n\t" \ "vadd.vi v1, v1, -12 \n\t" template void SQ4BitGemmM4Kernel_CompInt8_ScaleFp16_Impl(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountN, size_t BlockCountK, const float * Bias, const size_t ldc) { GGML_UNUSED(QuantBScale); GGML_UNUSED(QuantBZeroPoint); size_t LDC = ldc * sizeof(float); const size_t INNER = BlkLen / 16; float tmp[4 * 16]; if constexpr (HasZeroPoint) { for (size_t n = 0; n < CountN; n += 16) { size_t NBLKS = (CountN - n) > 16 ? 16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(uint8_t) + // zp n * BlockCountK * sizeof(_Float16); // scale float * CPtr = C + n; if (NBLKS < 16) { CPtr = tmp; LDC = 16 * sizeof(float); } if (Bias != nullptr) { const float * bias = Bias + n; if (NBLKS < 16) { __asm__ volatile( "vsetvli t0, %[N], e32, m2 \n\t" "vle32.v v0, (%[SRC]) \n\t" "vse32.v v0, (%[DST]) \n\t" : : [SRC] "r"(bias), [DST] "r"(tmp), [N] "r"(NBLKS) : "cc", "t0"); bias = tmp; } __asm__ volatile(LOAD_BIAS "addi t3, %[BlockCountK], 0 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "li s1, 24 \n\t" "vmv.v.i v1, 3 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v1, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v1, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v1, 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" // scale offset "addi s5, s1, 0 \n\t" // zp offset "addi s6, s1, 32 \n\t" "addi s1, s6, 16 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1_v2 "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vsub.vv v2, v2, v12 \n\t" "vsub.vv v6, v6, v12 \n\t" "vsub.vv v3, v3, v13 \n\t" "vsub.vv v7, v7, v13 \n\t" "vsub.vv v4, v4, v14 \n\t" "vsub.vv v8, v8, v14 \n\t" "vsub.vv v5, v5, v15 \n\t" "vsub.vv v9, v9, v15 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16_FP16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr), [BIAS] "r"(bias) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v24, v24, v24 \n\t" "addi t3, %[BlockCountK], 0 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "li s1, 24 \n\t" "vmv.v.i v1, 3 \n\t" 
"vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v1, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v1, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v1, 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" // scale offset "addi s5, s1, 0 \n\t" // zp offset "addi s6, s1, 32 \n\t" "addi s1, s6, 16 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1_v2 "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vsub.vv v2, v2, v12 \n\t" "vsub.vv v6, v6, v12 \n\t" "vsub.vv v3, v3, v13 \n\t" "vsub.vv v7, v7, v13 \n\t" "vsub.vv v4, v4, v14 \n\t" "vsub.vv v8, v8, v14 \n\t" "vsub.vv v5, v5, v15 \n\t" "vsub.vv v9, v9, v15 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16_FP16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } } } else { for (size_t n = 0; n < CountN; n += 16) { size_t NBLKS = (CountN - n) > 16 ? 16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(_Float16); // scale float * CPtr = C + n; if (NBLKS < 16) { CPtr = tmp; LDC = 16 * sizeof(float); } if (Bias != nullptr) { const float * bias = Bias + n; if (NBLKS < 16) { __asm__ volatile( "vsetvli t0, %[N], e32, m2 \n\t" "vle32.v v0, (%[SRC]) \n\t" "vse32.v v0, (%[DST]) \n\t" : : [SRC] "r"(bias), [DST] "r"(tmp), [N] "r"(NBLKS) : "cc", "t0"); bias = tmp; } __asm__ volatile(LOAD_BIAS "addi t3, %[BlockCountK], 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" "addi s5, s1, 0 \n\t" "addi s1, s5, 32 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vsetvli t0, zero, e8, m1 \n\t" "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" "vadd.vi v8, v8, -8 \n\t" "vadd.vi v9, v9, -8 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16_FP16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr), [BIAS] "r"(bias) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } else { __asm__ 
volatile( "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v24, v24, v24 \n\t" "addi t3, %[BlockCountK], 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" "addi s5, s1, 0 \n\t" "addi s1, s5, 32 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vsetvli t0, zero, e8, m1 \n\t" "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" "vadd.vi v8, v8, -8 \n\t" "vadd.vi v9, v9, -8 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16_FP16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } } } if (CountN % 16 != 0) { // stroe output from tmp to C when NBLKS less than 16. float * CPtr = C + CountN / 16 * 16; const size_t N = CountN % 16; LDC = ldc * sizeof(float); __asm__ volatile( "vsetvli t0, %[N], e32, m2 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi s2, %[SRC], 64 \n\t" "addi s3, %[SRC], 64*2 \n\t" "addi s4, %[SRC], 64*3 \n\t" "vle32.v v2, (s2) \n\t" "vle32.v v4, (s3) \n\t" "vle32.v v6, (s4) \n\t" "add t2, %[DST], %[LDC] \n\t" "add t3, t2, %[LDC] \n\t" "add t4, t3, %[LDC] \n\t" "vse32.v v0, (%[DST]) \n\t" "vse32.v v2, (t2) \n\t" "vse32.v v4, (t3) \n\t" "vse32.v v6, (t4) \n\t" : : [N] "r"(N), [SRC] "r"(tmp), [DST] "r"(CPtr), [LDC] "r"(LDC) : "cc", "t0", "t2", "t3", "t4", "s2", "s3", "s4"); } } template void SQ4BitGemmM4Kernel_CompInt8_Impl(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountN, size_t BlockCountK, const float * Bias, const size_t ldc) { GGML_UNUSED(QuantBScale); GGML_UNUSED(QuantBZeroPoint); size_t LDC = ldc * sizeof(float); const size_t INNER = BlkLen / 16; float tmp[4 * 16]; if constexpr (HasZeroPoint) { for (size_t n = 0; n < CountN; n += 16) { size_t NBLKS = (CountN - n) > 16 ? 
16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(uint8_t) + // zp n * BlockCountK * sizeof(float); // scale float * CPtr = C + n; if (NBLKS < 16) { CPtr = tmp; LDC = 16 * sizeof(float); } if (Bias != nullptr) { const float * bias = Bias + n; if (NBLKS < 16) { __asm__ volatile( "vsetvli t0, %[N], e32, m2 \n\t" "vle32.v v0, (%[SRC]) \n\t" "vse32.v v0, (%[DST]) \n\t" : : [SRC] "r"(bias), [DST] "r"(tmp), [N] "r"(NBLKS) : "cc", "t0"); bias = tmp; } __asm__ volatile(LOAD_BIAS "addi t3, %[BlockCountK], 0 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "li s1, 24 \n\t" "vmv.v.i v1, 3 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v1, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v1, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v1, 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" // scale offset "addi s5, s1, 0 \n\t" // zp offset "addi s6, s1, 64 \n\t" "addi s1, s6, 16 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1_v2 "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vsub.vv v2, v2, v12 \n\t" "vsub.vv v6, v6, v12 \n\t" "vsub.vv v3, v3, v13 \n\t" "vsub.vv v7, v7, v13 \n\t" "vsub.vv v4, v4, v14 \n\t" "vsub.vv v8, v8, v14 \n\t" "vsub.vv v5, v5, v15 \n\t" "vsub.vv v9, v9, v15 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr), [BIAS] "r"(bias) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v24, v24, v24 \n\t" "addi t3, %[BlockCountK], 0 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "li s1, 24 \n\t" "vmv.v.i v1, 3 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v1, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v1, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v1, 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" // scale offset "addi s5, s1, 0 \n\t" // zp offset "addi s6, s1, 64 \n\t" "addi s1, s6, 16 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1_v2 "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vsub.vv v2, v2, v12 \n\t" "vsub.vv v6, v6, v12 \n\t" "vsub.vv v3, v3, v13 \n\t" "vsub.vv v7, v7, v13 \n\t" "vsub.vv v4, v4, v14 \n\t" "vsub.vv v8, v8, v14 \n\t" "vsub.vv v5, v5, v15 \n\t" "vsub.vv v9, v9, v15 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16 "vsetvli t0, zero, e32, 
m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } } } else { for (size_t n = 0; n < CountN; n += 16) { size_t NBLKS = (CountN - n) > 16 ? 16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(float); // scale float * CPtr = C + n; if (NBLKS < 16) { CPtr = tmp; LDC = 16 * sizeof(float); } if (Bias != nullptr) { const float * bias = Bias + n; if (NBLKS < 16) { __asm__ volatile( "vsetvli t0, %[N], e32, m2 \n\t" "vle32.v v0, (%[SRC]) \n\t" "vse32.v v0, (%[DST]) \n\t" : : [SRC] "r"(bias), [DST] "r"(tmp), [N] "r"(NBLKS) : "cc", "t0"); bias = tmp; } __asm__ volatile(LOAD_BIAS "addi t3, %[BlockCountK], 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" "addi s5, s1, 0 \n\t" "addi s1, s5, 64 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vsetvli t0, zero, e8, m1 \n\t" "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" "vadd.vi v8, v8, -8 \n\t" "vadd.vi v9, v9, -8 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" "RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr), [BIAS] "r"(bias) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v24, v24, v24 \n\t" "addi t3, %[BlockCountK], 0 \n\t" "addi a1, %[A], 0 \n\t" "addi s1, %[B], 0 \n\t" "BLOCK_COUNTK_LOOP%=: \n\t" "addi s5, s1, 0 \n\t" "addi s1, s5, 64 \n\t" "addi s2, s1, 32 \n\t" "addi s3, s1, 32*2 \n\t" "addi s4, s1, 32*3 \n\t" "vsetvli t0, zero, e32, m8 \n\t" "vxor.vv v16, v16, v16 \n\t" // load a scale "flw f1, (a1) \n\t" "flw f2, 4(a1) \n\t" "flw f3, 8(a1) \n\t" "flw f4, 12(a1) \n\t" "addi a1, a1, 16 \n\t" "addi t2, %[INNER], 0 \n\t" "BLOCK_INNER_LOOP%=: \n\t" LOAD_B_16x8x2 "vsetvli t0, zero, e8, m1 \n\t" "vle8.v v10, (a1) \n\t" "addi a1, a1, 32 \n\t" "vle8.v v11, (a1) \n\t" "addi a1, a1, 32 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" "vadd.vi v8, v8, -8 \n\t" "vadd.vi v9, v9, -8 \n\t" SQ4BIT_KERNEL_COMP_4x16x16 "addi t2, t2, -1 \n\t" "bnez t2, BLOCK_INNER_LOOP%= \n\t" LOAD_SCALE_4x16 "vsetvli t0, zero, e32, m8 \n\t" "vfcvt.f.x.v v16, v16 \n\t" "vfmacc.vv v24, v16, v8 \n\t" "addi t3, t3, -1 \n\t" "bnez t3, BLOCK_COUNTK_LOOP%= \n\t" 
"RESULT_SAVE%=: \n\t" SAVE_RESULT_4x16 : : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [LDC] "r"(LDC), [BlockCountK] "r"(BlockCountK), [C] "r"(CPtr) : "cc", "t0", "t1", "t2", "t3", "a1", "a2", "a3", "a4", "f1", "f2", "f3", "f4", "s1", "s2", "s3", "s4", "s5", "s6"); } } } if (CountN % 16 != 0) { // stroe output from tmp to C when NBLKS less than 16. float * CPtr = C + CountN / 16 * 16; const size_t N = CountN % 16; LDC = ldc * sizeof(float); __asm__ volatile( "vsetvli t0, %[N], e32, m2 \n\t" "vle32.v v0, (%[SRC]) \n\t" "addi s2, %[SRC], 64 \n\t" "addi s3, %[SRC], 64*2 \n\t" "addi s4, %[SRC], 64*3 \n\t" "vle32.v v2, (s2) \n\t" "vle32.v v4, (s3) \n\t" "vle32.v v6, (s4) \n\t" "add t2, %[DST], %[LDC] \n\t" "add t3, t2, %[LDC] \n\t" "add t4, t3, %[LDC] \n\t" "vse32.v v0, (%[DST]) \n\t" "vse32.v v2, (t2) \n\t" "vse32.v v4, (t3) \n\t" "vse32.v v6, (t4) \n\t" : : [N] "r"(N), [SRC] "r"(tmp), [DST] "r"(CPtr), [LDC] "r"(LDC) : "cc", "t0", "t2", "t3", "t4", "s2", "s3", "s4"); } } template void SQ4BitGemmM1Kernel_CompInt8_ScaleFp16_Impl(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountN, size_t BlockCountK, const float * Bias) { GGML_UNUSED(QuantBScale); GGML_UNUSED(QuantBZeroPoint); size_t INNER = BlkLen / 16; if constexpr (HasZeroPoint) { for (size_t n = 0; n < CountN; n += 16) { size_t nblks = (CountN - n) > 16 ? 16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(uint8_t) + // zp n * BlockCountK * sizeof(_Float16); // scale float * CPtr = C + n; size_t cnt = BlockCountK; if (Bias != nullptr) { const float * bias = Bias + n; __asm__ volatile( "addi t3, %[NBLKS], 0 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "vmv.v.i v13, 3 \n\t" "li s1, 24 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v13, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v13, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v13, 0 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 8 \n\t" "addi s3, %[B], 16 \n\t" "addi s4, %[B], 24 \n\t" // zp offset "addi s7, %[B], 32 \n\t" // a offset "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v28, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v29, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v30, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v31, (%[BIAS]) \n\t" "LOOP_K%=: \n\t" "vsetvli t0, zero, e16, mf4 \n\t" "vle16.v v4, (s1) \n\t" "addi s1, s1, 48 \n\t" "vle16.v v5, (s2) \n\t" "addi s2, s2, 72 \n\t" "vle16.v v6, (s3) \n\t" "addi s3, s3, 96 \n\t" "vle16.v v7, (s4) \n\t" "addi s4, s4, 120 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "vfwcvt.f.f.v v8, v4 \n\t" "vfwcvt.f.f.v v9, v5 \n\t" "vfwcvt.f.f.v v10, v6 \n\t" "vfwcvt.f.f.v v11, v7 \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1 "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vsub.vv v0, v0, v8 \n\t" "vsub.vv v4, v4, v8 \n\t" "vsub.vv v1, v1, v9 \n\t" "vsub.vv v5, v5, v9 \n\t" "vsub.vv v2, 
v2, v10 \n\t" "vsub.vv v6, v6, v10 \n\t" "vsub.vv v3, v3, v11 \n\t" "vsub.vv v7, v7, v11 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_F16_1X4X4 "addi s7, s1, 32 \n\t" "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks), [BIAS] "+r"(bias) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6", "s7"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m4 \n\t" "vxor.vv v28, v28, v28 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "vmv.v.i v13, 3 \n\t" "li s1, 24 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v13, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v13, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v13, 0 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 8 \n\t" "addi s3, %[B], 16 \n\t" "addi s4, %[B], 24 \n\t" "addi s7, %[B], 32 \n\t" "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "LOOP_K%=: \n\t" "vsetvli t0, zero, e16, mf4 \n\t" "vle16.v v4, (s1) \n\t" "addi s1, s1, 48 \n\t" "vle16.v v5, (s2) \n\t" "addi s2, s2, 72 \n\t" "vle16.v v6, (s3) \n\t" "addi s3, s3, 96 \n\t" "vle16.v v7, (s4) \n\t" "addi s4, s4, 120 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "vfwcvt.f.f.v v8, v4 \n\t" "vfwcvt.f.f.v v9, v5 \n\t" "vfwcvt.f.f.v v10, v6 \n\t" "vfwcvt.f.f.v v11, v7 \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1 "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vsub.vv v0, v0, v8 \n\t" "vsub.vv v4, v4, v8 \n\t" "vsub.vv v1, v1, v9 \n\t" "vsub.vv v5, v5, v9 \n\t" "vsub.vv v2, v2, v10 \n\t" "vsub.vv v6, v6, v10 \n\t" "vsub.vv v3, v3, v11 \n\t" "vsub.vv v7, v7, v11 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_F16_1X4X4 "addi s7, s1, 32 \n\t" "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks) : 
[INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6", "s7"); } } } else { for (size_t n = 0; n < CountN; n += 16) { size_t nblks = (CountN - n) > 16 ? 16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(_Float16); // scale float * CPtr = C + n; size_t cnt = BlockCountK; if (Bias != nullptr) { const float * bias = Bias + n; __asm__ volatile( "addi t3, %[NBLKS], 0 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 8 \n\t" "addi s3, %[B], 16 \n\t" "addi s4, %[B], 24 \n\t" "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v28, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v29, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v30, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v31, (%[BIAS]) \n\t" "LOOP_K%=: \n\t" "vsetvli t0, zero, e16, mf4 \n\t" "vle16.v v4, (s1) \n\t" "addi s1, s1, 32 \n\t" "vle16.v v5, (s2) \n\t" "addi s2, s2, 56 \n\t" "vle16.v v6, (s3) \n\t" "addi s3, s3, 80 \n\t" "vle16.v v7, (s4) \n\t" "addi s4, s4, 104 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "vfwcvt.f.f.v v8, v4 \n\t" "vfwcvt.f.f.v v9, v5 \n\t" "vfwcvt.f.f.v v10, v6 \n\t" "vfwcvt.f.f.v v11, v7 \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vadd.vi v0, v0, -8 \n\t" "vadd.vi v1, v1, -8 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_F16_1X4X4 "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks), [BIAS] "+r"(bias) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m4 \n\t" "vxor.vv v28, v28, v28 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 8 \n\t" "addi s3, %[B], 16 \n\t" "addi s4, %[B], 24 \n\t" "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "LOOP_K%=: \n\t" "vsetvli t0, zero, e16, mf4 \n\t" "vle16.v v4, (s1) \n\t" "addi s1, s1, 32 \n\t" "vle16.v v5, (s2) \n\t" "addi s2, s2, 56 \n\t" "vle16.v v6, (s3) 
\n\t" "addi s3, s3, 80 \n\t" "vle16.v v7, (s4) \n\t" "addi s4, s4, 104 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "vfwcvt.f.f.v v8, v4 \n\t" "vfwcvt.f.f.v v9, v5 \n\t" "vfwcvt.f.f.v v10, v6 \n\t" "vfwcvt.f.f.v v11, v7 \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vadd.vi v0, v0, -8 \n\t" "vadd.vi v1, v1, -8 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_F16_1X4X4 "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6"); } } } } template void SQ4BitGemmM1Kernel_CompInt8_Impl(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountN, size_t BlockCountK, const float * Bias) { GGML_UNUSED(QuantBScale); GGML_UNUSED(QuantBZeroPoint); const size_t INNER = BlkLen / 16; if constexpr (HasZeroPoint) { for (size_t n = 0; n < CountN; n += 16) { size_t nblks = (CountN - n) > 16 ? 
16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(uint8_t) + // zp n * BlockCountK * sizeof(float); // scale float * CPtr = C + n; size_t cnt = BlockCountK; if (Bias != nullptr) { const float * bias = Bias + n; __asm__ volatile( "addi t3, %[NBLKS], 0 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "vmv.v.i v13, 3 \n\t" "li s1, 24 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v13, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i v13, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v13, 0 \n\t" "vsetvli t0, zero, e32, m4 \n\t" "vxor.vv v28, v28, v28 \n\t" // scale offset, scale0.0, scale1.0, scale2.0, scale3.0....scale15.0 "addi s1, %[B], 0 \n\t" "addi s2, %[B], 16 \n\t" "addi s3, %[B], 32 \n\t" "addi s4, %[B], 48 \n\t" // zp offset "addi s7, %[B], 64 \n\t" // a offset "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v28, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v29, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v30, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v31, (%[BIAS]) \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "LOOP_K%=: \n\t" // load scale "vle32.v v8, (s1) \n\t" "addi s1, s1, 80 \n\t" "vle32.v v9, (s2) \n\t" "addi s2, s2, 96 \n\t" "vle32.v v10, (s3) \n\t" "addi s3, s3, 112 \n\t" "vle32.v v11, (s4) \n\t" "addi s4, s4, 128 \n\t" // load a scale "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" // a scale * b scale "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1 "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vsub.vv v0, v0, v8 \n\t" "vsub.vv v4, v4, v8 \n\t" "vsub.vv v1, v1, v9 \n\t" "vsub.vv v5, v5, v9 \n\t" "vsub.vv v2, v2, v10 \n\t" "vsub.vv v6, v6, v10 \n\t" "vsub.vv v3, v3, v11 \n\t" "vsub.vv v7, v7, v11 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_1X4X4 "addi s7, s1, 64 \n\t" "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks), [BIAS] "+r"(bias) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6", "s7"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m4 \n\t" "vxor.vv v28, v28, v28 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "vmv.v.i v13, 3 \n\t" "li s1, 24 \n\t" "vsetvli t0, s1, e8, m1 \n\t" "vmv.v.i v13, 2 \n\t" "vsetvli t0, zero, e8, mf2 \n\t" "vmv.v.i 
v13, 1 \n\t" "vsetvli t0, zero, e8, mf4 \n\t" "vmv.v.i v13, 0 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 16 \n\t" "addi s3, %[B], 32 \n\t" "addi s4, %[B], 48 \n\t" "addi s7, %[B], 64 \n\t" "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "LOOP_K%=: \n\t" "vle32.v v8, (s1) \n\t" "addi s1, s1, 80 \n\t" "vle32.v v9, (s2) \n\t" "addi s2, s2, 96 \n\t" "vle32.v v10, (s3) \n\t" "addi s3, s3, 112 \n\t" "vle32.v v11, (s4) \n\t" "addi s4, s4, 128 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" SQ4BIT_KERNEL_LOAD_ZP_16X1 "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vsub.vv v0, v0, v8 \n\t" "vsub.vv v4, v4, v8 \n\t" "vsub.vv v1, v1, v9 \n\t" "vsub.vv v5, v5, v9 \n\t" "vsub.vv v2, v2, v10 \n\t" "vsub.vv v6, v6, v10 \n\t" "vsub.vv v3, v3, v11 \n\t" "vsub.vv v7, v7, v11 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_1X4X4 "addi s7, s1, 64 \n\t" "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6", "s7"); } } } else { for (size_t n = 0; n < CountN; n += 16) { size_t nblks = (CountN - n) > 16 ? 
16 : CountN - n; std::byte * QuantBDataPtr = (std::byte *) QuantBData + // n * BlockCountK * BlkLen / 2 + // b data n * BlockCountK * sizeof(float); // scale float * CPtr = C + n; size_t cnt = BlockCountK; if (Bias != nullptr) { const float * bias = Bias + n; __asm__ volatile( "addi t3, %[NBLKS], 0 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 16 \n\t" "addi s3, %[B], 32 \n\t" "addi s4, %[B], 48 \n\t" "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v28, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v29, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v30, (%[BIAS]) \n\t" "sub t3, t3, t0 \n\t" "addi %[BIAS], %[BIAS], 16 \n\t" "vsetvli t0, t3, e32, mf2 \n\t" "vle32.v v31, (%[BIAS]) \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "LOOP_K%=: \n\t" "vle32.v v8, (s1) \n\t" "addi s1, s1, 64 \n\t" "vle32.v v9, (s2) \n\t" "addi s2, s2, 80 \n\t" "vle32.v v10, (s3) \n\t" "addi s3, s3, 96 \n\t" "vle32.v v11, (s4) \n\t" "addi s4, s4, 112 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi %[CNT], %[CNT], -1 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vadd.vi v0, v0, -8 \n\t" "vadd.vi v1, v1, -8 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_1X4X4 "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks), [BIAS] "+r"(bias) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6"); } else { __asm__ volatile( "vsetvli t0, zero, e32, m4 \n\t" "vxor.vv v28, v28, v28 \n\t" "addi s1, %[B], 0 \n\t" "addi s2, %[B], 16 \n\t" "addi s3, %[B], 32 \n\t" "addi s4, %[B], 48 \n\t" "addi s5, %[A], 0 \n\t" "addi s6, %[A], 12 \n\t" "vsetvli t0, zero, e32, mf2 \n\t" "LOOP_K%=: \n\t" "vle32.v v8, (s1) \n\t" "addi s1, s1, 64 \n\t" "vle32.v v9, (s2) \n\t" "addi s2, s2, 80 \n\t" "vle32.v v10, (s3) \n\t" "addi s3, s3, 96 \n\t" "vle32.v v11, (s4) \n\t" "addi s4, s4, 112 \n\t" "flw f1, (s5) \n\t" "addi s5, s5, 4 \n\t" "addi t5, %[INNER], 0 \n\t" "vxor.vv v16, v16, v16 \n\t" "vxor.vv v18, v18, v18 \n\t" "vxor.vv v20, v20, v20 \n\t" "vxor.vv v22, v22, v22 \n\t" "vfmul.vf v24, v8, f1 \n\t" "vfmul.vf v25, v9, f1 \n\t" "vfmul.vf v26, v10, f1 \n\t" "vfmul.vf v27, v11, f1 \n\t" "addi 
%[CNT], %[CNT], -1 \n\t" "vsetvli t0, zero, e8, m1 \n\t" "LOOP_INNER%=: \n\t" SQ4BIT_KERNEL_LOAD_1x8x2_4X8X4 "vadd.vi v0, v0, -8 \n\t" "vadd.vi v1, v1, -8 \n\t" "vadd.vi v2, v2, -8 \n\t" "vadd.vi v3, v3, -8 \n\t" "vadd.vi v4, v4, -8 \n\t" "vadd.vi v5, v5, -8 \n\t" "vadd.vi v6, v6, -8 \n\t" "vadd.vi v7, v7, -8 \n\t" SQ4BIT_KERNEL_COMP_1x8x2_4X8X4 "bnez t5, LOOP_INNER%= \n\t" "vsetvli t0, zero, e32, mf2 \n\t" SQ4BIT_KERNEL_ACC_1X4X4 "bnez %[CNT], LOOP_K%= \n\t" "addi t3, zero, 16 \n\t" "addi s1, %[C], 16 \n\t" "addi s2, %[C], 32 \n\t" "addi s3, %[C], 48 \n\t" "blt %[NBLKS], t3, ST_TAIL%= \n\t" "vse32.v v28, (%[C]) \n\t" "vse32.v v29, (s1) \n\t" "vse32.v v30, (s2) \n\t" "vse32.v v31, (s3) \n\t" "jal x0, END%= \n\t" "ST_TAIL%=: \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v28, (%[C]) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v29, (s1) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v30, (s2) \n\t" "vsetvli t0, %[NBLKS], e32, mf2 \n\t" "sub %[NBLKS], %[NBLKS], t0 \n\t" "vse32.v v31, (s3) \n\t" "END%=: \n\t" : [CNT] "+r"(cnt), [NBLKS] "+r"(nblks) : [INNER] "r"(INNER), [A] "r"(QuantA), [B] "r"(QuantBDataPtr), [C] "r"(CPtr) : "cc", "t0", "t5", "t3", "f1", "s1", "s2", "s3", "s4", "s5", "s6"); } } } } template inline void SQ4BitGemmM4Kernel_CompInt8_DispatchOnBlkLen(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountM, size_t CountN, size_t BlockStrideQuantB, const float * Bias, const size_t ldc, const size_t scalestride) { if (scalestride == 4) { SQ4BitGemmM4Kernel_CompInt8_Impl(BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountN, BlockStrideQuantB, Bias, ldc); } else if (scalestride == 2) { SQ4BitGemmM4Kernel_CompInt8_ScaleFp16_Impl( BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountN, BlockStrideQuantB, Bias, ldc); } } template inline void SQ4BitGemmM1Kernel_CompInt8_DispatchOnBlkLen(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountM, size_t CountN, size_t BlockStrideQuantB, const float * Bias, const size_t ldc, const size_t scalestride) { if (scalestride == 4) { SQ4BitGemmM1Kernel_CompInt8_Impl(BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountN, BlockStrideQuantB, Bias); } else if (scalestride == 2) { SQ4BitGemmM1Kernel_CompInt8_ScaleFp16_Impl(BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountN, BlockStrideQuantB, Bias); } } } // namespace namespace ime1 { size_t gemm_kernel_i8i4(size_t BlkLen, const std::byte * QuantA, const std::byte * QuantBData, const float * QuantBScale, const std::byte * QuantBZeroPoint, float * C, size_t CountM, size_t CountN, size_t CountK, size_t BlockCountK, size_t ldc, const float * Bias, const size_t ScaleStride) { GGML_UNUSED(CountM); GGML_UNUSED(CountK); GGML_UNUSED(ldc); if (CountM >= 4) { if (QuantBZeroPoint != nullptr) { SQ4BitGemmM4Kernel_CompInt8_DispatchOnBlkLen(BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountM, CountN, BlockCountK, Bias, ldc, ScaleStride); } else { SQ4BitGemmM4Kernel_CompInt8_DispatchOnBlkLen(BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountM, CountN, BlockCountK, Bias, ldc, ScaleStride); } return 4; } else { if (QuantBZeroPoint != nullptr) { SQ4BitGemmM1Kernel_CompInt8_DispatchOnBlkLen(BlkLen, QuantA, 
QuantBData, QuantBScale, QuantBZeroPoint, C, CountM, CountN, BlockCountK, Bias, ldc, ScaleStride); } else { SQ4BitGemmM1Kernel_CompInt8_DispatchOnBlkLen(BlkLen, QuantA, QuantBData, QuantBScale, QuantBZeroPoint, C, CountM, CountN, BlockCountK, Bias, ldc, ScaleStride); } return 1; } } } // namespace ime1 } // namespace sqnbitgemm_spacemit_ime ggml-org-ggml-3678254/src/ggml-cpu/spacemit/ime_kernels.h000066400000000000000000000020201512524704700231330ustar00rootroot00000000000000#pragma once #include namespace sqnbitgemm_spacemit_ime { namespace ime1 { size_t gemm_kernel_i8i4(size_t blk_len, const std::byte * quant_a_ptr, const std::byte * quant_b_data, const float * quant_b_scale, const std::byte * quant_b_zp, float * c_ptr, size_t count_m, size_t count_n, size_t count_k, size_t block_count_k, size_t ldc, const float * bias, const size_t scale_stride); void quantize_a_row_i8(size_t blk_len, const float * a_ptr, size_t count_k, std::byte * quant_a_ptr); void quantize_a_4row_i8(size_t blk_len, const float * a_ptr, size_t count_k, std::byte * quant_a_ptr); } // namespace ime1 } // namespace sqnbitgemm_spacemit_ime ggml-org-ggml-3678254/src/ggml-cpu/traits.cpp000066400000000000000000000023171512524704700207030ustar00rootroot00000000000000#include "traits.h" #include "ggml-backend-impl.h" #include "ggml-backend.h" namespace ggml::cpu { tensor_traits::~tensor_traits() {} extra_buffer_type::~extra_buffer_type() {} } // namespace ggml::cpu bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) { for (auto extra : ggml_backend_cpu_get_extra_buffer_types()) { if (extra && extra->context) { auto buf_extra = (ggml::cpu::extra_buffer_type *) extra->context; auto tensor_traits = buf_extra->get_tensor_traits(op); if (tensor_traits && tensor_traits->compute_forward(params, op)) { return true; } } } return false; } bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size) { for (auto extra : ggml_backend_cpu_get_extra_buffer_types()) { if (extra && extra->context) { auto buf_extra = (ggml::cpu::extra_buffer_type *) extra->context; auto tensor_traits = buf_extra->get_tensor_traits(op); if (tensor_traits && tensor_traits->work_size(n_threads, op, *size)) { return true; } } } return false; } ggml-org-ggml-3678254/src/ggml-cpu/traits.h000066400000000000000000000022051512524704700203440ustar00rootroot00000000000000#pragma once #include "ggml-backend-impl.h" #include "ggml-cpu-impl.h" #include "ggml.h" #ifdef __cplusplus # include extern "C" { #endif // return true if op part of extra "accelerator" bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op); bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size); #ifdef __cplusplus } namespace ggml::cpu { // register in tensor->extra class tensor_traits { public: virtual ~tensor_traits(); virtual bool work_size(int n_threads, const struct ggml_tensor * op, size_t & size) = 0; virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0; }; class extra_buffer_type { public: virtual ~extra_buffer_type(); virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0; virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0; }; } // namespace ggml::cpu // implemented in ggml-cpu.cpp. 
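// Illustrative sketch only (not the actual ggml-cpu.cpp implementation): a CPU backend
// extension typically derives from the two interfaces above and hands out a tensor_traits
// object for the tensors it wants to accelerate. The names `my_traits` and `my_buffer_type`
// below are hypothetical.
//
//   class my_traits : public ggml::cpu::tensor_traits {
//       bool work_size(int /*n_threads*/, const struct ggml_tensor * /*op*/, size_t & size) override {
//           size = 0;       // no scratch buffer needed in this sketch
//           return true;
//       }
//       bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
//           // run the specialized kernel here; returning false falls back to the generic path
//           return false;
//       }
//   };
//
//   class my_buffer_type : public ggml::cpu::extra_buffer_type {
//       bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) override { /* ... */ }
//       tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { /* ... */ }
//   };
//
// ggml_cpu_extra_compute_forward() / ggml_cpu_extra_work_size() in traits.cpp walk the list
// returned by the accessor declared next and dispatch to the first traits object that claims the op.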
std::vector & ggml_backend_cpu_get_extra_buffer_types(); #endif ggml-org-ggml-3678254/src/ggml-cpu/unary-ops.cpp000066400000000000000000000264351512524704700213410ustar00rootroot00000000000000#include "unary-ops.h" static inline float op_abs(float x) { return fabsf(x); } static inline float op_sgn(float x) { return (x > 0.f) ? 1.f : ((x < 0.f) ? -1.f : 0.f); } static inline float op_neg(float x) { return -x; } static inline float op_step(float x) { return (x > 0.f) ? 1.f : 0.f; } static inline float op_tanh(float x) { return tanhf(x); } static inline float op_elu(float x) { return (x > 0.f) ? x : expm1f(x); } static inline float op_relu(float x) { return (x > 0.f) ? x : 0.f; } static inline float op_sigmoid(float x) { return 1.f / (1.f + expf(-x)); } static inline float op_hardsigmoid(float x) { return fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f)); } static inline float op_exp(float x) { return expf(x); } static inline float op_hardswish(float x) { return x * fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f)); } static inline float op_sqr(float x) { return x * x; } static inline float op_sqrt(float x) { return sqrtf(x); } static inline float op_xielu(float x, float alpha_n, float alpha_p, float beta, float eps) { if (x > 0.0f) { return alpha_p * x * x + beta * x; } else { const float min_x_eps = fminf(x, eps); return (expm1f(min_x_eps) - x) * alpha_n + beta * x; } } static inline float op_sin(float x) { return sinf(x); } static inline float op_cos(float x) { return cosf(x); } static inline float op_log(float x) { return logf(x); } static inline float op_expm1(float x) { return expf(x) - 1.0f; } static inline float op_softplus(float x) { return (x > 20.0f) ? x : logf(1.0f + expf(x)); } static inline float op_floor(float x) { return floorf(x); } static inline float op_ceil(float x) { return ceilf(x); } static inline float op_round(float x) { return roundf(x); } static inline float op_trunc(float x) { return truncf(x); } template static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) { constexpr auto src0_to_f32 = type_conversion_table::to_f32; constexpr auto f32_to_dst = type_conversion_table::from_f32; for (int i = 0; i < n; i++) { y[i] = f32_to_dst(op(src0_to_f32(x[i]))); } } template static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst)); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT( nb0 == sizeof(dst_t)); GGML_ASSERT(nb00 == sizeof(src0_t)); const auto [ir0, ir1] = get_thread_range(params, src0); for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*ne01); const int64_t i02 = (ir - i03*ne02*ne01)/ne01; const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 ); const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); vec_unary_op(ne0, dst_ptr, src0_ptr); } } // TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates template static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32 apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16 apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && dst->type == 
GGML_TYPE_BF16) { // all bf16 apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) { apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { apply_unary_op(params, dst); } else { fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__, ggml_type_name(dst->type), ggml_type_name(src0->type)); GGML_ABORT("fatal error"); } } template static void unary_op_params(const ggml_compute_params * params, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32 apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16 apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16 apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) { apply_unary_op(params, dst); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { apply_unary_op(params, dst); } else { fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__, ggml_type_name(dst->type), ggml_type_name(src0->type)); GGML_ABORT("fatal error"); } } // Extend vec_unary_op to support functors template static inline void vec_unary_op_functor(int64_t n, dst_t * y, const src0_t * x, Op op) { constexpr auto src0_to_f32 = type_conversion_table::to_f32; constexpr auto f32_to_dst = type_conversion_table::from_f32; for (int i = 0; i < n; i++) { y[i] = f32_to_dst(op(src0_to_f32(x[i]))); } } // Extend apply_unary_op to support functors template static void apply_unary_op_functor(const ggml_compute_params * params, ggml_tensor * dst, Op op) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst)); GGML_TENSOR_UNARY_OP_LOCALS GGML_ASSERT( nb0 == sizeof(dst_t)); GGML_ASSERT(nb00 == sizeof(src0_t)); const auto [ir0, ir1] = get_thread_range(params, src0); for (int64_t ir = ir0; ir < ir1; ++ir) { const int64_t i03 = ir/(ne02*ne01); const int64_t i02 = (ir - i03*ne02*ne01)/ne01; const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 ); const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); vec_unary_op_functor(ne0, dst_ptr, src0_ptr, op); } } // Generic dispatcher for functors template static void unary_op_functor(const ggml_compute_params * params, ggml_tensor * dst, Op op) { const ggml_tensor * src0 = dst->src[0]; /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32 apply_unary_op_functor(params, dst, op); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16 apply_unary_op_functor(params, dst, op); } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16 apply_unary_op_functor(params, dst, op); } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) { apply_unary_op_functor(params, dst, op); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { apply_unary_op_functor(params, dst, op); } else { fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__, ggml_type_name(dst->type), ggml_type_name(src0->type)); GGML_ABORT("fatal error"); } } void ggml_compute_forward_abs(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void 
ggml_compute_forward_sgn(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_neg(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_step(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_tanh(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_elu(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_relu(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_sigmoid(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_hardsigmoid(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_exp(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_hardswish(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_sqr(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_sqrt(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_sin(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_expm1(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_softplus(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_floor(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_ceil(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_round(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_trunc(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } void ggml_compute_forward_xielu(const ggml_compute_params * params, ggml_tensor * dst) { const float alpha_n = ggml_get_op_params_f32(dst, 1); const float alpha_p = ggml_get_op_params_f32(dst, 2); const float beta = ggml_get_op_params_f32(dst, 3); const float eps = ggml_get_op_params_f32(dst, 4); const auto xielu_op_params = [alpha_n, alpha_p, beta, eps](float f) { return op_xielu(f, alpha_n, alpha_p, beta, eps); }; unary_op_functor(params, dst, xielu_op_params); } ggml-org-ggml-3678254/src/ggml-cpu/unary-ops.h000066400000000000000000000046121512524704700207770ustar00rootroot00000000000000#pragma once #include "common.h" #ifdef __cplusplus extern "C" { #endif void ggml_compute_forward_abs(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sgn(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_neg(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_step(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_tanh(const struct ggml_compute_params * params, struct ggml_tensor * dst); 
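// A new elementwise op would typically follow the same pattern as the entries in this list:
// define a scalar helper in unary-ops.cpp, wrap it with the templated unary_op<> dispatcher
// (or unary_op_functor for parameterized ops such as xielu), and declare the forward function
// here. Hypothetical sketch, not part of the actual API:
//
//   // unary-ops.cpp
//   static inline float op_mish(float x) { return x * tanhf(logf(1.0f + expf(x))); }
//   void ggml_compute_forward_mish(const ggml_compute_params * params, ggml_tensor * dst) {
//       unary_op<op_mish>(params, dst);
//   }
//
//   // unary-ops.h (this file)
//   void ggml_compute_forward_mish(const struct ggml_compute_params * params, struct ggml_tensor * dst);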
void ggml_compute_forward_elu(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sigmoid(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_hardsigmoid(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_exp(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_hardswish(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sqr(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sqrt(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sin(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_log(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_expm1(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_softplus(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_floor(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_ceil(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_round(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_trunc(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_xielu(const struct ggml_compute_params * params, struct ggml_tensor * dst); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cpu/vec.cpp000066400000000000000000000577371512524704700201720ustar00rootroot00000000000000#include "vec.h" #include // precomputed gelu table for f16 (128 KB) ggml_fp16_t ggml_table_gelu_f16[1 << 16]; // precomputed quick gelu table for f16 (128 KB) ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16]; void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * GGML_RESTRICT x, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc) { assert(nrc == 1); GGML_UNUSED(nrc); GGML_UNUSED(bx); GGML_UNUSED(by); GGML_UNUSED(bs); #if defined(GGML_SIMD) float sumf = 0.0f; #if defined(__ARM_FEATURE_SVE) const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers const int np = (n & ~(ggml_f32_step - 1)); svfloat32_t sum1 = svdup_n_f32(0.0f); svfloat32_t sum2 = svdup_n_f32(0.0f); svfloat32_t sum3 = svdup_n_f32(0.0f); svfloat32_t sum4 = svdup_n_f32(0.0f); svfloat32_t sum5 = svdup_n_f32(0.0f); svfloat32_t sum6 = svdup_n_f32(0.0f); svfloat32_t sum7 = svdup_n_f32(0.0f); svfloat32_t sum8 = svdup_n_f32(0.0f); svfloat32_t ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8; svfloat32_t ay1,ay2,ay3,ay4,ay5,ay6,ay7,ay8; for (int i = 0; i < np; i += ggml_f32_step) { ax1 = GGML_F32_VEC_LOAD(x + i); ay1 = GGML_F32_VEC_LOAD(y + i); sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1); ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); sum2 = GGML_F32_VEC_FMA(sum2, ax2, ay2); ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); ay3 = GGML_F32_VEC_LOAD(y + i + 
2*ggml_f32_epr); sum3 = GGML_F32_VEC_FMA(sum3, ax3, ay3); ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); sum4 = GGML_F32_VEC_FMA(sum4, ax4, ay4); ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); sum5 = GGML_F32_VEC_FMA(sum5, ax5, ay5); ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); sum6 = GGML_F32_VEC_FMA(sum6, ax6, ay6); ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); sum7 = GGML_F32_VEC_FMA(sum7, ax7, ay7); ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr); sum8 = GGML_F32_VEC_FMA(sum8, ax8, ay8); } // leftovers // Since 8 unrolls are done in above loop, leftovers lie in range [0, ggml_f32_step] which is handled in below loop const int np2 = (n & ~(ggml_f32_epr - 1)); for (int i = np; i < np2; i += ggml_f32_epr) { ax1 = GGML_F32_VEC_LOAD(x + i); ay1 = GGML_F32_VEC_LOAD(y + i); sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1); } // maximum number of leftover elements will be less that ggml_f32_epr. Apply predicated svmad on available elements only if (np2 < n) { svbool_t pg = svwhilelt_b32(np2, n); ax1 = svld1_f32(pg, x + np2); ay1 = svld1_f32(pg, y + np2); sum1 = svmad_f32_m(pg, ax1, ay1, sum1); } // reduce sum1,sum2 to sum1 GGML_F32_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8); #elif defined(__riscv_v_intrinsic) int vl = __riscv_vsetvlmax_e32m8(); vfloat32m1_t vs = __riscv_vfmv_v_f_f32m1(0.0f, 1); vfloat32m8_t vsum; vfloat32m8_t ax; vfloat32m8_t ay; vsum = __riscv_vfmv_v_f_f32m8_tu(vsum, 0.0f, vl); for (int i = 0; i < n; i += vl) { vl = __riscv_vsetvl_e32m8(n - i); ax = __riscv_vle32_v_f32m8_tu(ax, &x[i], vl); ay = __riscv_vle32_v_f32m8_tu(ay, &y[i], vl); vsum = __riscv_vfmacc_vv_f32m8_tu(vsum, ax, ay, vl); } vl = __riscv_vsetvlmax_e32m8(); vs = __riscv_vfredusum_vs_f32m8_f32m1(vsum, vs, vl); sumf += __riscv_vfmv_f_s_f32m1_f32(vs); #else const int np = (n & ~(GGML_F32_STEP - 1)); GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; GGML_F32_VEC ax[GGML_F32_ARR]; GGML_F32_VEC ay[GGML_F32_ARR]; for (int i = 0; i < np; i += GGML_F32_STEP) { for (int j = 0; j < GGML_F32_ARR; j++) { ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); } } // reduce sum0..sum3 to sum0 GGML_F32_VEC_REDUCE(sumf, sum); // leftovers for (int i = np; i < n; ++i) { sumf += x[i]*y[i]; } #endif #else // scalar ggml_float sumf = 0.0; for (int i = 0; i < n; ++i) { sumf += (ggml_float)(x[i]*y[i]); } #endif *s = sumf; } void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc) { assert(nrc == 1); GGML_UNUSED(nrc); GGML_UNUSED(bx); GGML_UNUSED(by); GGML_UNUSED(bs); int i = 0; ggml_float sumf = 0; #if defined(__AVX512BF16__) __m512 c1 = _mm512_setzero_ps(); __m512 c2 = _mm512_setzero_ps(); for (; i + 64 <= n; i += 64) { c1 = _mm512_dpbf16_ps(c1, m512bh(_mm512_loadu_si512((x + i))), m512bh(_mm512_loadu_si512((y + i)))); c2 = _mm512_dpbf16_ps(c2, m512bh(_mm512_loadu_si512((x + i + 32))), m512bh(_mm512_loadu_si512((y + i + 32)))); } sumf += (ggml_float)_mm512_reduce_add_ps(c1); sumf += (ggml_float)_mm512_reduce_add_ps(c2); #elif defined(__AVX512F__) #define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16)) 
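// bf16 -> f32 widening: a bf16 value is the upper 16 bits of an IEEE-754 binary32, so the
// LOAD() macro above zero-extends each 16-bit lane to 32 bits and shifts it left by 16,
// yielding the corresponding f32 lanes without any conversion table.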
__m512 c1 = _mm512_setzero_ps(); __m512 c2 = _mm512_setzero_ps(); for (; i + 32 <= n; i += 32) { c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1); c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2); } sumf += (ggml_float)_mm512_reduce_add_ps(c1); sumf += (ggml_float)_mm512_reduce_add_ps(c2); #undef LOAD #elif defined(__AVX2__) || defined(__AVX__) #if defined(__AVX2__) #define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16)) #else #define LOAD(p) _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16)), (_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_bsrli_si128(_mm_loadu_si128((const __m128i *)(p)), 8)), 16)), 1)) #endif __m256 c1 = _mm256_setzero_ps(); __m256 c2 = _mm256_setzero_ps(); __m256 c3 = _mm256_setzero_ps(); __m256 c4 = _mm256_setzero_ps(); for (; i + 32 <= n; i += 32) { c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1); c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2); c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3); c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4); } __m128 g; c1 = _mm256_add_ps(_mm256_add_ps(c1, c3), _mm256_add_ps(c2, c4)); g = _mm_add_ps(_mm256_extractf128_ps(c1, 1), _mm256_castps256_ps128(c1)); g = _mm_add_ps(g, _mm_movehl_ps(g, g)); g = _mm_add_ss(g, _mm_movehdup_ps(g)); sumf += (ggml_float)_mm_cvtss_f32(g); #undef LOAD #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfbfwma) size_t vl = __riscv_vsetvlmax_e32m4(); // initialize accumulators to all zeroes vfloat32m4_t vsum0 = __riscv_vfmv_v_f_f32m4(0.0f, vl); vfloat32m4_t vsum1 = __riscv_vfmv_v_f_f32m4(0.0f, vl); // calculate step size const size_t epr = __riscv_vsetvlmax_e16m2(); const size_t step = epr * 2; const int np = (n & ~(step - 1)); // unroll by 2 for (; i < np; i += step) { vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16 *)&x[i], epr); vbfloat16m2_t ay0 = __riscv_vle16_v_bf16m2((const __bf16 *)&y[i], epr); vsum0 = __riscv_vfwmaccbf16_vv_f32m4(vsum0, ax0, ay0, epr); __asm__ __volatile__ ("" ::: "memory"); vbfloat16m2_t ax1 = __riscv_vle16_v_bf16m2((const __bf16 *)&x[i + epr], epr); vbfloat16m2_t ay1 = __riscv_vle16_v_bf16m2((const __bf16 *)&y[i + epr], epr); vsum1 = __riscv_vfwmaccbf16_vv_f32m4(vsum1, ax1, ay1, epr); __asm__ __volatile__ ("" ::: "memory"); } // accumulate in 1 register vsum0 = __riscv_vfadd_vv_f32m4(vsum0, vsum1, vl); // leftovers for (i = np; i < n; i += vl) { vl = __riscv_vsetvl_e16m2(n - i); vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16 *)&x[i], vl); vbfloat16m2_t ay0 = __riscv_vle16_v_bf16m2((const __bf16 *)&y[i], vl); vsum0 = __riscv_vfwmaccbf16_vv_f32m4(vsum0, ax0, ay0, vl); } // reduce vl = __riscv_vsetvlmax_e32m4(); vfloat32m1_t redsum = __riscv_vfredusum_vs_f32m4_f32m1(vsum0, __riscv_vfmv_v_f_f32m1(0.0f, 1), vl); sumf += __riscv_vfmv_f_s_f32m1_f32(redsum); #endif for (; i < n; ++i) { sumf += (ggml_float)(GGML_BF16_TO_FP32(x[i]) * GGML_BF16_TO_FP32(y[i])); } *s = sumf; } void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc) { assert(nrc == 1); GGML_UNUSED(nrc); GGML_UNUSED(bx); GGML_UNUSED(by); GGML_UNUSED(bs); ggml_float sumf = 0.0; #if defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) const int sve_register_length = svcntb() * 8; //get vector length const int ggml_f16_epr = 
sve_register_length / 16; // running when 16 const int ggml_f16_step = 8 * ggml_f16_epr; // choose 8 SVE registers const int np= (n & ~(ggml_f16_step - 1)); svfloat16_t sum1 = svdup_n_f16(0.0f); svfloat16_t sum2 = svdup_n_f16(0.0f); svfloat16_t sum3 = svdup_n_f16(0.0f); svfloat16_t sum4 = svdup_n_f16(0.0f); svfloat16_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; svfloat16_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; for (int i = 0; i < np; i += ggml_f16_step) { ax1 = GGML_F16x_VEC_LOAD(x + i + 0 * ggml_f16_epr, 0); ay1 = GGML_F16x_VEC_LOAD(y + i + 0 * ggml_f16_epr, 0); sum1 = GGML_F16x_VEC_FMA(sum1, ax1, ay1); ax2 = GGML_F16x_VEC_LOAD(x + i + 1 * ggml_f16_epr, 1); ay2 = GGML_F16x_VEC_LOAD(y + i + 1 * ggml_f16_epr, 1); sum2 = GGML_F16x_VEC_FMA(sum2, ax2, ay2); ax3 = GGML_F16x_VEC_LOAD(x + i + 2 * ggml_f16_epr, 2); ay3 = GGML_F16x_VEC_LOAD(y + i + 2 * ggml_f16_epr, 2); sum3 = GGML_F16x_VEC_FMA(sum3, ax3, ay3); ax4 = GGML_F16x_VEC_LOAD(x + i + 3 * ggml_f16_epr, 3); ay4 = GGML_F16x_VEC_LOAD(y + i + 3 * ggml_f16_epr, 3); sum4 = GGML_F16x_VEC_FMA(sum4, ax4, ay4); ax5 = GGML_F16x_VEC_LOAD(x + i + 4 * ggml_f16_epr, 4); ay5 = GGML_F16x_VEC_LOAD(y + i + 4 * ggml_f16_epr, 4); sum1 = GGML_F16x_VEC_FMA(sum1, ax5, ay5); ax6 = GGML_F16x_VEC_LOAD(x + i + 5 * ggml_f16_epr, 5); ay6 = GGML_F16x_VEC_LOAD(y + i + 5 * ggml_f16_epr, 5); sum2 = GGML_F16x_VEC_FMA(sum2, ax6, ay6); ax7 = GGML_F16x_VEC_LOAD(x + i + 6 * ggml_f16_epr, 6); ay7 = GGML_F16x_VEC_LOAD(y + i + 6 * ggml_f16_epr, 6); sum3 = GGML_F16x_VEC_FMA(sum3, ax7, ay7); ax8 = GGML_F16x_VEC_LOAD(x + i + 7 * ggml_f16_epr, 7); ay8 = GGML_F16x_VEC_LOAD(y + i + 7 * ggml_f16_epr, 7); sum4 = GGML_F16x_VEC_FMA(sum4, ax8, ay8); } const int np2 = (n & ~(ggml_f16_epr - 1)); // round down to multiple of 8 for (int k = np; k < np2; k += ggml_f16_epr) { svfloat16_t rx = GGML_F16x_VEC_LOAD(x + k, 0); svfloat16_t ry = GGML_F16x_VEC_LOAD(y + k, 0); sum1 = GGML_F16x_VEC_FMA(sum1, rx, ry); } if (np2 < n) { svbool_t pg = svwhilelt_b16(np2, n); svfloat16_t hx = svld1_f16(pg, (const __fp16 *)(x + np2)); svfloat16_t hy = svld1_f16(pg, (const __fp16 *)(y + np2)); sum1 = svmad_f16_x(pg, hx, hy, sum1); } GGML_F16x_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4); #elif defined(__riscv_v_intrinsic) #if defined(__riscv_zvfh) int vl = __riscv_vsetvlmax_e32m2(); vfloat32m1_t vs = __riscv_vfmv_v_f_f32m1(0.0f, 1); vfloat32m2_t vsum; vfloat16m1_t ax; vfloat16m1_t ay; vsum = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vmv_v_x_u32m2(0, vl)); for (int i = 0; i < n; i += vl) { vl = __riscv_vsetvl_e16m1(n - i); ax = __riscv_vle16_v_f16m1_tu(ax, (const _Float16 *)&x[i], vl); ay = __riscv_vle16_v_f16m1_tu(ay, (const _Float16 *)&y[i], vl); vsum = __riscv_vfwmacc_vv_f32m2_tu(vsum, ax, ay, vl); } vl = __riscv_vsetvlmax_e32m1(); vfloat32m1_t ac0 = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m2_f32m1(vsum, 0), __riscv_vget_v_f32m2_f32m1(vsum, 1), vl); vs = __riscv_vfredusum_vs_f32m1_f32m1(ac0, vs, vl); sumf += __riscv_vfmv_f_s_f32m1_f32(vs); #else for (int i = 0; i < n; ++i) { sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i])); } #endif // __riscv_zvfh #else const int np = (n & ~(GGML_F16_STEP - 1)); GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO }; GGML_F16_VEC ax[GGML_F16_ARR]; GGML_F16_VEC ay[GGML_F16_ARR]; for (int i = 0; i < np; i += GGML_F16_STEP) { for (int j = 0; j < GGML_F16_ARR; j++) { ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); } } // reduce sum0..sum3 to sum0 
GGML_F16_VEC_REDUCE(sumf, sum); // leftovers for (int i = np; i < n; ++i) { sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i])); } // if you hit this, you are likely running outside the FP range assert(!isnan(sumf) && !isinf(sumf)); #endif #else for (int i = 0; i < n; ++i) { sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i])); } #endif // GGML_SIMD *s = sumf; } void ggml_vec_silu_f32(const int n, float * y, const float * x) { int i = 0; #if defined(__AVX512F__) && defined(__AVX512DQ__) for (; i + 15 < n; i += 16) { _mm512_storeu_ps(y + i, ggml_v_silu(_mm512_loadu_ps(x + i))); } #elif defined(__AVX2__) && defined(__FMA__) for (; i + 7 < n; i += 8) { _mm256_storeu_ps(y + i, ggml_v_silu(_mm256_loadu_ps(x + i))); } #elif defined(__SSE2__) for (; i + 3 < n; i += 4) { _mm_storeu_ps(y + i, ggml_v_silu(_mm_loadu_ps(x + i))); } #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) const int vlen = svcntw(); for (; i < n; i += vlen) { const svbool_t pg = svwhilelt_b32_s32(i, n); svst1_f32(pg, y + i, ggml_v_silu(pg, svld1_f32(pg, x + i))); } #elif defined(__ARM_NEON) && defined(__aarch64__) for (; i + 3 < n; i += 4) { vst1q_f32(y + i, ggml_v_silu(vld1q_f32(x + i))); } #elif defined(__riscv_v_intrinsic) for (int vl; i < n; i += vl) { vl = __riscv_vsetvl_e32m2(n - i); vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl); vfloat32m2_t vy = ggml_v_silu_m2(vx, vl); __riscv_vse32_v_f32m2(&y[i], vy, vl); } #endif for (; i < n; ++i) { y[i] = ggml_silu_f32(x[i]); } } void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g) { int i = 0; #if defined(__AVX512F__) && defined(__AVX512DQ__) for (; i + 15 < n; i += 16) { _mm512_storeu_ps(y + i, _mm512_mul_ps(ggml_v_silu(_mm512_loadu_ps(x + i)), _mm512_loadu_ps(g + i))); } #elif defined(__AVX2__) && defined(__FMA__) for (; i + 7 < n; i += 8) { _mm256_storeu_ps(y + i, _mm256_mul_ps(ggml_v_silu(_mm256_loadu_ps(x + i)), _mm256_loadu_ps(g + i))); } #elif defined(__SSE2__) for (; i + 3 < n; i += 4) { _mm_storeu_ps(y + i, _mm_mul_ps(ggml_v_silu(_mm_loadu_ps(x + i)), _mm_loadu_ps(g + i))); } #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) const int vlen = svcntw(); for (; i < n; i += vlen) { const svbool_t pg = svwhilelt_b32_s32(i, n); svst1_f32(pg, y + i, svmul_f32_x(pg, ggml_v_silu(pg, svld1_f32(pg, x + i)), svld1_f32(pg, g + i))); } #elif defined(__ARM_NEON) && defined(__aarch64__) for (; i + 3 < n; i += 4) { vst1q_f32(y + i, vmulq_f32(ggml_v_silu(vld1q_f32(x + i)), vld1q_f32(g + i))); } #elif defined(__riscv_v_intrinsic) for (int vl; i < n; i += vl) { vl = __riscv_vsetvl_e32m2(n - i); vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl); vfloat32m2_t vg = __riscv_vle32_v_f32m2(&g[i], vl); vfloat32m2_t vy = __riscv_vfmul_vv_f32m2(ggml_v_silu_m2(vx, vl), vg, vl); __riscv_vse32_v_f32m2(&y[i], vy, vl); } #endif for (; i < n; ++i) { y[i] = ggml_silu_f32(x[i]) * g[i]; } } ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const float mean) { int i = 0; ggml_float sum = 0; // TODO: optimize to process the remaining elements in groups using the smaller vector sizes from AVX2 and SSE // ref: https://github.com/ggml-org/llama.cpp/pull/15953#pullrequestreview-3310928344 #if defined(__AVX512F__) && defined(__AVX512DQ__) for (; i + 15 < n; i += 16) { __m512 val = _mm512_sub_ps(_mm512_loadu_ps(x + i), _mm512_set1_ps(mean)); _mm512_storeu_ps(y + i, val); sum += (ggml_float)_mm512_reduce_add_ps(_mm512_mul_ps(val, val)); } #elif defined(__AVX2__) && defined(__FMA__) for (; i + 7 < n; 
i += 8) { __m256 val = _mm256_sub_ps(_mm256_loadu_ps(x + i), _mm256_set1_ps(mean)); _mm256_storeu_ps(y + i, val); val = _mm256_mul_ps(val,val); __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1), _mm256_castps256_ps128(val)); val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2)); val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2)); sum += (ggml_float)_mm_cvtss_f32(val2); } #elif defined(__SSE2__) for (; i + 3 < n; i += 4) { __m128 val = _mm_sub_ps(_mm_loadu_ps(x + i), _mm_set1_ps(mean)); _mm_storeu_ps(y + i, val); val = _mm_mul_ps(val, val); #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) val = _mm_add_ps(val, _mm_movehl_ps(val, val)); val = _mm_add_ss(val, _mm_movehdup_ps(val)); #else __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1)); val = _mm_add_ps(val, tmp); tmp = _mm_movehl_ps(tmp, val); val = _mm_add_ss(val, tmp); #endif // __AVX__ || __AVX2__ || __AVX512F__ sum += (ggml_float)_mm_cvtss_f32(val); } #elif defined(__ARM_NEON) && defined(__aarch64__) for (; i + 3 < n; i += 4) { float32x4_t val = vsubq_f32(vld1q_f32(x + i), vdupq_n_f32(mean)); vst1q_f32(y + i, val); val = vmulq_f32(val, val); sum += (ggml_float)vaddvq_f32(val); } #elif defined(__VXE__) || defined(__VXE2__) for (; i + 3 < n; i += 4) { float32x4_t val = vec_sub(vec_xl(0, x + i), vec_splats(mean)); vec_xst(val, 0, y + i); val = vec_mul(val, val); sum += (ggml_float)vec_hsum_f32x4(val); } #elif defined(__riscv_v_intrinsic) vfloat64m1_t vsum = __riscv_vfmv_v_f_f64m1(0, 1); for (int vl; i < n; i += vl) { vl = __riscv_vsetvl_e32m2(n - i); vfloat32m2_t val = __riscv_vfsub_vf_f32m2(__riscv_vle32_v_f32m2(&x[i], vl), mean, vl); __riscv_vse32_v_f32m2(&y[i], val, vl); val = __riscv_vfmul_vv_f32m2(val, val, vl); vsum = __riscv_vfwredusum_vs_f32m2_f64m1(val, vsum, vl); } sum = (ggml_float)__riscv_vfmv_f_s_f64m1_f64(vsum); #endif for (; i < n; ++i) { float val = x[i] - mean; y[i] = val; val *= val; sum += (ggml_float)val; } return sum/n; } ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) { int i = 0; ggml_float sum = 0; #if defined(__AVX512F__) && defined(__AVX512DQ__) for (; i + 15 < n; i += 16) { __m512 val = ggml_v_expf(_mm512_sub_ps(_mm512_loadu_ps(x + i), _mm512_set1_ps(max))); _mm512_storeu_ps(y + i, val); sum += (ggml_float)_mm512_reduce_add_ps(val); } #elif defined(__AVX2__) && defined(__FMA__) for (; i + 7 < n; i += 8) { __m256 val = ggml_v_expf(_mm256_sub_ps(_mm256_loadu_ps(x + i), _mm256_set1_ps(max))); _mm256_storeu_ps(y + i, val); __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1), _mm256_castps256_ps128(val)); val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2)); val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2)); sum += (ggml_float)_mm_cvtss_f32(val2); } #elif defined(__SSE2__) for (; i + 3 < n; i += 4) { __m128 val = ggml_v_expf(_mm_sub_ps(_mm_loadu_ps(x + i), _mm_set1_ps(max))); _mm_storeu_ps(y + i, val); #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) val = _mm_add_ps(val, _mm_movehl_ps(val, val)); val = _mm_add_ss(val, _mm_movehdup_ps(val)); #else __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1)); val = _mm_add_ps(val, tmp); tmp = _mm_movehl_ps(tmp, val); val = _mm_add_ss(val, tmp); #endif sum += (ggml_float)_mm_cvtss_f32(val); } #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) const int vlen = svcntw(); for (; i < n; i += vlen) { const svbool_t pg = svwhilelt_b32_s32(i, n); svfloat32_t val = ggml_v_expf(pg, svsub_f32_x(pg, svld1_f32(pg, x + i), svdup_n_f32_x(pg, max))); svst1_f32(pg, y + i, val); sum += 
(ggml_float)svaddv_f32(pg, val); } #elif defined(__ARM_NEON) && defined(__aarch64__) for (; i + 3 < n; i += 4) { float32x4_t val = ggml_v_expf(vsubq_f32(vld1q_f32(x + i), vdupq_n_f32(max))); vst1q_f32(y + i, val); sum += (ggml_float)vaddvq_f32(val); } #elif defined(__riscv_v_intrinsic) vfloat64m1_t vsum = __riscv_vfmv_v_f_f64m1(0, 1); for (int avl; i < n; i += avl) { avl = __riscv_vsetvl_e32m2(n - i); vfloat32m2_t val = ggml_v_expf_m2(__riscv_vfsub_vf_f32m2(__riscv_vle32_v_f32m2(&x[i], avl), max, avl), avl); __riscv_vse32_v_f32m2(&y[i], val, avl); vsum = __riscv_vfwredusum_vs_f32m2_f64m1(val, vsum, avl); } return (ggml_float)__riscv_vfmv_f_s_f64m1_f64(vsum); #endif for (; i < n; ++i) { float val = expf(x[i] - max); sum += (ggml_float)val; y[i] = val; } return sum; } ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max) { // log(soft_max) = log(soft_max_i / soft_max_sum) = log(soft_max_i) - log(soft_max_sum) = (logit_i - max) - log(soft_max_i) int i = 0; ggml_float sum = 0; for (; i < n; ++i) { float val = x[i] - max; y[i] = val; sum += (ggml_float)expf(val); } return sum = (ggml_float)logf(sum); } ggml-org-ggml-3678254/src/ggml-cpu/vec.h000066400000000000000000002017701512524704700176230ustar00rootroot00000000000000// Vectorized functions for fundamental operations #pragma once #include "ggml-impl.h" #include "simd-mappings.h" #include "ggml.h" #include "ggml-cpu.h" #if defined(GGML_USE_ACCELERATE) #include #endif // floating point type used to accumulate sums typedef double ggml_float; #define GGML_GELU_FP16 #define GGML_GELU_QUICK_FP16 #define GGML_SOFT_MAX_UNROLL 4 #define GGML_VEC_DOT_UNROLL 2 #define GGML_VEC_MAD_UNROLL 32 #ifdef __cplusplus extern "C" { #endif // // global data // // precomputed gelu table for f16 (128 KB) extern ggml_fp16_t ggml_table_gelu_f16[1 << 16]; // precomputed quick gelu table for f16 (128 KB) extern ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16]; // // fundamental operations // void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * GGML_RESTRICT x, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc); void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc); void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc); void ggml_vec_silu_f32(const int n, float * y, const float * x); ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const float mean); //it will also center y ( y = y - mean ) ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max); ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max); inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } inline static void ggml_vec_cpy_i32(const int n, int32_t * y, const int32_t * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const ggml_fp16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < 
n; ++i) x[i] = v; } inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { int i = 0; #if defined(__AVX2__) for (; i + 7 < n; i += 8) { __m256 vx = _mm256_loadu_ps(x + i); __m256 vy = _mm256_loadu_ps(y + i); __m256 vz = _mm256_add_ps(vx, vy); _mm256_storeu_ps(z + i, vz); } #endif for (; i < n; ++i) { z[i] = x[i] + y[i]; } } inline static void ggml_vec_add_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) + GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } inline static void ggml_vec_sub_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) - GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } inline static void ggml_vec_neg_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(-GGML_CPU_FP16_TO_FP32(x[i])); } } inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } inline static void ggml_vec_mul_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) * GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } inline static void ggml_vec_div_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) / GGML_CPU_FP16_TO_FP32(y[i])); } } // compute GGML_VEC_DOT_UNROLL dot products at once // xs - x row stride in bytes inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GGML_RESTRICT s, void * GGML_RESTRICT xv, ggml_fp16_t * GGML_RESTRICT y) { ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 }; ggml_fp16_t * GGML_RESTRICT x[GGML_VEC_DOT_UNROLL]; for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { x[i] = (ggml_fp16_t *) ((char *) xv + i*xs); } #if defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) const int sve_register_length = svcntb() * 8; const int ggml_f16_epr = sve_register_length / 16; // running when 16 const int ggml_f16_step = 8 * ggml_f16_epr; // choose 8 SVE registers const int np = (n & ~(ggml_f16_step - 1)); svfloat16_t sum_00 = svdup_n_f16(0.0f); svfloat16_t sum_01 = svdup_n_f16(0.0f); svfloat16_t sum_02 = svdup_n_f16(0.0f); svfloat16_t sum_03 = 
svdup_n_f16(0.0f); svfloat16_t sum_10 = svdup_n_f16(0.0f); svfloat16_t sum_11 = svdup_n_f16(0.0f); svfloat16_t sum_12 = svdup_n_f16(0.0f); svfloat16_t sum_13 = svdup_n_f16(0.0f); svfloat16_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; svfloat16_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; for (int i = 0; i < np; i += ggml_f16_step) { ay1 = GGML_F16x_VEC_LOAD(y + i + 0 * ggml_f16_epr, 0); // 8 elements ax1 = GGML_F16x_VEC_LOAD(x[0] + i + 0*ggml_f16_epr, 0); // 8 elements sum_00 = GGML_F16x_VEC_FMA(sum_00, ax1, ay1); // sum_00 = sum_00+ax1*ay1 ax1 = GGML_F16x_VEC_LOAD(x[1] + i + 0*ggml_f16_epr, 0); // 8 elements sum_10 = GGML_F16x_VEC_FMA(sum_10, ax1, ay1); ay2 = GGML_F16x_VEC_LOAD(y + i + 1 * ggml_f16_epr, 1); // next 8 elements ax2 = GGML_F16x_VEC_LOAD(x[0] + i + 1*ggml_f16_epr, 1); // next 8 elements sum_01 = GGML_F16x_VEC_FMA(sum_01, ax2, ay2); ax2 = GGML_F16x_VEC_LOAD(x[1] + i + 1*ggml_f16_epr, 1); sum_11 = GGML_F16x_VEC_FMA(sum_11, ax2, ay2); ay3 = GGML_F16x_VEC_LOAD(y + i + 2 * ggml_f16_epr, 2); ax3 = GGML_F16x_VEC_LOAD(x[0] + i + 2*ggml_f16_epr, 2); sum_02 = GGML_F16x_VEC_FMA(sum_02, ax3, ay3); ax3 = GGML_F16x_VEC_LOAD(x[1] + i + 2*ggml_f16_epr, 2); sum_12 = GGML_F16x_VEC_FMA(sum_12, ax3, ay3); ay4 = GGML_F16x_VEC_LOAD(y + i + 3 * ggml_f16_epr, 3); ax4 = GGML_F16x_VEC_LOAD(x[0] + i + 3*ggml_f16_epr, 3); sum_03 = GGML_F16x_VEC_FMA(sum_03, ax4, ay4); ax4 = GGML_F16x_VEC_LOAD(x[1] + i + 3*ggml_f16_epr, 3); sum_13 = GGML_F16x_VEC_FMA(sum_13, ax4, ay4); ay5 = GGML_F16x_VEC_LOAD(y + i + 4 * ggml_f16_epr, 4); ax5 = GGML_F16x_VEC_LOAD(x[0] + i + 4*ggml_f16_epr, 4); sum_00 = GGML_F16x_VEC_FMA(sum_00, ax5, ay5); ax5 = GGML_F16x_VEC_LOAD(x[1] + i + 4*ggml_f16_epr, 4); sum_10 = GGML_F16x_VEC_FMA(sum_10, ax5, ay5); ay6 = GGML_F16x_VEC_LOAD(y + i + 5 * ggml_f16_epr, 5); ax6 = GGML_F16x_VEC_LOAD(x[0] + i + 5*ggml_f16_epr, 5); sum_01 = GGML_F16x_VEC_FMA(sum_01, ax6, ay6); ax6 = GGML_F16x_VEC_LOAD(x[1] + i + 5*ggml_f16_epr, 5); sum_11 = GGML_F16x_VEC_FMA(sum_11, ax6, ay6); ay7 = GGML_F16x_VEC_LOAD(y + i + 6 * ggml_f16_epr, 6); ax7 = GGML_F16x_VEC_LOAD(x[0] + i + 6*ggml_f16_epr, 6); sum_02 = GGML_F16x_VEC_FMA(sum_02, ax7, ay7); ax7 = GGML_F16x_VEC_LOAD(x[1] + i + 6*ggml_f16_epr, 6); sum_12 = GGML_F16x_VEC_FMA(sum_12, ax7, ay7); ay8 = GGML_F16x_VEC_LOAD(y + i + 7 * ggml_f16_epr, 7); ax8 = GGML_F16x_VEC_LOAD(x[0] + i + 7*ggml_f16_epr, 7); sum_03 = GGML_F16x_VEC_FMA(sum_03, ax8, ay8); ax8 = GGML_F16x_VEC_LOAD(x[1] + i + 7*ggml_f16_epr, 7); sum_13 = GGML_F16x_VEC_FMA(sum_13, ax8, ay8); } const int np2 = (n & ~(ggml_f16_epr - 1)); for (int k = np; k < np2; k += ggml_f16_epr) { svfloat16_t ry = GGML_F16x_VEC_LOAD(y + k, 0); svfloat16_t rx = GGML_F16x_VEC_LOAD(x[0] + k, 0); sum_00 = GGML_F16x_VEC_FMA(sum_00, rx, ry); rx = GGML_F16x_VEC_LOAD(x[1] + k, 0); sum_10 = GGML_F16x_VEC_FMA(sum_10, rx, ry); } if (np2 < n) { svbool_t pg = svwhilelt_b16(np2, n); svfloat16_t hx_0 = svld1_f16(pg, (const __fp16 *)(x[0] + np2)); svfloat16_t hx_1 = svld1_f16(pg, (const __fp16 *)(x[1] + np2)); svfloat16_t hy = svld1_f16(pg, (const __fp16 *)(y + np2)); sum_00 = svmad_f16_x(pg, hx_0, hy, sum_00); sum_10 = svmad_f16_x(pg, hx_1, hy, sum_10); } GGML_F16x_VEC_REDUCE(sumf[0], sum_00, sum_01, sum_02, sum_03); GGML_F16x_VEC_REDUCE(sumf[1], sum_10, sum_11, sum_12, sum_13); #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfh) size_t vl = __riscv_vsetvlmax_e32m4(); // initialize accumulators to all zeroes vfloat32m4_t vsum0_0 = __riscv_vfmv_v_f_f32m4(0.0f, vl); vfloat32m4_t vsum0_1 = __riscv_vfmv_v_f_f32m4(0.0f, vl); vfloat32m4_t vsum1_0 
= __riscv_vfmv_v_f_f32m4(0.0f, vl); vfloat32m4_t vsum1_1 = __riscv_vfmv_v_f_f32m4(0.0f, vl); // calculate step size const size_t epr = __riscv_vsetvlmax_e16m2(); const size_t step = epr * 2; const int np = (n & ~(step - 1)); // unroll by 2 along the row dimension for (int i = 0; i < np; i += step) { vfloat16m2_t ay0 = __riscv_vle16_v_f16m2((const _Float16 *)(y + i), epr); vfloat16m2_t ax0_0 = __riscv_vle16_v_f16m2((const _Float16 *)(x[0] + i), epr); vfloat16m2_t ax1_0 = __riscv_vle16_v_f16m2((const _Float16 *)(x[1] + i), epr); vsum0_0 = __riscv_vfwmacc_vv_f32m4(vsum0_0, ax0_0, ay0, epr); vsum1_0 = __riscv_vfwmacc_vv_f32m4(vsum1_0, ax1_0, ay0, epr); vfloat16m2_t ay1 = __riscv_vle16_v_f16m2((const _Float16 *)(y + i + epr), epr); vfloat16m2_t ax0_1 = __riscv_vle16_v_f16m2((const _Float16 *)(x[0] + i + epr), epr); vfloat16m2_t ax1_1 = __riscv_vle16_v_f16m2((const _Float16 *)(x[1] + i + epr), epr); vsum0_1 = __riscv_vfwmacc_vv_f32m4(vsum0_1, ax0_1, ay1, epr); vsum1_1 = __riscv_vfwmacc_vv_f32m4(vsum1_1, ax1_1, ay1, epr); } vfloat32m4_t vsum0 = __riscv_vfadd_vv_f32m4(vsum0_0, vsum0_1, vl); vfloat32m4_t vsum1 = __riscv_vfadd_vv_f32m4(vsum1_0, vsum1_1, vl); // leftovers for (int i = np; i < n; i += vl) { vl = __riscv_vsetvl_e16m2(n - i); vfloat16m2_t ay = __riscv_vle16_v_f16m2((const _Float16 *)(y + i), vl); vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16 *)(x[0] + i), vl); vfloat16m2_t ax1 = __riscv_vle16_v_f16m2((const _Float16 *)(x[1] + i), vl); vsum0 = __riscv_vfwmacc_vv_f32m4(vsum0, ax0, ay, vl); vsum1 = __riscv_vfwmacc_vv_f32m4(vsum1, ax1, ay, vl); } // reduce vl = __riscv_vsetvlmax_e32m2(); vfloat32m2_t acc0_0 = __riscv_vfadd_vv_f32m2(__riscv_vget_v_f32m4_f32m2(vsum0, 0), __riscv_vget_v_f32m4_f32m2(vsum0, 1), vl); vl = __riscv_vsetvlmax_e32m1(); vfloat32m1_t acc0_1 = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m2_f32m1(acc0_0, 0), __riscv_vget_v_f32m2_f32m1(acc0_0, 1), vl); vfloat32m1_t redsum0 = __riscv_vfredusum_vs_f32m1_f32m1( acc0_1, __riscv_vfmv_v_f_f32m1(0.0f, 1), vl); vl = __riscv_vsetvlmax_e32m2(); vfloat32m2_t acc1_0 = __riscv_vfadd_vv_f32m2(__riscv_vget_v_f32m4_f32m2(vsum1, 0), __riscv_vget_v_f32m4_f32m2(vsum1, 1), vl); vl = __riscv_vsetvlmax_e32m1(); vfloat32m1_t acc1_1 = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m2_f32m1(acc1_0, 0), __riscv_vget_v_f32m2_f32m1(acc1_0, 1), vl); vfloat32m1_t redsum1 = __riscv_vfredusum_vs_f32m1_f32m1( acc1_1, __riscv_vfmv_v_f_f32m1(0.0f, 1), vl); sumf[0] = __riscv_vfmv_f_s_f32m1_f32(redsum0); sumf[1] = __riscv_vfmv_f_s_f32m1_f32(redsum1); #else const int np = (n & ~(GGML_F16_STEP - 1)); GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } }; GGML_F16_VEC ax[GGML_F16_ARR]; GGML_F16_VEC ay[GGML_F16_ARR]; for (int i = 0; i < np; i += GGML_F16_STEP) { for (int j = 0; j < GGML_F16_ARR; j++) { ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j); sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]); } } } // reduce sum0..sum3 to sum0 for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { GGML_F16_VEC_REDUCE(sumf[k], sum[k]); } // leftovers for (int i = np; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i])); } } #endif #else for (int i = 0; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i])); } } #endif for (int i = 0; i < GGML_VEC_DOT_UNROLL; 
++i) { s[i] = (float)sumf[i]; } } inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const float * GGML_RESTRICT x, const float v) { #if defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); const int np = (n & ~(ggml_f32_step - 1)); svfloat32_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; svfloat32_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; for (int i = 0; i < np; i += ggml_f32_step) { ax1 = GGML_F32_VEC_LOAD(x + i); ay1 = GGML_F32_VEC_LOAD(y + i); ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx); GGML_F32_VEC_STORE(y + i, ay1); ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); ay2 = GGML_F32_VEC_FMA(ay2, ax2, vx); GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr); ay3 = GGML_F32_VEC_FMA(ay3, ax3, vx); GGML_F32_VEC_STORE(y + i + 2*ggml_f32_epr, ay3); ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); ay4 = GGML_F32_VEC_FMA(ay4, ax4, vx); GGML_F32_VEC_STORE(y + i + 3*ggml_f32_epr, ay4); ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); ay5 = GGML_F32_VEC_FMA(ay5, ax5, vx); GGML_F32_VEC_STORE(y + i + 4*ggml_f32_epr, ay5); ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); ay6 = GGML_F32_VEC_FMA(ay6, ax6, vx); GGML_F32_VEC_STORE(y + i + 5*ggml_f32_epr, ay6); ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); ay7 = GGML_F32_VEC_FMA(ay7, ax7, vx); GGML_F32_VEC_STORE(y + i + 6*ggml_f32_epr, ay7); ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr); ay8 = GGML_F32_VEC_FMA(ay8, ax8, vx); GGML_F32_VEC_STORE(y + i + 7*ggml_f32_epr, ay8); } // leftovers // Since 8 unrolls are done in above loop, leftovers lie in range [0, ggml_f32_step] which is handled in below loop const int np2 = (n & ~(ggml_f32_epr - 1)); for (int i = np; i < np2; i += ggml_f32_epr) { ax1 = GGML_F32_VEC_LOAD(x + i); ay1 = GGML_F32_VEC_LOAD(y + i); ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx); GGML_F32_VEC_STORE(y + i, ay1); } // maximum number of leftover elements will be less that ggml_f32_epr. 
Apply predicated svmad on available elements only if (np2 < n) { svbool_t pg =svwhilelt_b32(np2, n); ax1 = svld1_f32(pg, x + np2); ay1 = svld1_f32(pg, y + np2); ay1 = svmad_f32_m(pg, ax1, vx, ay1); svst1_f32(pg, y + np2, ay1); } #elif defined(__riscv_v_intrinsic) for (int i = 0, avl; i < n; i += avl) { avl = __riscv_vsetvl_e32m8(n - i); vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[i], avl); vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl); vfloat32m8_t ny = __riscv_vfmadd_vf_f32m8(ax, v, ay, avl); __riscv_vse32_v_f32m8(&y[i], ny, avl); } #else const int np = (n & ~(GGML_F32_STEP - 1)); GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); GGML_F32_VEC ax[GGML_F32_ARR]; GGML_F32_VEC ay[GGML_F32_ARR]; for (int i = 0; i < np; i += GGML_F32_STEP) { for (int j = 0; j < GGML_F32_ARR; j++) { ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); } } // leftovers for (int i = np; i < n; ++i) { y[i] += x[i]*v; } #endif #else // scalar for (int i = 0; i < n; ++i) { y[i] += x[i]*v; } #endif } inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * GGML_RESTRICT y, const ggml_fp16_t * GGML_RESTRICT x, const float v) { #if defined(GGML_SIMD) && defined(__ARM_FEATURE_SVE) const int sve_register_length = svcntb() * 8; const int ggml_f16_epr = sve_register_length / 16; const int ggml_f16_step = 8 * ggml_f16_epr; GGML_F16x_VEC vx = GGML_F16x_VEC_SET1(v); int np = (n & ~(ggml_f16_step - 1)); svfloat16_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; svfloat16_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; for (int i = 0; i < np; i += ggml_f16_step) { ax1 = GGML_F16x_VEC_LOAD(x + i + 0 * ggml_f16_epr, 0); ay1 = GGML_F16x_VEC_LOAD(y + i + 0 * ggml_f16_epr, 0); ay1 = GGML_F16x_VEC_FMA(ay1, ax1, vx); GGML_F16x_VEC_STORE(y + i + 0 * ggml_f16_epr, ay1, 0); ax2 = GGML_F16x_VEC_LOAD(x + i + 1 * ggml_f16_epr, 1); ay2 = GGML_F16x_VEC_LOAD(y + i + 1 * ggml_f16_epr, 1); ay2 = GGML_F16x_VEC_FMA(ay2, ax2, vx); GGML_F16x_VEC_STORE(y + i + 1 * ggml_f16_epr, ay2, 1); ax3 = GGML_F16x_VEC_LOAD(x + i + 2 * ggml_f16_epr, 2); ay3 = GGML_F16x_VEC_LOAD(y + i + 2 * ggml_f16_epr, 2); ay3 = GGML_F16x_VEC_FMA(ay3, ax3, vx); GGML_F16x_VEC_STORE(y + i + 2 * ggml_f16_epr, ay3, 2); ax4 = GGML_F16x_VEC_LOAD(x + i + 3 * ggml_f16_epr, 3); ay4 = GGML_F16x_VEC_LOAD(y + i + 3 * ggml_f16_epr, 3); ay4 = GGML_F16x_VEC_FMA(ay4, ax4, vx); GGML_F16x_VEC_STORE(y + i + 3 * ggml_f16_epr, ay4, 3); ax5 = GGML_F16x_VEC_LOAD(x + i + 4 * ggml_f16_epr, 4); ay5 = GGML_F16x_VEC_LOAD(y + i + 4 * ggml_f16_epr, 4); ay5 = GGML_F16x_VEC_FMA(ay5, ax5, vx); GGML_F16x_VEC_STORE(y + i + 4 * ggml_f16_epr, ay5, 4); ax6 = GGML_F16x_VEC_LOAD(x + i + 5 * ggml_f16_epr, 5); ay6 = GGML_F16x_VEC_LOAD(y + i + 5 * ggml_f16_epr, 5); ay6 = GGML_F16x_VEC_FMA(ay6, ax6, vx); GGML_F16x_VEC_STORE(y + i + 5 * ggml_f16_epr, ay6, 5); ax7 = GGML_F16x_VEC_LOAD(x + i + 6 * ggml_f16_epr, 6); ay7 = GGML_F16x_VEC_LOAD(y + i + 6 * ggml_f16_epr, 6); ay7 = GGML_F16x_VEC_FMA(ay7, ax7, vx); GGML_F16x_VEC_STORE(y + i + 6 * ggml_f16_epr, ay7, 6); ax8 = GGML_F16x_VEC_LOAD(x + i + 7 * ggml_f16_epr, 7); ay8 = GGML_F16x_VEC_LOAD(y + i + 7 * ggml_f16_epr, 7); ay8 = GGML_F16x_VEC_FMA(ay8, ax8, vx); GGML_F16x_VEC_STORE(y + i + 7 * ggml_f16_epr, ay8, 7); } const int np2 = (n & ~(ggml_f16_epr - 1)); for (int k = np; k < np2; k += ggml_f16_epr) { svfloat16_t rx = GGML_F16x_VEC_LOAD(x + k, 0); svfloat16_t ry = GGML_F16x_VEC_LOAD(y + k, 0); ry = GGML_F16x_VEC_FMA(ry, rx, vx); 
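        // store the updated f16 vector back into y; the remaining tail of fewer than
        // ggml_f16_epr elements is handled below with an svwhilelt predicate, after which
        // np is set to n so the generic scalar leftover loop at the end has nothing left to do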
GGML_F16x_VEC_STORE(y + k, ry, 0); } if (np2 < n) { svbool_t pg = svwhilelt_b16(np2, n); svfloat16_t hx = svld1_f16(pg, (const __fp16 *)(x + np2)); svfloat16_t hy = svld1_f16(pg, (const __fp16 *)(y + np2)); hy = svmad_f16_x(pg, hx, vx, hy); svst1_f16(pg, (__fp16 *)(y + np2), hy); } np = n; #elif defined(__riscv_zvfh) // implies __riscv_v_intrinsic const ggml_fp16_t s = GGML_CPU_FP32_TO_FP16(v); const _Float16 scale = *(const _Float16*)(&s); // calculate step size const int epr = __riscv_vsetvlmax_e16m4(); const int step = epr * 2; int np = (n & ~(step - 1)); // unroll by 2 for (int i = 0; i < np; i += step) { vfloat16m4_t ax0 = __riscv_vle16_v_f16m4((const _Float16*)x + i, epr); vfloat16m4_t ay0 = __riscv_vle16_v_f16m4((const _Float16*)y + i, epr); ay0 = __riscv_vfmacc_vf_f16m4(ay0, scale, ax0, epr); __riscv_vse16_v_f16m4((_Float16*)y + i, ay0, epr); __asm__ __volatile__ ("" ::: "memory"); vfloat16m4_t ax1 = __riscv_vle16_v_f16m4((const _Float16*)x + i + epr, epr); vfloat16m4_t ay1 = __riscv_vle16_v_f16m4((const _Float16*)y + i + epr, epr); ay1 = __riscv_vfmacc_vf_f16m4(ay1, scale, ax1, epr); __riscv_vse16_v_f16m4((_Float16*)y + i + epr, ay1, epr); __asm__ __volatile__ ("" ::: "memory"); } // leftovers int vl; for (int i = np; i < n; i += vl) { vl = __riscv_vsetvl_e16m4(n - i); vfloat16m4_t ax0 = __riscv_vle16_v_f16m4((const _Float16*)x + i, vl); vfloat16m4_t ay0 = __riscv_vle16_v_f16m4((const _Float16*)y + i, vl); ay0 = __riscv_vfmacc_vf_f16m4(ay0, scale, ax0, vl); __riscv_vse16_v_f16m4((_Float16*)y + i, ay0, vl); } np = n; #elif defined(GGML_SIMD) const int np = (n & ~(GGML_F16_STEP - 1)); GGML_F16_VEC vx = GGML_F16_VEC_SET1(v); GGML_F16_VEC ax[GGML_F16_ARR]; GGML_F16_VEC ay[GGML_F16_ARR]; for (int i = 0; i < np; i += GGML_F16_STEP) { for (int j = 0; j < GGML_F16_ARR; j++) { ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx); GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j); } } #else const int np = 0; #endif // leftovers for (int i = np; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v); } } // xs and vs are byte strides of x and v inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * GGML_RESTRICT y, const float * GGML_RESTRICT xv, const float * GGML_RESTRICT vv) { const float * GGML_RESTRICT x[GGML_VEC_MAD_UNROLL]; const float * GGML_RESTRICT v[GGML_VEC_MAD_UNROLL]; for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) { x[i] = (const float *) ((const char *) xv + i*xs); v[i] = (const float *) ((const char *) vv + i*vs); } #if defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) // scalar Route to scalar implementation //TODO: Write SVE code for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { for (int i = 0; i < n; ++i) { y[i] += x[k][i]*v[k][0]; } } #elif defined(__riscv_v_intrinsic) for (int i = 0, avl; i < n; i += avl) { avl = __riscv_vsetvl_e32m8(n - i); vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl); for (int k = 0; k < GGML_VEC_MAD_UNROLL; k++) { vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[k][i], avl); ay = __riscv_vfmadd_vf_f32m8(ax, v[k][0], ay, avl); } __riscv_vse32_v_f32m8(&y[i], ay, avl); } #else const int np = (n & ~(GGML_F32_STEP - 1)); GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL]; for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { vx[k] = GGML_F32_VEC_SET1(v[k][0]); } GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR]; GGML_F32_VEC ay[GGML_F32_ARR]; for (int i = 0; i < np; i += GGML_F32_STEP) { for (int 
j = 0; j < GGML_F32_ARR; j++) { ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR); ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); } GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); } } // leftovers for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { for (int i = np; i < n; ++i) { y[i] += x[k][i]*v[k][0]; } } #endif #else // scalar for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { for (int i = 0; i < n; ++i) { y[i] += x[k][i]*v[k][0]; } } #endif } inline static void ggml_vec_mad1_f32(const int n, float * y, const float * x, const float s, const float b) { #if defined(GGML_USE_ACCELERATE) vDSP_vsmsa(x, 1, &s, &b, y, 1, n); #elif defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) // scalar ; TODO: Write SVE code for (int i = 0; i < n; ++i) { y[i] = x[i]*s + b; } #elif defined(__riscv_v_intrinsic) for (int i = 0, avl; i < n; i += avl) { avl = __riscv_vsetvl_e32m8(n - i); vfloat32m8_t ax = __riscv_vle32_v_f32m8(&x[i], avl); vfloat32m8_t vb = __riscv_vfmv_v_f_f32m8(b, avl); vfloat32m8_t ny = __riscv_vfmadd_vf_f32m8(ax, s, vb, avl); __riscv_vse32_v_f32m8(&y[i], ny, avl); } #else const int np = (n & ~(GGML_F32_STEP - 1)); GGML_F32_VEC vs = GGML_F32_VEC_SET1(s); GGML_F32_VEC vb = GGML_F32_VEC_SET1(b); GGML_F32_VEC ay[GGML_F32_ARR]; for (int i = 0; i < np; i += GGML_F32_STEP) { for (int j = 0; j < GGML_F32_ARR; j++) { ay[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); ay[j] = GGML_F32_VEC_FMA(vb, ay[j], vs); GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); } } // leftovers for (int i = np; i < n; ++i) { y[i] = x[i]*s + b; } #endif #else // scalar for (int i = 0; i < n; ++i) { y[i] = x[i]*s + b; } #endif } //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #if defined(GGML_USE_ACCELERATE) vDSP_vsmul(y, 1, &v, y, 1, n); #elif defined(GGML_SIMD) #if defined(__ARM_FEATURE_SVE) const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 const int ggml_f32_step = 2 * ggml_f32_epr; GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); const int np = (n & ~(ggml_f32_step - 1)); svfloat32_t ay1; svfloat32_t ay2; for (int i = 0; i < np; i += ggml_f32_step) { ay1 = GGML_F32_VEC_LOAD(y + i); ay1 = GGML_F32_VEC_MUL(ay1, vx); GGML_F32_VEC_STORE(y + i, ay1); ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); ay2 = GGML_F32_VEC_MUL(ay2, vx); GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); } // leftovers // maximum number of leftover elements will be less that ggml_f32_epr. 
Apply predicated svmad on available elements only for (int i = np; i < n; i += ggml_f32_epr) { svbool_t pg = svwhilelt_b32(i, n); ay1 = svld1_f32(pg, y + i); ay1 = svmul_f32_m(pg, ay1, vx); svst1_f32(pg, y + i, ay1); } #elif defined(__riscv_v_intrinsic) for (int i = 0, avl; i < n; i += avl) { avl = __riscv_vsetvl_e32m8(n - i); vfloat32m8_t ay = __riscv_vle32_v_f32m8(&y[i], avl); vfloat32m8_t ny = __riscv_vfmul_vf_f32m8(ay, v, avl); __riscv_vse32_v_f32m8(&y[i], ny, avl); } #else const int np = (n & ~(GGML_F32_STEP - 1)); GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); GGML_F32_VEC ay[GGML_F32_ARR]; for (int i = 0; i < np; i += GGML_F32_STEP) { for (int j = 0; j < GGML_F32_ARR; j++) { ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); ay[j] = GGML_F32_VEC_MUL(ay[j], vx); GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); } } // leftovers for (int i = np; i < n; ++i) { y[i] *= v; } #endif #else // scalar for (int i = 0; i < n; ++i) { y[i] *= v; } #endif } inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float v) { #if defined(GGML_SIMD) && defined(__ARM_FEATURE_SVE) const int sve_register_length = svcntb() * 8; const int ggml_f16_epr = sve_register_length / 16; const int ggml_f16_step = 2 * ggml_f16_epr; GGML_F16x_VEC vx = GGML_F16x_VEC_SET1(v); const int np = (n & ~(ggml_f16_step - 1)); svfloat16_t ay1, ay2; for (int i = 0; i < np; i += ggml_f16_step) { ay1 = GGML_F16x_VEC_LOAD(y + i + 0*ggml_f16_epr, 0); ay1 = GGML_F16x_VEC_MUL(ay1, vx); GGML_F16x_VEC_STORE(y + i + 0*ggml_f16_epr, ay1, 0); ay2 = GGML_F16x_VEC_LOAD(y + i + 1*ggml_f16_epr, 1); ay2 = GGML_F16x_VEC_MUL(ay2, vx); GGML_F16x_VEC_STORE(y + i + 1*ggml_f16_epr, ay2, 1); } // leftovers // maximum number of leftover elements will be less that ggmlF_16x_epr. Apply predicated svmad on available elements only if (np < n) { svbool_t pg = svwhilelt_b16(np, n); svfloat16_t hy = svld1_f16(pg, (__fp16 *)(y + np)); svfloat16_t out = svmul_f16_m(pg, hy, vx); svst1_f16(pg, (__fp16 *)(y + np), out); } #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfh) const ggml_fp16_t s = GGML_CPU_FP32_TO_FP16(v); const _Float16 scale = *(const _Float16*)(&s); // calculate step size const int epr = __riscv_vsetvlmax_e16m4(); const int step = epr * 2; const int np = (n & ~(step - 1)); // unroll by 2 for (int i = 0; i < np; i += step) { vfloat16m4_t ay0 = __riscv_vle16_v_f16m4((const _Float16*)y + i, epr); ay0 = __riscv_vfmul_vf_f16m4(ay0, scale, epr); __riscv_vse16_v_f16m4((_Float16*)y + i, ay0, epr); __asm__ __volatile__ ("" ::: "memory"); vfloat16m4_t ay1 = __riscv_vle16_v_f16m4((const _Float16*)y + i + epr, epr); ay1 = __riscv_vfmul_vf_f16m4(ay1, scale, epr); __riscv_vse16_v_f16m4((_Float16*)y + i + epr, ay1, epr); __asm__ __volatile__ ("" ::: "memory"); } // leftovers int vl; for (int i = np; i < n; i += vl) { vl = __riscv_vsetvl_e16m4(n - i); vfloat16m4_t ay0 = __riscv_vle16_v_f16m4((const _Float16*)y + i, vl); ay0 = __riscv_vfmul_vf_f16m4(ay0, scale, vl); __riscv_vse16_v_f16m4((_Float16*)y + i, ay0, vl); } #elif defined(GGML_SIMD) const int np = (n & ~(GGML_F16_STEP - 1)); GGML_F16_VEC vx = GGML_F16_VEC_SET1(v); GGML_F16_VEC ay[GGML_F16_ARR]; for (int i = 0; i < np; i += GGML_F16_STEP) { for (int j = 0; j < GGML_F16_ARR; j++) { ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); ay[j] = GGML_F16_VEC_MUL(ay[j], vx); GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j); } } // leftovers for (int i = np; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v); } #else // scalar for (int i = 0; i < n; ++i) { y[i] = 
GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v); } #endif } inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, 0, x, 0, x, 0, 1); *s = sqrtf(*s); } inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } inline static void ggml_vec_sqr_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16(v*v); } } inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } inline static void ggml_vec_sqrt_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(sqrtf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); } inline static void ggml_vec_log_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(logf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_sin_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sinf(x[i]); } inline static void ggml_vec_sin_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(sinf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_cos_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = cosf(x[i]); } inline static void ggml_vec_cos_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(cosf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } inline static void ggml_vec_abs_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(fabsf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } inline static void ggml_vec_sgn_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? 1.f : ((v < 0.f) ? -1.f : 0.f)); } } inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } inline static void ggml_vec_step_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16((GGML_CPU_FP16_TO_FP32(x[i]) > 0.f) ? 1.f : 0.f); } } inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); } inline static void ggml_vec_tanh_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(tanhf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 
x[i] : expm1f(x[i]); } inline static void ggml_vec_elu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { const float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v : expm1f(v)); } } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } inline static void ggml_vec_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v : 0.f); } } inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); } inline static void ggml_vec_leaky_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const float ns) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16(((v > 0.f) ? v : 0.f) + ns * ((v < 0.0f) ? v : 0.f)); } } inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); } inline static void ggml_vec_sigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(1.f / (1.f + expf(-GGML_CPU_FP16_TO_FP32(x[i])))); } } // TODO: optimize performance inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardswish_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16(v * fminf(1.0f, fmaxf(0.0f, (v + 3.0f) / 6.0f))); } } inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardsigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(fminf(1.0f, fmaxf(0.0f, (GGML_CPU_FP16_TO_FP32(x[i]) + 3.0f) / 6.0f))); } } inline static void ggml_vec_exp_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = expf(x[i]); } inline static void ggml_vec_exp_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = GGML_CPU_FP32_TO_FP16(expf(GGML_CPU_FP16_TO_FP32(x[i]))); } } static const float GELU_COEF_A = 0.044715f; static const float GELU_QUICK_COEF = -1.702f; static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; static const float SQRT_2_INV = 0.70710678118654752440084436210484f; inline static float ggml_gelu_f32(float x) { return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { y[i] = ggml_table_gelu_f16[i16[i]]; } } inline static void ggml_vec_gelu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { float xi = GGML_CPU_FP16_TO_FP32(x[i]); float res = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV)); y[i] = GGML_CPU_FP32_TO_FP16(res); } } #ifdef GGML_GELU_FP16 inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { 
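    // GGML_GELU_FP16 path: GELU is read from the precomputed ggml_table_gelu_f16 table,
    // indexed by the fp16 bit pattern of x[i]; inputs with x <= -10 or x >= 10 are
    // special-cased below since GELU saturates to 0 and to x there, respectively.
    // Scalar reference (identical to ggml_gelu_f32 above):
    //   y[i] = 0.5f*x[i]*(1.0f + tanhf(SQRT_2_OVER_PI*x[i]*(1.0f + GELU_COEF_A*x[i]*x[i])));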
uint16_t t; for (int i = 0; i < n; ++i) { if (x[i] <= -10.0f) { y[i] = 0.0f; } else if (x[i] >= 10.0f) { y[i] = x[i]; } else { ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]); } } } #else inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { y[i] = ggml_gelu_f32(x[i]); } } #endif inline static void ggml_vec_gelu_erf_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { float xi = x[i]; y[i] = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV)); } } inline static float ggml_gelu_quick_f32(float x) { return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x))); } //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { // const uint16_t * i16 = (const uint16_t *) x; // for (int i = 0; i < n; ++i) { // y[i] = ggml_table_gelu_quick_f16[i16[i]]; // } //} #ifdef GGML_GELU_QUICK_FP16 inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { uint16_t t; for (int i = 0; i < n; ++i) { ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]); } } #else inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { y[i] = ggml_gelu_quick_f32(x[i]); } } #endif inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v)))); } } // Sigmoid Linear Unit (SiLU) function inline static float ggml_silu_f32(float x) { return x/(1.0f + expf(-x)); } inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) { float v = GGML_CPU_FP16_TO_FP32(x); return GGML_CPU_FP32_TO_FP16(v/(1.0f + expf(-v))); } #if __FINITE_MATH_ONLY__ #error "some routines in ggml.c require non-finite math arithmetics -- pass -fno-finite-math-only to the compiler to fix" #error "ref: https://github.com/ggml-org/llama.cpp/pull/7154#issuecomment-2143844461" #endif /* Below function was borrowed from the GitHub repository: https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp */ #if defined(__ARM_FEATURE_SVE) && defined(__aarch64__) inline static svfloat32_t exp_ps_sve(svbool_t pg, svfloat32_t src) { // Constants const svfloat32_t log2_e = svdup_n_f32(1.4426950409f); const svfloat32_t ln2 = svdup_n_f32(0.6931473921f); const svfloat32_t half_ln2_sq = svdup_n_f32(0.2413862043f); const svuint32_t not_mask17 = svdup_n_u32(~((1u << 17) - 1)); const svfloat32_t one = svdup_n_f32(1.0f); const svfloat32_t inactive1 = svdup_n_f32(0.0f); const svint32_t inactive2 = svdup_n_s32(0); // Algorithm starts here svfloat32_t t0 = svmul_f32_m(pg, src, log2_e); // y = x * log2(e) svfloat32_t t1 = svrintm_f32_m(inactive1, pg, t0); // rount to int (float) svint32_t t2 = svcvt_s32_f32_m(inactive2, pg, t1); // n t1 = svsub_f32_m(pg, t0, t1); // a = y - floor(y) t1 = svadd_f32_m(pg, t1, one); // b = a + 1 svuint32_t t3 = svlsr_n_u32_m(pg, svreinterpret_u32_f32(t1), 17); // v = b >> 17 (u32) svfloat32_t t4 = svexpa_f32(t3); // c = fexpa(v) t4 = svscale_f32_m(pg, t4, t2); // fexpa(v) * 2^(n) // and_(t2.d, t1.d, not_mask17.d) svfloat32_t t5 = svreinterpret_f32_u32(svand_u32_m(pg, svreinterpret_u32_f32(t1), not_mask17)); t5 = svsub_f32_m(pg, t1, t5); // z t0 = svmla_f32_m(pg, ln2, t5, half_ln2_sq); 
// ln2 + half_ln2_sq * z t0 = svmla_f32_m(pg, one, t5, t0); // 1 + (ln2 * z) + (half_ln2_sq * z * z) t0 = svmul_f32_m(pg, t0, t4); // Final result return t0; } #endif #if defined(__ARM_FEATURE_SVE) && defined(__aarch64__) inline static svfloat32_t ggml_v_expf(svbool_t pg, svfloat32_t x) { const svfloat32_t r = svdup_n_f32_x(pg, 0x1.8p23f); const svfloat32_t z = svmla_n_f32_x(pg, r, x, 0x1.715476p+0f); const svfloat32_t n = svsub_f32_x(pg, z, r); const svfloat32_t b = svmls_n_f32_x(pg, svmls_n_f32_x(pg, x, n, 0x1.62e4p-1f), n, 0x1.7f7d1cp-20f); const svuint32_t e = svlsl_n_u32_x(pg, svreinterpret_u32_f32(z), 23); const svfloat32_t k = svreinterpret_f32_u32(svadd_u32_x(pg, e, svreinterpret_u32_f32(svdup_n_f32_x(pg, 1)))); const svbool_t c = svacgt_n_f32(pg, n, 126); const svfloat32_t u = svmul_f32_x(pg, b, b); const svfloat32_t j = svmla_f32_x(pg, svmul_n_f32_x(pg, b, 0x1.ffffecp-1f), svmla_f32_x(pg, svmla_f32_x(pg, svdup_n_f32_x(pg, 0x1.fffdb6p-2f), svdup_n_f32_x(pg, 0x1.555e66p-3f), b), svmla_f32_x(pg, svdup_n_f32_x(pg, 0x1.573e2ep-5f), svdup_n_f32_x(pg, 0x1.0e4020p-7f), b), u), u); const svuint32_t d = svdup_n_u32_z(svcmple_n_f32(pg, n, 0.0), 0x82000000); const svfloat32_t s1 = svreinterpret_f32_u32(svadd_n_u32_x(pg, d, 0x7f000000)); const svfloat32_t s2 = svreinterpret_f32_u32(svsub_u32_x(pg, e, d)); return svsel_f32(svacgt_f32(pg, n, svdup_n_f32_x(pg, 192)), svmul_f32_x(pg, s1, s1), svsel_f32(c, svmul_f32_x(pg, svmla_f32_x(pg, s2, s2, j), s1), svmla_f32_x(pg, k, k, j))); } // computes silu x/(1+exp(-x)) in single precision vector inline static svfloat32_t ggml_v_silu(svbool_t pg, svfloat32_t x) { const svfloat32_t one = svdup_n_f32_x(pg, 1.0f); const svfloat32_t zero = svdup_n_f32_x(pg, 0.0f); const svfloat32_t neg_x = svsub_f32_x(pg, zero, x); const svfloat32_t exp_neg_x = ggml_v_expf(pg, neg_x); const svfloat32_t one_plus_exp_neg_x = svadd_f32_x(pg, one, exp_neg_x); return svdiv_f32_x(pg, x, one_plus_exp_neg_x); } #elif defined(__ARM_NEON) && defined(__aarch64__) // adapted from arm limited optimized routine // the maximum error is 1.45358 plus 0.5 ulps // numbers above 88.38 will flush to infinity // numbers beneath -103.97 will flush to zero inline static float32x4_t ggml_v_expf(float32x4_t x) { const float32x4_t r = vdupq_n_f32(0x1.8p23f); const float32x4_t z = vfmaq_f32(r, x, vdupq_n_f32(0x1.715476p+0f)); const float32x4_t n = vsubq_f32(z, r); const float32x4_t b = vfmsq_f32(vfmsq_f32(x, n, vdupq_n_f32(0x1.62e4p-1f)), n, vdupq_n_f32(0x1.7f7d1cp-20f)); const uint32x4_t e = vshlq_n_u32(vreinterpretq_u32_f32(z), 23); const float32x4_t k = vreinterpretq_f32_u32(vaddq_u32(e, vreinterpretq_u32_f32(vdupq_n_f32(1)))); const uint32x4_t c = vcagtq_f32(n, vdupq_n_f32(126)); const float32x4_t u = vmulq_f32(b, b); const float32x4_t j = vfmaq_f32( vmulq_f32(vdupq_n_f32(0x1.ffffecp-1f), b), vfmaq_f32(vfmaq_f32(vdupq_n_f32(0x1.fffdb6p-2f), vdupq_n_f32(0x1.555e66p-3f), b), vfmaq_f32(vdupq_n_f32(0x1.573e2ep-5f), vdupq_n_f32(0x1.0e4020p-7f), b), u), u); if (!vpaddd_u64(vreinterpretq_u64_u32(c))) return vfmaq_f32(k, j, k); const uint32x4_t d = vandq_u32(vclezq_f32(n), vdupq_n_u32(0x82000000)); const float32x4_t s1 = vreinterpretq_f32_u32(vaddq_u32(d, vdupq_n_u32(0x7f000000))); const float32x4_t s2 = vreinterpretq_f32_u32(vsubq_u32(e, d)); return vbslq_f32(vcagtq_f32(n, vdupq_n_f32(192)), vmulq_f32(s1, s1), vbslq_f32(c, vmulq_f32(vfmaq_f32(s2, s2, j), s1), vfmaq_f32(k, k, j))); } // computes silu x/(1+exp(-x)) in single precision vector inline static float32x4_t ggml_v_silu(float32x4_t x) { const 
float32x4_t one = vdupq_n_f32(1.0f); const float32x4_t zero = vdupq_n_f32(0.0f); const float32x4_t neg_x = vsubq_f32(zero, x); const float32x4_t exp_neg_x = ggml_v_expf(neg_x); const float32x4_t one_plus_exp_neg_x = vaddq_f32(one, exp_neg_x); return vdivq_f32(x, one_plus_exp_neg_x); } #elif defined(__AVX512F__) && defined(__AVX512DQ__) // adapted from arm limited optimized routine // the maximum error is 1.45358 plus 0.5 ulps // numbers above 88.38 will flush to infinity // numbers beneath -103.97 will flush to zero inline static __m512 ggml_v_expf(__m512 x) { const __m512 r = _mm512_set1_ps(0x1.8p23f); const __m512 z = _mm512_fmadd_ps(x, _mm512_set1_ps(0x1.715476p+0f), r); const __m512 n = _mm512_sub_ps(z, r); const __m512 b = _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.7f7d1cp-20f), _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.62e4p-1f), x)); const __mmask16 d = _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(192), _CMP_GT_OQ); const __m512 u = _mm512_mul_ps(b, b); const __m512 j = _mm512_fmadd_ps( _mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_set1_ps(0x1.0e4020p-7f), b, _mm512_set1_ps(0x1.573e2ep-5f)), u, _mm512_fmadd_ps(_mm512_set1_ps(0x1.555e66p-3f), b, _mm512_set1_ps(0x1.fffdb6p-2f))), u, _mm512_fmadd_ps(_mm512_set1_ps(0x1.ffffecp-1f), b, _mm512_set1_ps(1.0F))); const __m512 res = _mm512_scalef_ps(j, n); if (_mm512_kortestz(d, d)) return res; const __m512 zero = _mm512_setzero_ps(); const __m512 alt = _mm512_mask_blend_ps( _mm512_cmp_ps_mask(n, zero, _CMP_LE_OQ), _mm512_set1_ps(INFINITY), zero); return _mm512_mask_blend_ps(d, res, alt); } // computes silu x/(1+exp(-x)) in single precision vector inline static __m512 ggml_v_silu(__m512 x) { const __m512 one = _mm512_set1_ps(1); const __m512 zero = _mm512_setzero_ps(); const __m512 neg_x = _mm512_sub_ps(zero, x); const __m512 exp_neg_x = ggml_v_expf(neg_x); const __m512 one_plus_exp_neg_x = _mm512_add_ps(one, exp_neg_x); return _mm512_div_ps(x, one_plus_exp_neg_x); } #elif defined(__AVX2__) && defined(__FMA__) // adapted from arm limited optimized routine // the maximum error is 1.45358 plus 0.5 ulps // numbers above 88.38 will flush to infinity // numbers beneath -103.97 will flush to zero inline static __m256 ggml_v_expf(__m256 x) { const __m256 r = _mm256_set1_ps(0x1.8p23f); const __m256 z = _mm256_fmadd_ps(x, _mm256_set1_ps(0x1.715476p+0f), r); const __m256 n = _mm256_sub_ps(z, r); const __m256 b = _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.7f7d1cp-20f), _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.62e4p-1f), x)); const __m256i e = _mm256_slli_epi32(_mm256_castps_si256(z), 23); const __m256 k = _mm256_castsi256_ps( _mm256_add_epi32(e, _mm256_castps_si256(_mm256_set1_ps(1)))); const __m256i c = _mm256_castps_si256( _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n), _mm256_set1_ps(126), _CMP_GT_OQ)); const __m256 u = _mm256_mul_ps(b, b); const __m256 j = _mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_set1_ps(0x1.0e4020p-7f), b, _mm256_set1_ps(0x1.573e2ep-5f)), u, _mm256_fmadd_ps(_mm256_set1_ps(0x1.555e66p-3f), b, _mm256_set1_ps(0x1.fffdb6p-2f))), u, _mm256_mul_ps(_mm256_set1_ps(0x1.ffffecp-1f), b)); if (!_mm256_movemask_ps(_mm256_castsi256_ps(c))) return _mm256_fmadd_ps(j, k, k); const __m256i g = _mm256_and_si256( _mm256_castps_si256(_mm256_cmp_ps(n, _mm256_setzero_ps(), _CMP_LE_OQ)), _mm256_set1_epi32(0x82000000u)); const __m256 s1 = _mm256_castsi256_ps(_mm256_add_epi32(g, _mm256_set1_epi32(0x7f000000u))); const __m256 s2 = _mm256_castsi256_ps(_mm256_sub_epi32(e, g)); const __m256i d = _mm256_castps_si256( 
_mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n), _mm256_set1_ps(192), _CMP_GT_OQ)); return _mm256_or_ps( _mm256_and_ps(_mm256_castsi256_ps(d), _mm256_mul_ps(s1, s1)), _mm256_andnot_ps( _mm256_castsi256_ps(d), _mm256_or_ps( _mm256_and_ps(_mm256_castsi256_ps(c), _mm256_mul_ps(_mm256_fmadd_ps(s2, j, s2), s1)), _mm256_andnot_ps(_mm256_castsi256_ps(c), _mm256_fmadd_ps(k, j, k))))); } // computes silu x/(1+exp(-x)) in single precision vector inline static __m256 ggml_v_silu(__m256 x) { const __m256 one = _mm256_set1_ps(1); const __m256 zero = _mm256_setzero_ps(); const __m256 neg_x = _mm256_sub_ps(zero, x); const __m256 exp_neg_x = ggml_v_expf(neg_x); const __m256 one_plus_exp_neg_x = _mm256_add_ps(one, exp_neg_x); return _mm256_div_ps(x, one_plus_exp_neg_x); } #elif defined(__SSE2__) // __AVX2__ / __ARM_NEON #if defined(__FMA__) #define MADD128(x, y, z) _mm_fmadd_ps(x, y, z) #define NMADD128(x, y, z) _mm_fnmadd_ps(x, y, z) #else #define MADD128(x, y, z) _mm_add_ps(_mm_mul_ps(x, y), z) #define NMADD128(x, y, z) _mm_sub_ps(z, _mm_mul_ps(x, y)) #endif // adapted from arm limited optimized routine // the maximum error is 1.45358 plus 0.5 ulps // numbers above 88.38 will flush to infinity // numbers beneath -103.97 will flush to zero inline static __m128 ggml_v_expf(__m128 x) { const __m128 r = _mm_set1_ps(0x1.8p23f); const __m128 z = MADD128(x, _mm_set1_ps(0x1.715476p+0f), r); const __m128 n = _mm_sub_ps(z, r); const __m128 b = NMADD128(n, _mm_set1_ps(0x1.7f7d1cp-20f), NMADD128(n, _mm_set1_ps(0x1.62e4p-1f), x)); const __m128i e = _mm_slli_epi32(_mm_castps_si128(z), 23); const __m128 k = _mm_castsi128_ps(_mm_add_epi32(e, _mm_castps_si128(_mm_set1_ps(1)))); const __m128i c = _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(126))); const __m128 u = _mm_mul_ps(b, b); const __m128 j = MADD128(MADD128(MADD128(_mm_set1_ps(0x1.0e4020p-7f), b, _mm_set1_ps(0x1.573e2ep-5f)), u, MADD128(_mm_set1_ps(0x1.555e66p-3f), b, _mm_set1_ps(0x1.fffdb6p-2f))), u, _mm_mul_ps(_mm_set1_ps(0x1.ffffecp-1f), b)); if (!_mm_movemask_epi8(c)) return MADD128(j, k, k); const __m128i g = _mm_and_si128(_mm_castps_si128(_mm_cmple_ps(n, _mm_setzero_ps())), _mm_set1_epi32(0x82000000u)); const __m128 s1 = _mm_castsi128_ps(_mm_add_epi32(g, _mm_set1_epi32(0x7f000000u))); const __m128 s2 = _mm_castsi128_ps(_mm_sub_epi32(e, g)); const __m128i d = _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(192))); return _mm_or_ps( _mm_and_ps(_mm_castsi128_ps(d), _mm_mul_ps(s1, s1)), _mm_andnot_ps(_mm_castsi128_ps(d), _mm_or_ps(_mm_and_ps(_mm_castsi128_ps(c), _mm_mul_ps(MADD128(s2, j, s2), s1)), _mm_andnot_ps(_mm_castsi128_ps(c), MADD128(k, j, k))))); } // computes silu x/(1+exp(-x)) in single precision vector inline static __m128 ggml_v_silu(__m128 x) { const __m128 one = _mm_set1_ps(1); const __m128 zero = _mm_setzero_ps(); const __m128 neg_x = _mm_sub_ps(zero, x); const __m128 exp_neg_x = ggml_v_expf(neg_x); const __m128 one_plus_exp_neg_x = _mm_add_ps(one, exp_neg_x); return _mm_div_ps(x, one_plus_exp_neg_x); } #elif defined(__riscv_v_intrinsic) // adapted from arm limited optimized routine // the maximum error is 1.45358 plus 0.5 ulps // numbers above 88.38 will flush to infinity // numbers beneath -103.97 will flush to zero inline static vfloat32m2_t ggml_v_expf_m2(vfloat32m2_t x, int vl) { const vfloat32m2_t r = __riscv_vfmv_v_f_f32m2(0x1.8p23f, vl); #ifdef __riscv_xtheadvector // workaround for compiler bug (gcc 14.3.0: Error: unrecognized opcode `th.vmv1r.v v2,v4') vfloat32m2_t 
z = __riscv_vfadd_vf_f32m2(r, 0.0f, vl); z = __riscv_vfmacc_vf_f32m2(z, 0x1.715476p+0f, x, vl); #else const vfloat32m2_t z = __riscv_vfmacc_vf_f32m2(r, 0x1.715476p+0f, x, vl); #endif const vfloat32m2_t n = __riscv_vfsub_vv_f32m2(z, r, vl); const vfloat32m2_t b = __riscv_vfnmsac_vf_f32m2(__riscv_vfnmsac_vf_f32m2(x, 0x1.62e4p-1f, n, vl), 0x1.7f7d1cp-20f, n, vl); const vuint32m2_t e = __riscv_vsll_vx_u32m2(__riscv_vreinterpret_v_f32m2_u32m2(z), 23, vl); const vfloat32m2_t k = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vadd_vx_u32m2(e, 0x3f800000, vl)); // 1.0f const vbool16_t c = __riscv_vmfgt_vf_f32m2_b16(__riscv_vfabs_v_f32m2(n, vl), 126.0f, vl); const vfloat32m2_t u = __riscv_vfmul_vv_f32m2(b, b, vl); const vfloat32m2_t j = __riscv_vfmacc_vv_f32m2( __riscv_vfmul_vf_f32m2(b, 0x1.ffffecp-1f, vl), __riscv_vfmacc_vv_f32m2( __riscv_vfmacc_vf_f32m2(__riscv_vfmv_v_f_f32m2(0x1.fffdb6p-2f, vl), 0x1.555e66p-3f, b, vl), __riscv_vfmacc_vf_f32m2(__riscv_vfmv_v_f_f32m2(0x1.573e2ep-5f, vl), 0x1.0e4020p-7f, b, vl), u, vl), u, vl); if (!__riscv_vcpop_m_b16(c, vl)) return __riscv_vfmacc_vv_f32m2(k, j, k, vl); const vbool16_t dm = __riscv_vmfle_vf_f32m2_b16(n, 0.0f, vl); const vuint32m2_t d = __riscv_vmerge_vxm_u32m2(__riscv_vmv_v_x_u32m2(0, vl), 0x82000000, dm, vl); const vfloat32m2_t s1 = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vadd_vx_u32m2(d, 0x7f000000, vl)); const vfloat32m2_t s2 = __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vsub_vv_u32m2(e, d, vl)); const vfloat32m2_t r1 = __riscv_vmerge_vvm_f32m2( __riscv_vfmacc_vv_f32m2(k, k, j, vl), __riscv_vfmul_vv_f32m2(__riscv_vfmacc_vv_f32m2(s2, s2, j, vl), s1, vl), c, vl); return __riscv_vmerge_vvm_f32m2( r1, __riscv_vfmul_vv_f32m2(s1, s1, vl), __riscv_vmfgt_vf_f32m2_b16(__riscv_vfabs_v_f32m2(n, vl), 192.0f, vl), vl); } // computes silu x/(1+exp(-x)) in single precision vector inline static vfloat32m2_t ggml_v_silu_m2(vfloat32m2_t x, int vl) { const vfloat32m2_t neg_x = __riscv_vfneg_v_f32m2(x, vl); const vfloat32m2_t exp_neg_x = ggml_v_expf_m2(neg_x, vl); const vfloat32m2_t one_plus_exp_neg_x = __riscv_vfadd_vf_f32m2(exp_neg_x, 1.0f, vl); return __riscv_vfdiv_vv_f32m2(x, one_plus_exp_neg_x, vl); } #endif // __ARM_NEON / __AVX2__ / __SSE2__ / __riscv_v_intrinsic inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { y[i] = ggml_silu_f16(x[i]); } } inline static float ggml_silu_backward_f32(float x, float dy) { const float s = 1.0f/(1.0f + expf(-x)); return dy*s*(1.0f + x*(1.0f - s)); } inline static ggml_fp16_t ggml_silu_backward_f16(ggml_fp16_t x, ggml_fp16_t dy) { const float v = GGML_CPU_FP16_TO_FP32(x); const float s = 1.0f/(1.0f + expf(-v)); return GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s))); } inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { for (int i = 0; i < n; ++i) { dx[i] = ggml_silu_backward_f32(x[i], dy[i]); } } inline static void ggml_vec_silu_backward_f16(const int n, ggml_fp16_t * dx, const ggml_fp16_t * x, const ggml_fp16_t * dy) { for (int i = 0; i < n; ++i) { dx[i] = ggml_silu_backward_f16(x[i], dy[i]); } } inline static void ggml_vec_reglu_f32 (const int n, float * y, const float * x, const float * g) { for (int i = 0; i < n; ++i) { y[i] = (x[i] > 0.f) ? 
x[i] * g[i] : 0.f; } } inline static void ggml_vec_reglu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(x[i]); y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v * GGML_CPU_FP16_TO_FP32(g[i]) : 0.f); } } #ifdef GGML_GELU_FP16 inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) { uint16_t t; for (int i = 0; i < n; ++i) { if (x[i] <= -10.0f) { y[i] = 0.0f; } else if (x[i] >= 10.0f) { y[i] = x[i] * g[i]; } else { ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]) * g[i]; } } } #else inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) { for (int i = 0; i < n; ++i) { y[i] = ggml_gelu_f32(x[i]) * g[i]; } } #endif inline static void ggml_vec_geglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(g[i]); y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[i16[i]]) * v); } } void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g); inline static void ggml_vec_swiglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { for (int i = 0; i < n; ++i) { float xi = GGML_CPU_FP16_TO_FP32(x[i]); float gi = GGML_CPU_FP16_TO_FP32(g[i]); y[i] = GGML_CPU_FP32_TO_FP16((xi/(1.0f + expf(-xi))) * gi); } } inline static void ggml_vec_geglu_erf_f32(const int n, float * y, const float * x, const float * g) { for (int i = 0; i < n; ++i) { float xi = x[i]; y[i] = 0.5f * xi * (1.0f + erff(xi*SQRT_2_INV)) * g[i]; } } inline static void ggml_vec_geglu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { for (int i = 0; i < n; ++i) { float xi = GGML_CPU_FP16_TO_FP32(x[i]); float gi = GGML_CPU_FP16_TO_FP32(g[i]); y[i] = GGML_CPU_FP32_TO_FP16(0.5f * xi * (1.0f + erff(xi*SQRT_2_INV)) * gi); } } #ifdef GGML_GELU_QUICK_FP16 inline static void ggml_vec_geglu_quick_f32(const int n, float * y, const float * x, const float * g) { uint16_t t; for (int i = 0; i < n; ++i) { ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]) * g[i]; } } #else inline static void ggml_vec_geglu_quick_f32(const int n, float * y, const float * x, const float * g) { for (int i = 0; i < n; ++i) { y[i] = ggml_gelu_quick_f32(x[i]) * g[i]; } } #endif inline static void ggml_vec_geglu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { float v = GGML_CPU_FP16_TO_FP32(g[i]); y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[i16[i]]) * v); } } inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE ggml_float sum = 0.0; for (int i = 0; i < n; ++i) { sum += (ggml_float)x[i]; } *s = (float)sum; #else vDSP_sve(x, 1, s, n); #endif } inline static void ggml_vec_cumsum_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { if (i == 0) { y[i] = x[i]; } else { y[i] = y[i - 1] + x[i]; } } } inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) { ggml_float sum = 0.0; for (int i = 0; i < n; ++i) { sum += (ggml_float)x[i]; } *s = sum; } inline 
static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) { float sum = 0.0f; for (int i = 0; i < n; ++i) { sum += GGML_CPU_FP16_TO_FP32(x[i]); } *s = sum; } inline static void ggml_vec_sum_bf16_ggf(const int n, float * s, const ggml_bf16_t * x) { float sum = 0.0f; for (int i = 0; i < n; ++i) { sum += GGML_BF16_TO_FP32(x[i]); } *s = sum; } inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE float max = -INFINITY; for (int i = 0; i < n; ++i) { max = MAX(max, x[i]); } *s = max; #else vDSP_maxv(x, 1, s, n); #endif } inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { ggml_vec_norm_f32(n, s, x); *s = 1.f/(*s); } inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) { float max = -INFINITY; int idx = 0; for (int i = 0; i < n; ++i) { max = MAX(max, x[i]); if (max == x[i]) { idx = i; } } *s = idx; } #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-cuda/000077500000000000000000000000001512524704700170135ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cuda/CMakeLists.txt000066400000000000000000000233151512524704700215570ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES find_package(CUDAToolkit) if (CUDAToolkit_FOUND) message(STATUS "CUDA Toolkit found") if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES) # native == GPUs available at build time # 50 == Maxwell, lowest CUDA 12 standard # 60 == P100, FP16 CUDA intrinsics # 61 == Pascal, __dp4a instruction (per-byte integer dot product) # 70 == V100, FP16 tensor cores # 75 == Turing, int8 tensor cores # 80 == Ampere, asynchronous data loading, faster tensor core instructions # 86 == RTX 3000, needs CUDA v11.1 # 89 == RTX 4000, needs CUDA v11.8 # 120 == Blackwell, needs CUDA v12.8, FP4 tensor cores # # XX-virtual == compile CUDA code as PTX, do JIT compilation to binary code on first run # XX-real == compile CUDA code as device code for this specific architecture # no suffix == compile as both PTX and device code # # The default behavior for a non-native is to build virtual architectures as needed to cover all features needed # for best performance and to also build real architectures for the most commonly used GPUs. if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.24") set(CMAKE_CUDA_ARCHITECTURES "native") else() if (CUDAToolkit_VERSION VERSION_LESS "13") list(APPEND CMAKE_CUDA_ARCHITECTURES 50-virtual 61-virtual 70-virtual) endif () list(APPEND CMAKE_CUDA_ARCHITECTURES 75-virtual 80-virtual 86-real) if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.8") list(APPEND CMAKE_CUDA_ARCHITECTURES 89-real) endif() if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8") # The CUDA architecture 120f-virtual would in principle work for Blackwell support # but the newly added "f" suffix conflicted with a preexising regex for validating CUDA architectures in CMake. # So either a recent CMake version or one with the backported fix is needed. # The following versions should work: # - CMake >= v3.31.8 && CMake < v4.0.0 # - CMake >= v4.0.2 # This is NOT documented in the CMake release notes, # check Modules/Internal/CMakeCUDAArchitecturesValidate.cmake in the CMake git repository instead. # However, the architectures 120a-real and 121a-real should work with basically any CMake version and # until the release of e.g. Rubin there is no benefit to shipping virtual architectures for Blackwell. 
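            # Side note (illustrative example only, not upstream guidance): all of the defaults
            # computed in this block can be bypassed by setting the standard CMake cache variable
            # directly at configure time, e.g.
            #   cmake -B build -DCMAKE_CUDA_ARCHITECTURES="86-real;89-real"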
list(APPEND CMAKE_CUDA_ARCHITECTURES 120a-real 121a-real) endif() endif() endif() enable_language(CUDA) # Replace any plain 12X CUDA architectures with their "architecture-specific" equivalents 12Xa. # 12X is forwards-compatible, 12Xa is not. # Notably the Blackwell FP4 tensor core instructions are not forwards compatible and therefore need 12Xa. # But while 12X vs. 12Xa can be checked in device code there is (to my knowledge) no easy way to do the same check in host code. # So for now just replace all instances of 12X with 12Xa, this should be fine until Rubin is released. foreach(ARCHS IN ITEMS CMAKE_CUDA_ARCHITECTURES CMAKE_CUDA_ARCHITECTURES_NATIVE) set(FIXED_ARCHS "") foreach(ARCH IN LISTS ${ARCHS}) if (ARCH MATCHES "^12[0-9](-real|-virtual)?$") string(REGEX REPLACE "^(12[0-9])((-real|-virtual)?)$" "\\1a\\2" FIXED_ARCH ${ARCH}) message(STATUS "Replacing ${ARCH} in ${ARCHS} with ${FIXED_ARCH}") list(APPEND FIXED_ARCHS "${FIXED_ARCH}") else() list(APPEND FIXED_ARCHS "${ARCH}") endif() endforeach() set(${ARCHS} ${FIXED_ARCHS}) endforeach() # If we try to compile a "native" build it will use the 12X architectures and fail. # So we should instead use the native architectures as determined by CMake after replacing 12X with 12Xa. # But if at the time of the build no GPUs are connected at all CMAKE_CUDA_ARCHITECTURES will contain garbage that we should not use. if (CMAKE_CUDA_ARCHITECTURES STREQUAL "native" AND CMAKE_CUDA_ARCHITECTURES_NATIVE MATCHES "^[0-9]+(a|f)?(-real|-virtual)?(;[0-9]+(a|f)?(-real|-virtual)?|;)*$") set(CMAKE_CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES_NATIVE}) endif() message(STATUS "Using CMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} CMAKE_CUDA_ARCHITECTURES_NATIVE=${CMAKE_CUDA_ARCHITECTURES_NATIVE}") file(GLOB GGML_HEADERS_CUDA "*.cuh") list(APPEND GGML_HEADERS_CUDA "../../include/ggml-cuda.h") file(GLOB GGML_SOURCES_CUDA "*.cu") file(GLOB SRCS "template-instances/fattn-tile*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) file(GLOB SRCS "template-instances/fattn-mma*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) file(GLOB SRCS "template-instances/mmq*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) file(GLOB SRCS "template-instances/mmf*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) if (GGML_CUDA_FA_ALL_QUANTS) file(GLOB SRCS "template-instances/fattn-vec*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) else() file(GLOB SRCS "template-instances/fattn-vec*q4_0-q4_0.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) file(GLOB SRCS "template-instances/fattn-vec*q8_0-q8_0.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) file(GLOB SRCS "template-instances/fattn-vec*f16-f16.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) endif() ggml_add_backend_library(ggml-cuda ${GGML_HEADERS_CUDA} ${GGML_SOURCES_CUDA} ) add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE}) if (GGML_CUDA_GRAPHS) add_compile_definitions(GGML_CUDA_USE_GRAPHS) endif() if (GGML_CUDA_FORCE_MMQ) add_compile_definitions(GGML_CUDA_FORCE_MMQ) endif() if (GGML_CUDA_FORCE_CUBLAS) add_compile_definitions(GGML_CUDA_FORCE_CUBLAS) endif() if (GGML_CUDA_NO_VMM) add_compile_definitions(GGML_CUDA_NO_VMM) endif() if (NOT GGML_CUDA_FA) add_compile_definitions(GGML_CUDA_NO_FA) endif() if (GGML_CUDA_NO_PEER_COPY) add_compile_definitions(GGML_CUDA_NO_PEER_COPY) endif() if (GGML_STATIC) if (WIN32) # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas) else () if 
(CUDAToolkit_VERSION VERSION_GREATER_EQUAL "10.1") target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static) else() target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas_static) endif() endif() else() target_link_libraries(ggml-cuda PRIVATE CUDA::cudart CUDA::cublas) endif() if (GGML_CUDA_NO_VMM) # No VMM requested, no need to link directly with the cuda driver lib (libcuda.so) else() target_link_libraries(ggml-cuda PRIVATE CUDA::cuda_driver) endif() set(CUDA_CXX_FLAGS "") set(CUDA_FLAGS -use_fast_math -extended-lambda) if (GGML_CUDA_DEBUG) list(APPEND CUDA_FLAGS -lineinfo) add_compile_definitions(GGML_CUDA_DEBUG) endif() if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8") # Options are: # - none (not recommended) # - speed (nvcc's default) # - balance # - size list(APPEND CUDA_FLAGS -compress-mode=${GGML_CUDA_COMPRESSION_MODE}) endif() if (GGML_FATAL_WARNINGS) list(APPEND CUDA_FLAGS -Werror all-warnings) endif() if (GGML_ALL_WARNINGS AND NOT MSVC) set(NVCC_CMD ${CMAKE_CUDA_COMPILER} .c) if (NOT CMAKE_CUDA_HOST_COMPILER STREQUAL "") list(APPEND NVCC_CMD -ccbin ${CMAKE_CUDA_HOST_COMPILER}) endif() execute_process( COMMAND ${NVCC_CMD} -Xcompiler --version OUTPUT_VARIABLE CUDA_CCFULLVER ERROR_QUIET ) if (NOT CUDA_CCFULLVER MATCHES clang) set(CUDA_CCID "GNU") execute_process( COMMAND ${NVCC_CMD} -Xcompiler "-dumpfullversion -dumpversion" OUTPUT_VARIABLE CUDA_CCVER ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE ) else() if (CUDA_CCFULLVER MATCHES Apple) set(CUDA_CCID "AppleClang") else() set(CUDA_CCID "Clang") endif() string(REGEX REPLACE "^.* version ([0-9.]*).*$" "\\1" CUDA_CCVER ${CUDA_CCFULLVER}) endif() message(STATUS "CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}") ggml_get_flags(${CUDA_CCID} ${CUDA_CCVER}) list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS}) # This is passed to -Xcompiler later endif() if (NOT MSVC) list(APPEND CUDA_CXX_FLAGS -Wno-pedantic) endif() list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED) # pass host compiler flags as a single argument if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "") list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED}) endif() target_compile_options(ggml-cuda PRIVATE "$<$:${CUDA_FLAGS}>") else() message(FATAL_ERROR "CUDA Toolkit not found") endif() ggml-org-ggml-3678254/src/ggml-cuda/acc.cu000066400000000000000000000045721512524704700201020ustar00rootroot00000000000000#include "acc.cuh" static __global__ void acc_f32(const float * x, const float * y, float * dst, const int64_t ne, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const int64_t s11, const int64_t s12, const int64_t s13, const int64_t offset) { const int64_t i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= ne) { return; } int64_t src1_idx = i - offset; int64_t tmp = src1_idx; const int64_t i13 = tmp / s13; tmp -= i13 * s13; const int64_t i12 = tmp / s12; tmp -= i12 * s12; const int64_t i11 = tmp / s11; tmp -= i11 * s11; const int64_t i10 = tmp; float val = x[i]; if (src1_idx >= 0 && i10 < ne10 && i11 < ne11 && i12 < ne12 && i13 < ne13) { val += y[((i13*ne12 + i12) * ne11 + i11) * ne10 + i10]; } dst[i] = val; } static void acc_f32_cuda(const float * x, const float * y, float * dst, const int64_t n_elements, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const int64_t s1, const int64_t s2, const int64_t s3, const int64_t offset, cudaStream_t stream) { const int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE; 
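    // ceil-divide so that every destination element gets its own thread: the kernel runs as a
    // 1-D grid of num_blocks blocks with CUDA_ACC_BLOCK_SIZE threads per block on the
    // caller-provided stream, and out-of-range threads return immediately inside acc_f32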
acc_f32<<>>(x, y, dst, n_elements, ne10, ne11, ne12, ne13, s1, s2, s3, offset); } void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src1)); GGML_ASSERT(dst->nb[0] == ggml_element_size(dst)); GGML_ASSERT(ggml_is_contiguously_allocated(dst)); const int64_t s1 = dst->op_params[0] / sizeof(float); const int64_t s2 = dst->op_params[1] / sizeof(float); const int64_t s3 = dst->op_params[2] / sizeof(float); const int64_t offset = dst->op_params[3] / sizeof(float); acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], s1, s2, s3, offset, stream); } ggml-org-ggml-3678254/src/ggml-cuda/acc.cuh000066400000000000000000000002031512524704700202350ustar00rootroot00000000000000#include "common.cuh" #define CUDA_ACC_BLOCK_SIZE 256 void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/add-id.cu000066400000000000000000000035551512524704700204760ustar00rootroot00000000000000#include "add-id.cuh" static __global__ void add_id_kernel( const float * src0, const float * src1, const int32_t * src2, float * dst, int64_t ne0, int64_t ne1, size_t nb01, size_t nb02, size_t nb11, size_t nb21 ) { const int64_t i1 = blockIdx.x; const int64_t i2 = blockIdx.y; const int i11 = *(const int32_t *) ((const char *) src2 + i1*sizeof(int32_t) + i2*nb21); const size_t nb1 = ne0 * sizeof(float); const size_t nb2 = ne1 * nb1; float * dst_row = (float *)((char *)dst + i1*nb1 + i2*nb2); const float * src0_row = (const float *)((const char *)src0 + i1*nb01 + i2*nb02); const float * src1_row = (const float *)((const char *)src1 + i11*nb11); for (int64_t i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) { dst_row[i0] = src0_row[i0] + src1_row[i0]; } } void ggml_cuda_op_add_id(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; GGML_TENSOR_TERNARY_OP_LOCALS GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(src2->type == GGML_TYPE_I32); GGML_ASSERT(nb00 == sizeof(float)); GGML_ASSERT(nb10 == sizeof(float)); GGML_ASSERT(nb20 == sizeof(int32_t)); const float * src0_d = (const float *)src0->data; const float * src1_d = (const float *)src1->data; const int32_t * src2_d = (const int32_t *)src2->data; float * dst_d = (float *)dst->data; int threads = std::min((int)ne00, 768); // cols dim3 blocks(ne01, ne02); // n_experts_used, n_tokens add_id_kernel<<>>( src0_d, src1_d, src2_d, dst_d, ne0, ne1, nb01, nb02, nb11, nb21 ); } ggml-org-ggml-3678254/src/ggml-cuda/add-id.cuh000066400000000000000000000001451512524704700206360ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_add_id(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/arange.cu000066400000000000000000000022771512524704700206110ustar00rootroot00000000000000#include "arange.cuh" static __global__ void arange_f32(float * dst, const int ne0, const float start, const float step) { // blockIDx.x: idx 
of ne0 / BLOCK_SIZE int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { return; } dst[nidx] = start + step * nidx; } static void arange_f32_cuda(float * dst, const int ne0, const float start, const float step, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_ARANGE_BLOCK_SIZE - 1) / CUDA_ARANGE_BLOCK_SIZE; arange_f32<<>>(dst, ne0, start, step); } void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(dst->type == GGML_TYPE_F32); float start; float stop; float step; memcpy(&start, (float *)dst->op_params + 0, sizeof(float)); memcpy(&stop, (float *)dst->op_params + 1, sizeof(float)); memcpy(&step, (float *)dst->op_params + 2, sizeof(float)); int64_t steps = (int64_t)ceil((stop - start) / step); GGML_ASSERT(ggml_nelements(dst) == steps); arange_f32_cuda(dst_d, dst->ne[0], start, step, stream); } ggml-org-ggml-3678254/src/ggml-cuda/arange.cuh000066400000000000000000000002111512524704700207430ustar00rootroot00000000000000#include "common.cuh" #define CUDA_ARANGE_BLOCK_SIZE 256 void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/argmax.cu000066400000000000000000000054461512524704700206340ustar00rootroot00000000000000#include #include #include "argmax.cuh" #include "common.cuh" #include "sum.cuh" static __global__ void argmax_f32(const float * __restrict__ x, int32_t * __restrict__ dst, const int64_t ncols) { const int64_t row = blockIdx.x; float maxval = -FLT_MAX; int argmax = -1; const float * rowx = x + row * ncols; for (int32_t col = threadIdx.x; col < ncols; col += blockDim.x) { const float val = rowx[col]; if (val > maxval) { maxval = val; argmax = col; } } #pragma unroll for (int offset = WARP_SIZE/2; offset > 0; offset >>= 1) { const float val = __shfl_xor_sync(0xFFFFFFFF, maxval, offset, WARP_SIZE); const int col = __shfl_xor_sync(0xFFFFFFFF, argmax, offset, WARP_SIZE); if (val > maxval) { maxval = val; argmax = col; } } const int n_warps = blockDim.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; const int warp_id = threadIdx.x / WARP_SIZE; if (n_warps > 1) { constexpr int max_warps = 1024 / WARP_SIZE; __shared__ float shared_maxval[max_warps]; __shared__ int shared_argmax[max_warps]; if (lane_id == 0) { shared_maxval[warp_id] = maxval; shared_argmax[warp_id] = argmax; } __syncthreads(); if (warp_id == 0) { if (lane_id < n_warps) { maxval = shared_maxval[lane_id]; argmax = shared_argmax[lane_id]; } #pragma unroll for (int offset = WARP_SIZE/2; offset > 0; offset >>= 1) { const float val = __shfl_xor_sync(0xFFFFFFFF, maxval, offset, WARP_SIZE); const int col = __shfl_xor_sync(0xFFFFFFFF, argmax, offset, WARP_SIZE); if (val > maxval) { maxval = val; argmax = col; } } } } if (warp_id == 0 && lane_id == 0) { dst[row] = argmax; } } void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_I32); GGML_ASSERT(ggml_is_contiguous(src0)); const int64_t ne00 = src0->ne[0]; const int64_t nrows = ggml_nrows(src0); const float * src0_d = (const float *) src0->data; int32_t * dst_d = (int32_t *) dst->data; cudaStream_t stream = ctx.stream(); const int64_t num_blocks = nrows; const int64_t num_threads = std::min(1024, (ne00 + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE); const dim3 blocks_dim(num_threads, 1, 1); const dim3 blocks_num(num_blocks, 1, 1); argmax_f32<<>>(src0_d, 
dst_d, ne00); } ggml-org-ggml-3678254/src/ggml-cuda/argmax.cuh000066400000000000000000000001421512524704700207700ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/argsort.cu000066400000000000000000000173451512524704700210370ustar00rootroot00000000000000#include "argsort.cuh" #ifdef GGML_CUDA_USE_CUB # include using namespace cub; #endif // GGML_CUDA_USE_CUB static __global__ void init_indices(int * indices, const int ncols, const int nrows) { const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y; if (col < ncols && row < nrows) { indices[row * ncols + col] = col; } } static __global__ void init_offsets(int * offsets, const int ncols, const int nrows) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx <= nrows) { offsets[idx] = idx * ncols; } } #ifdef GGML_CUDA_USE_CUB static void argsort_f32_i32_cuda_cub(ggml_cuda_pool & pool, const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) { ggml_cuda_pool_alloc temp_indices_alloc(pool, ncols * nrows); ggml_cuda_pool_alloc temp_keys_alloc(pool, ncols * nrows); ggml_cuda_pool_alloc offsets_alloc(pool, nrows + 1); int * temp_indices = temp_indices_alloc.get(); float * temp_keys = temp_keys_alloc.get(); int * d_offsets = offsets_alloc.get(); static const int block_size = 256; const dim3 grid_size((ncols + block_size - 1) / block_size, nrows); init_indices<<>>(temp_indices, ncols, nrows); const dim3 offset_grid((nrows + block_size - 1) / block_size); init_offsets<<>>(d_offsets, ncols, nrows); CUDA_CHECK(cudaMemcpyAsync(temp_keys, x, ncols * nrows * sizeof(float), cudaMemcpyDeviceToDevice, stream)); size_t temp_storage_bytes = 0; if (order == GGML_SORT_ORDER_ASC) { DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, temp_keys, temp_keys, // keys (in-place) temp_indices, dst, // values (indices) ncols * nrows, nrows, // num items, num segments d_offsets, d_offsets + 1, 0, sizeof(float) * 8, // all bits stream); } else { DeviceSegmentedRadixSort::SortPairsDescending(nullptr, temp_storage_bytes, temp_keys, temp_keys, temp_indices, dst, ncols * nrows, nrows, d_offsets, d_offsets + 1, 0, sizeof(float) * 8, stream); } ggml_cuda_pool_alloc temp_storage_alloc(pool, temp_storage_bytes); void * d_temp_storage = temp_storage_alloc.get(); if (order == GGML_SORT_ORDER_ASC) { DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, temp_keys, temp_keys, temp_indices, dst, ncols * nrows, nrows, d_offsets, d_offsets + 1, 0, sizeof(float) * 8, stream); } else { DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, temp_keys, temp_keys, temp_indices, dst, ncols * nrows, nrows, d_offsets, d_offsets + 1, 0, sizeof(float) * 8, stream); } } #endif // GGML_CUDA_USE_CUB // Bitonic sort implementation template static inline __device__ void ggml_cuda_swap(T & a, T & b) { T tmp = a; a = b; b = tmp; } template static __global__ void k_argsort_f32_i32(const float * x, int * dst, const int ncols, int ncols_pad) { // bitonic sort int col = threadIdx.x; int row = blockIdx.x; if (col >= ncols_pad) { return; } const float * x_row = x + row * ncols; extern __shared__ int dst_row[]; // initialize indices dst_row[col] = col; __syncthreads(); for (int k = 2; k <= ncols_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ncols || (dst_row[ixj] < ncols && 
(order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { ggml_cuda_swap(dst_row[col], dst_row[ixj]); } } else { if (dst_row[ixj] >= ncols || (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { ggml_cuda_swap(dst_row[col], dst_row[ixj]); } } } __syncthreads(); } } // copy the result to dst without the padding if (col < ncols) { dst[row * ncols + col] = dst_row[col]; } } static int next_power_of_2(int x) { int n = 1; while (n < x) { n *= 2; } return n; } static void argsort_f32_i32_cuda_bitonic(const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) { // bitonic sort requires ncols to be power of 2 const int ncols_pad = next_power_of_2(ncols); const dim3 block_dims(ncols_pad, 1, 1); const dim3 block_nums(nrows, 1, 1); const size_t shared_mem = ncols_pad * sizeof(int); // FIXME: this limit could be raised by ~2-4x on Ampere or newer GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb); if (order == GGML_SORT_ORDER_ASC) { k_argsort_f32_i32 <<>>(x, dst, ncols, ncols_pad); } else if (order == GGML_SORT_ORDER_DESC) { k_argsort_f32_i32 <<>>(x, dst, ncols, ncols_pad); } else { GGML_ABORT("fatal error"); } } void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_I32); GGML_ASSERT(ggml_is_contiguous(src0)); const int64_t ncols = src0->ne[0]; const int64_t nrows = ggml_nrows(src0); enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0]; #ifdef GGML_CUDA_USE_CUB const int ncols_pad = next_power_of_2(ncols); const size_t shared_mem = ncols_pad * sizeof(int); const size_t max_shared_mem = ggml_cuda_info().devices[ggml_cuda_get_device()].smpb; if (shared_mem > max_shared_mem || ncols > 1024) { ggml_cuda_pool & pool = ctx.pool(); argsort_f32_i32_cuda_cub(pool, src0_d, (int *) dst_d, ncols, nrows, order, stream); } else { argsort_f32_i32_cuda_bitonic(src0_d, (int *) dst_d, ncols, nrows, order, stream); } #else argsort_f32_i32_cuda_bitonic(src0_d, (int *) dst_d, ncols, nrows, order, stream); #endif } ggml-org-ggml-3678254/src/ggml-cuda/argsort.cuh000066400000000000000000000001461512524704700211760ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/binbcast.cu000066400000000000000000000520651512524704700211410ustar00rootroot00000000000000#include "binbcast.cuh" #include #include static __device__ __forceinline__ float op_repeat(const float a, const float b) { return b; GGML_UNUSED(a); } static __device__ __forceinline__ float op_add(const float a, const float b) { return a + b; } static __device__ __forceinline__ float op_sub(const float a, const float b) { return a - b; } static __device__ __forceinline__ float op_mul(const float a, const float b) { return a * b; } static __device__ __forceinline__ float op_div(const float a, const float b) { return a / b; } template static __global__ void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst, const int ne0, const int ne1, const int ne2, const uint3 ne3, const uint3 ne10, const uint3 ne11, const uint3 ne12, const uint3 
ne13, /*int s0, */ const int s1, const int s2, const int s3, /*int s00,*/ const int s01, const int s02, const int s03, /*int s10,*/ const int s11, const int s12, const int s13, src1_ptrs... src1s) { const uint32_t i0s = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t i1 = (blockDim.y * blockIdx.y + threadIdx.y); const uint32_t i2 = fastdiv((blockDim.z * blockIdx.z + threadIdx.z), ne3); const uint32_t i3 = (blockDim.z * blockIdx.z + threadIdx.z) - (i2 * ne3.z); if (i0s >= (uint32_t)ne0 || i1 >= (uint32_t)ne1 || i2 >= (uint32_t)ne2 || i3 >= ne3.z) { return; } const uint32_t i11 = fastmodulo(i1, ne11); const uint32_t i12 = fastmodulo(i2, ne12); const uint32_t i13 = fastmodulo(i3, ne13); const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; const size_t i_dst = i3*s3 + i2*s2 + i1*s1; const src0_t * src0_row = src0 ? (src0 + i_src0) : nullptr; dst_t * dst_row = dst + i_dst; for (int i0 = i0s; i0 < ne0; i0 += blockDim.x * gridDim.x) { const uint32_t i10 = fastmodulo(i0, ne10); float result = src0_row ? (float) src0_row[i0] : 0.0f; if constexpr (sizeof...(src1_ptrs) > 0) { result = (..., (result = bin_op(result, (float)src1s[i_src1 + i10]))); } else { result = bin_op(result, (float)src1[i_src1 + i10]); } dst_row[i0] = (dst_t) result; } } template static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst, const uint3 ne0, const uint3 ne1, const uint3 ne2, const uint32_t ne3, const uint3 prod_012, const uint3 prod_01, const uint3 ne10, const uint3 ne11, const uint3 ne12, const uint3 ne13, /*int s0, */ const int s1, const int s2, const int s3, /*int s00,*/ const int s01, const int s02, const int s03, /*int s10,*/ const int s11, const int s12, const int s13, src1_ptrs... src1s) { const int i = blockDim.x*blockIdx.x + threadIdx.x; const uint32_t i3 = fastdiv(i, prod_012); const uint32_t i2 = fastdiv(i - i3 * prod_012.z, prod_01); const uint32_t i1 = fastdiv(i - i3 * prod_012.z - i2 * prod_01.z, ne0); const uint32_t i0 = i - i3 * prod_012.z - i2 * prod_01.z - i1 * ne0.z; if (i0 >= ne0.z || i1 >= ne1.z || i2 >= ne2.z || i3 >= ne3) { return; } const int i11 = fastmodulo(i1, ne11); const int i12 = fastmodulo(i2, ne12); const int i13 = fastmodulo(i3, ne13); const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; const size_t i_dst = i3*s3 + i2*s2 + i1*s1; const src0_t * src0_row = src0 ? (src0 + i_src0) : nullptr; dst_t * dst_row = dst + i_dst; const int i10 = fastmodulo(i0, ne10); float result = src0_row ? 
(float) src0_row[i0] : 0.0f; if constexpr (sizeof...(src1_ptrs) > 0) { result = (..., (result = bin_op(result, (float)src1s[i_src1 + i10]))); } else { result = bin_op(result, (float)src1[i_src1 + i10]); } dst_row[i0] = (dst_t) result; } template static void launch_bin_bcast_pack(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd, cudaStream_t stream, std::index_sequence) { GGML_TENSOR_BINARY_OP_LOCALS int nr0 = ne10 / ne0; int nr1 = ne11 / ne1; int nr2 = ne12 / ne2; int nr3 = ne13 / ne3; int nr[4] = { nr0, nr1, nr2, nr3 }; int64_t cne[] = { ne0, ne1, ne2, ne3 }; int64_t cne0[] = { ne00, ne01, ne02, ne03 }; int64_t cne1[] = { ne10, ne11, ne12, ne13 }; size_t cnb[] = { nb0, nb1, nb2, nb3 }; size_t cnb0[] = { nb00, nb01, nb02, nb03 }; size_t cnb1[] = { nb10, nb11, nb12, nb13 }; auto collapse = [](int64_t cne[]) { cne[0] *= cne[1]; cne[1] = cne[2]; cne[2] = cne[3]; cne[3] = 1; }; auto collapse_nb = [](size_t cnb[], const int64_t cne[]) { cnb[1] *= cne[1]; cnb[2] *= cne[2]; cnb[3] *= cne[3]; }; if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) { for (int i = 0; i < 4; i++) { if (nr[i] != 1) { break; } if (i > 0) { collapse_nb(cnb, cne); collapse_nb(cnb0, cne0); collapse_nb(cnb1, cne1); collapse(cne); collapse(cne0); collapse(cne1); } } } { int64_t ne0 = cne[0]; int64_t ne1 = cne[1]; int64_t ne2 = cne[2]; int64_t ne3 = cne[3]; //int64_t ne00 = cne0[0]; GGML_UNUSED(ne00); //int64_t ne01 = cne0[1]; GGML_UNUSED(ne01); //int64_t ne02 = cne0[2]; GGML_UNUSED(ne02); //int64_t ne03 = cne0[3]; GGML_UNUSED(ne03); size_t nb0 = cnb[0]; size_t nb1 = cnb[1]; size_t nb2 = cnb[2]; size_t nb3 = cnb[3]; size_t nb00 = cnb0[0]; size_t nb01 = cnb0[1]; size_t nb02 = cnb0[2]; size_t nb03 = cnb0[3]; size_t nb10 = cnb1[0]; size_t nb11 = cnb1[1]; size_t nb12 = cnb1[2]; size_t nb13 = cnb1[3]; size_t s0 = nb0 / sizeof(dst_t); size_t s1 = nb1 / sizeof(dst_t); size_t s2 = nb2 / sizeof(dst_t); size_t s3 = nb3 / sizeof(dst_t); size_t s10 = nb10 / sizeof(src1_t); size_t s11 = nb11 / sizeof(src1_t); size_t s12 = nb12 / sizeof(src1_t); size_t s13 = nb13 / sizeof(src1_t); size_t s00 = nb00 / sizeof(src0_t); size_t s01 = nb01 / sizeof(src0_t); size_t s02 = nb02 / sizeof(src0_t); size_t s03 = nb03 / sizeof(src0_t); GGML_ASSERT(nb0 % sizeof(dst_t) == 0); GGML_ASSERT(nb1 % sizeof(dst_t) == 0); GGML_ASSERT(nb2 % sizeof(dst_t) == 0); GGML_ASSERT(nb3 % sizeof(dst_t) == 0); GGML_ASSERT(nb00 % sizeof(src0_t) == 0); GGML_ASSERT(nb01 % sizeof(src0_t) == 0); GGML_ASSERT(nb02 % sizeof(src0_t) == 0); GGML_ASSERT(nb03 % sizeof(src0_t) == 0); GGML_ASSERT(nb10 % sizeof(src1_t) == 0); GGML_ASSERT(nb11 % sizeof(src1_t) == 0); GGML_ASSERT(nb12 % sizeof(src1_t) == 0); GGML_ASSERT(nb13 % sizeof(src1_t) == 0); GGML_ASSERT(s0 == 1); GGML_ASSERT(s00 == 1); GGML_ASSERT(s10 == 1); const int block_size = 128; int64_t hne0 = std::max(ne0 / 2LL, 1LL); dim3 block_dims; block_dims.x = std::min(hne0, block_size); block_dims.y = std::min(ne1, block_size / block_dims.x); block_dims.z = std::min(std::min(ne2 * ne3, block_size / block_dims.x / block_dims.y), 64U); dim3 block_nums((hne0 + block_dims.x - 1) / block_dims.x, (ne1 + block_dims.y - 1) / block_dims.y, (ne2 * ne3 + block_dims.z - 1) / block_dims.z); const uint3 ne10 = init_fastdiv_values((uint32_t) cne1[0]); const uint3 ne11 = init_fastdiv_values((uint32_t) cne1[1]); const uint3 ne12 = init_fastdiv_values((uint32_t) cne1[2]); const uint3 ne13 = init_fastdiv_values((uint32_t) cne1[3]); if 
(block_nums.z > 65535 || block_nums.y > 65535) { int block_num = (ne0 * ne1 * ne2 * ne3 + block_size - 1) / block_size; const uint3 prod_012 = init_fastdiv_values((uint32_t) (ne0 * ne1 * ne2)); const uint3 prod_01 = init_fastdiv_values((uint32_t) (ne0 * ne1)); const uint3 ne0_fastdiv = init_fastdiv_values((uint32_t) ne0); const uint3 ne1_fastdiv = init_fastdiv_values((uint32_t) ne1); const uint3 ne2_fastdiv = init_fastdiv_values((uint32_t) ne2); if constexpr (sizeof...(I) > 0) { k_bin_bcast_unravel<<>>( src0_dd, src1_dd, dst_dd, ne0_fastdiv, ne1_fastdiv, ne2_fastdiv, ne3, prod_012, prod_01, ne10, ne11, ne12, ne13, /* s0, */ s1, s2, s3, /* s00,*/ s01, s02, s03, /* s10,*/ s11, s12, s13, (const src1_t *) dst->src[I + 1]->data...); } else { k_bin_bcast_unravel <<>>(src0_dd, src1_dd, dst_dd, ne0_fastdiv, ne1_fastdiv, ne2_fastdiv, ne3, prod_012, prod_01, ne10, ne11, ne12, ne13, /* s0, */ s1, s2, s3, /* s00,*/ s01, s02, s03, /* s10,*/ s11, s12, s13); } } else { const uint3 ne3_fastdiv = init_fastdiv_values((uint32_t) ne3); if constexpr (sizeof...(I) > 0) { k_bin_bcast<<>>( src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3_fastdiv, ne10, ne11, ne12, ne13, /* s0, */ s1, s2, s3, /* s00,*/ s01, s02, s03, /* s10,*/ s11, s12, s13, (const src1_t *) dst->src[I + 1]->data...); } else { k_bin_bcast<<>>( src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3_fastdiv, ne10, ne11, ne12, ne13, /* s0, */ s1, s2, s3, /* s00,*/ s01, s02, s03, /* s10,*/ s11, s12, s13); } } } } template static __global__ void k_repeat_back( const T * __restrict__ src, T * __restrict__ dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const size_t s00, const size_t s01, const size_t s02, const size_t s03, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3) { const int64_t tid0 = int64_t(blockIdx.x)*blockDim.x + threadIdx.x; const int64_t tid1 = int64_t(blockIdx.y)*blockDim.y + threadIdx.y; const int64_t tid23 = int64_t(blockIdx.z)*blockDim.z + threadIdx.z; const int64_t tid2 = tid23 % ne2; const int64_t tid3 = tid23 / ne2; if (tid0 >= ne0) { return; } T sum = 0; for (int64_t i3 = tid3; i3 < ne03; i3 += ne3) { for (int64_t i2 = tid2; i2 < ne02; i2 += ne2) { for (int64_t i1 = tid1; i1 < ne01; i1 += ne1) { for (int64_t i0 = tid0; i0 < ne00; i0 += ne0) { sum += src[i3*s03 + i2*s02 + i1*s01 + i0*s00]; } } } } dst[tid3*ne2*ne1*ne0 + tid2*ne1*ne0 + tid1*ne0 + tid0] = sum; } template struct bin_bcast_cuda { template void operator()(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd, cudaStream_t stream) { launch_bin_bcast_pack( src0, src1, dst, src0_dd, src1_dd, dst_dd, stream, std::make_index_sequence{}); } }; template static void repeat_back_cuda( const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const size_t s00, const size_t s01, const size_t s02, const size_t s03, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) { const dim3 block_dims(WARP_SIZE, 1, 1); const dim3 block_nums((ne0 + WARP_SIZE - 1) / WARP_SIZE, ne1, ne2*ne3); k_repeat_back<<>> (src, dst, ne00, ne01, ne02, ne03, s00, s01, s02, s03, ne0, ne1, ne2, ne3); } template static void ggml_cuda_op_bin_bcast( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const void * src0_dd, const void * src1_dd, void * dst_dd, cudaStream_t stream) { GGML_ASSERT(src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); if 
(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { op()(src0, src1, dst, (const float *)src0_dd, (const float *)src1_dd, (float *)dst_dd, stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { op()(src0, src1, dst, (const half *) src0_dd, (const half *)src1_dd, (half *) dst_dd, stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) { op()(src0, src1, dst, (const half *) src0_dd, (const float *)src1_dd, (half *) dst_dd, stream); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { op()(src0, src1, dst, (const half *) src0_dd, (const float *)src1_dd, (float *)dst_dd, stream); } else { fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__, ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type)); GGML_ABORT("fatal error"); } } void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_bin_bcast>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream()); } void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); } void ggml_cuda_op_sub(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); } void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); } void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_bin_bcast>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream()); } template static void ggml_cuda_op_fused_binbcast_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { cudaStream_t stream = ctx.stream(); const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { launch_bin_bcast_pack(src0, src1, dst, (const float *) src0->data, (const float *) src1->data, (float *) dst->data, stream, std::make_index_sequence{}); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { launch_bin_bcast_pack(src0, src1, dst, (const half *) src0->data, (const half *) src1->data, (half *) dst->data, stream, std::make_index_sequence{}); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) { launch_bin_bcast_pack(src0, src1, dst, (const half *) src0->data, (const float *) src1->data, (half *) dst->data, stream, std::make_index_sequence{}); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { launch_bin_bcast_pack(src0, src1, dst, (const half *) src0->data, (const float *) src1->data, (float *) dst->data, stream, std::make_index_sequence{}); } else { fprintf(stderr, "%s: unsupported types for fusion: dst: %s, src0: %s, src1: %s\n", __func__, ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type)); GGML_ABORT("fatal error"); } } void ggml_cuda_op_fused_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst, int n_fuse) { GGML_ASSERT(2 <= n_fuse && n_fuse <= 8); switch (n_fuse) { case 2: ggml_cuda_op_fused_binbcast_impl(ctx, dst); break; case 3: ggml_cuda_op_fused_binbcast_impl(ctx, dst); 
break; case 4: ggml_cuda_op_fused_binbcast_impl(ctx, dst); break; case 5: ggml_cuda_op_fused_binbcast_impl(ctx, dst); break; case 6: ggml_cuda_op_fused_binbcast_impl(ctx, dst); break; case 7: ggml_cuda_op_fused_binbcast_impl(ctx, dst); break; case 8: ggml_cuda_op_fused_binbcast_impl(ctx, dst); break; default: GGML_ASSERT(false && "Unsupported n_fuse value"); } } void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_can_repeat(dst, src0)); cudaStream_t stream = ctx.stream(); GGML_TENSOR_UNARY_OP_LOCALS; GGML_ASSERT(ne2*ne3 <= (1 << 15)); const size_t ts = ggml_type_size(src0->type); const size_t s00 = nb00 / ts; const size_t s01 = nb01 / ts; const size_t s02 = nb02 / ts; const size_t s03 = nb03 / ts; switch (dst->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0->data; float * dst_d = (float *) dst->data; repeat_back_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s00, s01, s02, s03, ne0, ne1, ne2, ne3, stream); } break; default: { GGML_ASSERT(false); } break; } } ggml-org-ggml-3678254/src/ggml-cuda/binbcast.cuh000066400000000000000000000011031512524704700212740ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_sub(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_fused_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst, int n_fuse); ggml-org-ggml-3678254/src/ggml-cuda/clamp.cu000066400000000000000000000030641512524704700204430ustar00rootroot00000000000000#include "clamp.cuh" static __device__ __forceinline__ float op_clamp(float x, float min, float max) { return fminf(fmaxf(x, min), max); } template static __global__ void op_clamp_kernel(const T * x, T * dst, const T min, const T max, const int k) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; } dst[i] = (T)op_clamp((float)x[i], (float)min, (float)max); } template static void clamp_cuda(const T * x, T * dst, const T min, const T max, const int k, cudaStream_t stream) { const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE; op_clamp_kernel<<>>(x, dst, min, max, k); } void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const void * src0_d = src0->data; void * dst_d = dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); float min; float max; memcpy(&min, dst->op_params, sizeof(float)); memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); if (src0->type == GGML_TYPE_F16) { clamp_cuda((const half *)src0_d, (half *)dst_d, (half)min, (half)max, ggml_nelements(src0), stream); } else { clamp_cuda((const float *)src0_d, (float *)dst_d, (float)min, (float)max, ggml_nelements(src0), stream); } } ggml-org-ggml-3678254/src/ggml-cuda/clamp.cuh000066400000000000000000000002071512524704700206070ustar00rootroot00000000000000#include "common.cuh" #define 
CUDA_CLAMP_BLOCK_SIZE 256 void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/common.cuh000066400000000000000000001336271512524704700210200ustar00rootroot00000000000000#pragma once #include "ggml.h" #include "ggml-impl.h" #include "ggml-cuda.h" #include #include #if defined(GGML_USE_HIP) #define GGML_COMMON_DECL_HIP #define GGML_COMMON_IMPL_HIP #else #define GGML_COMMON_DECL_CUDA #define GGML_COMMON_IMPL_CUDA #if defined(GGML_USE_MUSA) #define GGML_COMMON_DECL_MUSA #define GGML_COMMON_IMPL_MUSA #endif #endif #include "ggml-common.h" #include #include #include #include #include #include #include #include #if defined(GGML_USE_HIP) #include "vendors/hip.h" #elif defined(GGML_USE_MUSA) #include "vendors/musa.h" #else #include "vendors/cuda.h" #endif // defined(GGML_USE_HIP) #define STRINGIZE_IMPL(...) #__VA_ARGS__ #define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__) #define WARP_SIZE 32 #define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed) #define CUDART_HMASK 12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons #define GGML_CUDA_CC_PASCAL 600 #define GGML_CUDA_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products #define GGML_CUDA_CC_VOLTA 700 #define GGML_CUDA_CC_TURING 750 #define GGML_CUDA_CC_AMPERE 800 #define GGML_CUDA_CC_ADA_LOVELACE 890 // While BW spans CC 1000, 1100 & 1200, we are integrating Tensor Core instructions available to 1200 family, see // https://docs.nvidia.com/cutlass/media/docs/cpp/blackwell_functionality.html#blackwell-sm120-gemms #define GGML_CUDA_CC_BLACKWELL 1200 #define GGML_CUDA_CC_RUBIN 1300 #define GGML_CUDA_CC_OFFSET_AMD 0x1000000 #define GGML_CUDA_CC_OFFSET_MTHREADS 0x0100000 #define GGML_CUDA_CC_IS_NVIDIA(cc) (cc < GGML_CUDA_CC_OFFSET_MTHREADS) // AMD // GCN/CDNA, wave size is 64 #define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 0x803) // Tonga, Fiji, Polaris, minimum for fast fp16 #define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 0x900) // Vega56/64, minimum for fp16 dual issue #define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 0x906) // MI50/Radeon VII, minimum for dp4a #define GGML_CUDA_CC_CDNA1 (GGML_CUDA_CC_OFFSET_AMD + 0x908) // MI100, minimum for MFMA, acc registers #define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x910) // MI210, minimum acc register renameing #define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x942) // MI300 // RDNA removes MFMA, dp4a, xnack, acc registers, wave size is 32 #define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 0x1010) // RX 5000 #define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x1030) // RX 6000, minimum for dp4a #define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x1100) // RX 7000, minimum for WMMA #define GGML_CUDA_CC_RDNA3_5 (GGML_CUDA_CC_OFFSET_AMD + 0x1150) // AI 370, AI Max 395 laptops. 
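// For example, with the offsets above an RDNA3.5 iGPU is encoded as
// 0x1000000 + 0x1150 = 0x1001150, well clear of both the plain NVIDIA compute
// capabilities (e.g. 890 for Ada Lovelace) and the Moore Threads range that
// starts at 0x0100000. Illustrative compile-time sanity checks, using only the
// macros defined above:
static_assert(GGML_CUDA_CC_RDNA3_5 == GGML_CUDA_CC_OFFSET_AMD + 0x1150, "unexpected RDNA3.5 encoding");
static_assert(GGML_CUDA_CC_OFFSET_AMD > GGML_CUDA_CC_OFFSET_MTHREADS, "AMD offset must sit above the Moore Threads offset");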
#define GGML_CUDA_CC_RDNA4 (GGML_CUDA_CC_OFFSET_AMD + 0x1200) // RX 9000 #define GGML_CUDA_CC_IS_AMD(cc) (cc >= GGML_CUDA_CC_OFFSET_AMD) #define GGML_CUDA_CC_IS_RDNA(cc) (cc >= GGML_CUDA_CC_RDNA1) #define GGML_CUDA_CC_IS_RDNA1(cc) (cc >= GGML_CUDA_CC_RDNA1 && cc < GGML_CUDA_CC_RDNA2) #define GGML_CUDA_CC_IS_RDNA2(cc) (cc >= GGML_CUDA_CC_RDNA2 && cc < GGML_CUDA_CC_RDNA3) #define GGML_CUDA_CC_IS_RDNA3_0(cc) (cc >= GGML_CUDA_CC_RDNA3 && cc < GGML_CUDA_CC_RDNA3_5) #define GGML_CUDA_CC_IS_RDNA3_5(cc) (cc >= GGML_CUDA_CC_RDNA3_5 && cc < GGML_CUDA_CC_RDNA4) #define GGML_CUDA_CC_IS_RDNA3(cc) (GGML_CUDA_CC_IS_RDNA3_0(cc) || GGML_CUDA_CC_IS_RDNA3_5(cc)) #define GGML_CUDA_CC_IS_RDNA4(cc) (cc >= GGML_CUDA_CC_RDNA4) #define GGML_CUDA_CC_IS_GCN(cc) (cc > GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA1) #define GGML_CUDA_CC_IS_CDNA(cc) (cc >= GGML_CUDA_CC_CDNA1 && cc < GGML_CUDA_CC_RDNA1) #define GGML_CUDA_CC_IS_CDNA1(cc) (cc >= GGML_CUDA_CC_CDNA1 && cc < GGML_CUDA_CC_CDNA2) #define GGML_CUDA_CC_IS_CDNA2(cc) (cc >= GGML_CUDA_CC_CDNA2 && cc < GGML_CUDA_CC_CDNA3) #define GGML_CUDA_CC_IS_CDNA3(cc) (cc >= GGML_CUDA_CC_CDNA3 && cc < GGML_CUDA_CC_RDNA1) // Moore Threads #define MUSART_HMASK 40300 // MUSA rc4.3, min. ver. for half2 -> uint mask comparisons #define GGML_CUDA_CC_QY1 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x210) // MTT S80, MTT S3000 #define GGML_CUDA_CC_QY2 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x220) // MTT S4000 #define GGML_CUDA_CC_PH1 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x310) // MTT S5000 #define GGML_CUDA_CC_IS_MTHREADS(cc) (cc >= GGML_CUDA_CC_OFFSET_MTHREADS && cc < GGML_CUDA_CC_OFFSET_AMD) #define GGML_CUDA_CC_IS_QY1(cc) (cc >= GGML_CUDA_CC_QY1 && cc < GGML_CUDA_CC_QY2) #define GGML_CUDA_CC_IS_QY2(cc) (cc >= GGML_CUDA_CC_QY2 && cc < GGML_CUDA_CC_PH1) #define GGML_CUDA_CC_IS_PH1(cc) (cc >= GGML_CUDA_CC_PH1) #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11070 # define GGML_CUDA_USE_CUB #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11070 #ifdef __CUDA_ARCH_LIST__ constexpr bool ggml_cuda_has_arch_impl(int) { return false; } template constexpr bool ggml_cuda_has_arch_impl(const int arch, const int first, Archs... rest) { return arch == first || ggml_cuda_has_arch_impl(arch, rest...); } constexpr bool ggml_cuda_has_arch(const int arch) { return ggml_cuda_has_arch_impl(arch, __CUDA_ARCH_LIST__); } constexpr int ggml_cuda_highest_compiled_arch_impl(const int /*arch*/, const int cur) { if (cur == 0) { return -1; } return cur; } template constexpr int ggml_cuda_highest_compiled_arch_impl(const int arch, const int cur, const int first, Archs... rest) { if (first <= arch && first > cur) { return ggml_cuda_highest_compiled_arch_impl(arch, first, rest...); } else { return ggml_cuda_highest_compiled_arch_impl(arch, cur, rest...); } } constexpr int ggml_cuda_highest_compiled_arch(const int arch) { return ggml_cuda_highest_compiled_arch_impl(arch, 0, __CUDA_ARCH_LIST__); } #else static int ggml_cuda_highest_compiled_arch(const int arch) { return arch; } #endif // __CUDA_ARCH_LIST__ // --------------------------------------------------------------------------------------------------------- #define MATRIX_ROW_PADDING 512 // last row of quant. 
matrices is a multiple of this to avoid out-of-bounds memory accesses #define GGML_CUDA_MAX_STREAMS 8 [[noreturn]] void ggml_cuda_error(const char * stmt, const char * func, const char * file, int line, const char * msg); #define CUDA_CHECK_GEN(err, success, error_fn) \ do { \ auto err_ = (err); \ if (err_ != (success)) { \ ggml_cuda_error(#err, __func__, __FILE__, __LINE__, error_fn(err_)); \ } \ } while (0) #define CUDA_CHECK(err) CUDA_CHECK_GEN(err, cudaSuccess, cudaGetErrorString) #if CUDART_VERSION >= 12000 || defined(GGML_USE_MUSA) static const char * cublas_get_error_str(const cublasStatus_t err) { return cublasGetStatusString(err); } #else static const char * cublas_get_error_str(const cublasStatus_t err) { switch (err) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; default: return "unknown error"; } } #endif // CUDART_VERSION >= 12000 #define CUBLAS_CHECK(err) CUDA_CHECK_GEN(err, CUBLAS_STATUS_SUCCESS, cublas_get_error_str) #if !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) static const char * cu_get_error_str(CUresult err) { const char * err_str; cuGetErrorString(err, &err_str); return err_str; } #define CU_CHECK(err) CUDA_CHECK_GEN(err, CUDA_SUCCESS, cu_get_error_str) #endif #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) # define CUDA_SET_SHARED_MEMORY_LIMIT(kernel, nbytes) \ do { \ static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = { false }; \ const int id = ggml_cuda_get_device(); \ if (!shared_memory_limit_raised[id]) { \ CUDA_CHECK(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes)); \ shared_memory_limit_raised[id] = true; \ } \ } while (0) #else # define CUDA_SET_SHARED_MEMORY_LIMIT(kernel, nbytes) \ do { \ GGML_UNUSED(nbytes); \ } while (0) #endif // !(defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) #if CUDART_VERSION >= 11010 || defined(GGML_USE_MUSA) #define GGML_CUDA_ASSUME(x) __builtin_assume(x) #else #define GGML_CUDA_ASSUME(x) #endif // CUDART_VERSION >= 11010 #if (!defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM)) || (defined(GGML_USE_HIP) && !defined(GGML_HIP_NO_VMM)) #define GGML_USE_VMM #endif // (!defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM)) || (defined(GGML_USE_HIP) && !defined(GGML_HIP_NO_VMM)) #if defined(GGML_USE_HIP) || defined(GGML_USE_MUSA) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL #define FP16_AVAILABLE #endif // defined(GGML_USE_HIP) || defined(GGML_USE_MUSA) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL #if defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610 #define FAST_FP16_AVAILABLE #endif // defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610 #if defined(GGML_USE_HIP) && defined(CDNA) && !defined(GGML_HIP_NO_MMQ_MFMA) #define AMD_MFMA_AVAILABLE #endif // defined(GGML_USE_HIP) && defined(CDNA) && !defined(GGML_HIP_NO_MMQ_MFMA) #if defined(GGML_USE_HIP) && (defined(RDNA4) || defined(RDNA3)) #define AMD_WMMA_AVAILABLE #endif // defined(GGML_USE_HIP) && defined(RDNA4) // The Volta instructions are in 
principle available on Turing or newer but they are effectively unusable: #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA #define VOLTA_MMA_AVAILABLE #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #define TURING_MMA_AVAILABLE #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #define AMPERE_MMA_AVAILABLE #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_BLACKWELL && __CUDA_ARCH__ < GGML_CUDA_CC_RUBIN # define BLACKWELL_MMA_AVAILABLE #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_BLACKWELL #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #define CP_ASYNC_AVAILABLE #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ < 220) #define FLASH_ATTN_AVAILABLE #endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ < 220) static bool fp16_available(const int cc) { return ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_PASCAL || (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_PH1); } static bool fast_fp16_available(const int cc) { return GGML_CUDA_CC_IS_AMD(cc) || (GGML_CUDA_CC_IS_NVIDIA(cc) && fp16_available(cc) && ggml_cuda_highest_compiled_arch(cc) != 610) || (GGML_CUDA_CC_IS_MTHREADS(cc) && fp16_available(cc)); } // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fast_fp16_hardware_available(const int cc) { return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_PASCAL && cc != 610) || GGML_CUDA_CC_IS_AMD(cc) || (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); } // To be used for feature selection of external libraries, e.g. cuBLAS. 
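// Unlike the fp16_available()/fast_fp16_available() helpers above, the
// *_hardware_available() variants deliberately ignore which architectures the
// kernels were compiled for (ggml_cuda_highest_compiled_arch) and only describe
// the physical device, since an external library ships its own kernels.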
static bool fp16_mma_hardware_available(const int cc) { return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc) || (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); } static bool bf16_mma_hardware_available(const int cc) { return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_AMPERE) || GGML_CUDA_CC_IS_CDNA(cc) || cc >= GGML_CUDA_CC_RDNA3 || (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_PH1); } static bool fp32_mma_hardware_available(const int cc) { return GGML_CUDA_CC_IS_CDNA(cc); } static bool amd_mfma_available(const int cc) { #if !defined(GGML_HIP_NO_MMQ_MFMA) return GGML_CUDA_CC_IS_CDNA(cc); #else return false; #endif //!defined(GGML_HIP_NO_MMQ_MFMA) } static bool amd_wmma_available(const int cc) { return (GGML_CUDA_CC_IS_RDNA4(cc) || GGML_CUDA_CC_IS_RDNA3(cc)); } static bool volta_mma_available(const int cc) { return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_VOLTA; } static bool turing_mma_available(const int cc) { return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_TURING; } static bool ampere_mma_available(const int cc) { return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_AMPERE; } static bool cp_async_available(const int cc) { return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_AMPERE; } static bool blackwell_mma_available(const int cc) { return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_BLACKWELL && ggml_cuda_highest_compiled_arch(cc) < GGML_CUDA_CC_RUBIN; } static constexpr __device__ int ggml_cuda_get_physical_warp_size() { #if defined(GGML_USE_HIP) && (defined(__GFX9__) || defined(__GFX8__)) return 64; #else return 32; #endif // defined(GGML_USE_HIP) && (defined(__GFX9__) || defined(__GFX8__)) } // Maximum number of bytes that can be copied in a single instruction. static constexpr __device__ int ggml_cuda_get_max_cpy_bytes() { #ifdef GGML_USE_HIP return 16; #else #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA return 16; #else return 8; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #endif // GGML_USE_HIP } [[noreturn]] static __device__ void no_device_code( const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) { #if defined(GGML_USE_HIP) printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n", file_name, line, function_name, arch); GGML_UNUSED(arch_list); #else printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n", file_name, line, function_name, arch, arch_list); #endif // defined(GGML_USE_HIP) __trap(); GGML_UNUSED(no_device_code); // suppress unused function warning #if defined(GGML_USE_MUSA) __builtin_unreachable(); #endif // defined(GGML_USE_MUSA) } #ifdef __CUDA_ARCH__ #define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__)) #else #define NO_DEVICE_CODE //GGML_ABORT("NO_DEVICE_CODE not valid in host code.") #endif // __CUDA_ARCH__ // The compiler is always able to unroll loops if they contain continue expressions. // In such cases loop unrolling can still be achieved via recursion: template struct ggml_cuda_unroll { template __device__ void operator()(const Func & f, Args... 
args) const { f(n - 1, args...); ggml_cuda_unroll{}(f, args...); } }; template <> struct ggml_cuda_unroll<1> { template __device__ void operator()(const Func & f, Args... args) const { f(0, args...); } }; template static __device__ __forceinline__ int warp_reduce_sum(int x) { #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE return __reduce_add_sync(0xffffffff, x); #else #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { x += __shfl_xor_sync(0xffffffff, x, offset, width); } return x; #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE } template static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { x += __shfl_xor_sync(0xffffffff, x, offset, width); } return x; } template static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { a.x += __shfl_xor_sync(0xffffffff, a.x, offset, width); a.y += __shfl_xor_sync(0xffffffff, a.y, offset, width); } return a; } template static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { #ifdef FP16_AVAILABLE #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, offset, width)); } return a; #else NO_DEVICE_CODE; return a; #endif // FP16_AVAILABLE } template static __device__ __forceinline__ int warp_reduce_all(int x) { if (width == ggml_cuda_get_physical_warp_size()) { return __all_sync(0xffffffff, x); } else { #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { x = __shfl_xor_sync(0xffffffff, x, offset, width) && x; } return x; } } template static __device__ __forceinline__ int warp_reduce_any(int x) { if (width == ggml_cuda_get_physical_warp_size()) { return __any_sync(0xffffffff, x); } else { #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { x = __shfl_xor_sync(0xffffffff, x, offset, width) || x; } return x; } } template static __device__ __forceinline__ float warp_reduce_max(float x) { #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, offset, width)); } return x; } template static __device__ __forceinline__ T warp_prefix_inclusive_sum(T x) { const int lane_id = threadIdx.x % width; #pragma unroll for (int offset = 1; offset < width; offset <<= 1) { const T t = __shfl_up_sync(0xffffffff, x, offset, width); if (lane_id >= offset) { x += t; } } return x; } template static __device__ __forceinline__ float2 warp_prefix_inclusive_sum(float2 a) { const int lane_id = threadIdx.x % width; #pragma unroll for (int offset = 1; offset < width; offset <<= 1) { const float t_x = __shfl_up_sync(0xffffffff, a.x, offset, width); const float t_y = __shfl_up_sync(0xffffffff, a.y, offset, width); if (lane_id >= offset) { a.x += t_x; a.y += t_y; } } return a; } template static __device__ __forceinline__ half2 warp_prefix_inclusive_sum(half2 a) { #ifdef FP16_AVAILABLE const int lane_id = threadIdx.x % width; #pragma unroll for (int offset = 1; offset < width; offset <<= 1) { const half2 t = __shfl_up_sync(0xffffffff, a, offset, width); if (lane_id >= offset) { a = __hadd2(a, t); } } return a; #else NO_DEVICE_CODE; return a; #endif // FP16_AVAILABLE } static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) { #ifdef FP16_AVAILABLE #if !defined(GGML_USE_HIP) && CUDART_VERSION < CUDART_HMAX return __float2half(fmaxf(__half2float(a), __half2float(b))); #else return 
__hmax(a, b); #endif // !defined(GGML_USE_HIP) && CUDART_VERSION < CUDART_HMAX #else NO_DEVICE_CODE; GGML_UNUSED(b); return a; #endif // FP16_AVAILABLE } static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) { #if defined(GGML_USE_HIP) return half2(__hmax(a.x, b.x), __hmax(a.y, b.y)); #elif CUDART_VERSION >= CUDART_HMAX return __hmax2(a, b); #else half2 ret; reinterpret_cast(ret.x) = __float2half(fmaxf( __low2float(a), __low2float(b))); reinterpret_cast(ret.y) = __float2half(fmaxf(__high2float(a), __high2float(b))); return ret; #endif } template static __device__ __forceinline__ half2 warp_reduce_max(half2 x) { #if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || defined(GGML_USE_HIP) #pragma unroll for (int offset = width/2; offset > 0; offset >>= 1) { x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, offset, width)); } return x; #else GGML_UNUSED(x); NO_DEVICE_CODE; #endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || defined(GGML_USE_HIP) } #if (defined(CUDART_VERSION) && CUDART_VERSION < CUDART_HMASK) || defined(GGML_USE_HIP) || \ (defined(MUSART_VERSION) && MUSART_VERSION < MUSART_HMASK) static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) { const uint32_t mask_low = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b))); const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b))); return mask_low | mask_high; } #endif // (defined(CUDART_VERSION) && CUDART_VERSION < CUDART_HMASK) || defined(GGML_USE_HIP) || (defined(MUSART_VERSION) && MUSART_VERSION < MUSART_HMASK) static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, int c) { #if defined(GGML_USE_HIP) #if defined(CDNA) || defined(RDNA2) || defined(__gfx906__) c = __builtin_amdgcn_sdot4(a, b, c, false); #elif defined(RDNA3) || defined(RDNA4) c = __builtin_amdgcn_sudot4( true, a, true, b, c, false); #elif defined(RDNA1) || defined(__gfx900__) int tmp1; int tmp2; asm("\n \ v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \ v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \ v_add3_u32 %0, %1, %2, %0 \n \ v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \ v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \ v_add3_u32 %0, %1, %2, %0 \n \ " : "+v"(c), "=&v"(tmp1), "=&v"(tmp2) : "v"(a), "v"(b) ); #else const int8x4_t va = reinterpret_cast(a); const int8x4_t vb = reinterpret_cast(b); c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3]; #endif return c; #else // defined(GGML_USE_HIP) #if __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A || defined(GGML_USE_MUSA) return __dp4a(a, b, c); #else // __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A || defined(GGML_USE_MUSA) const int8_t * a8 = (const int8_t *) &a; const int8_t * b8 = (const int8_t *) &b; return c + a8[0]*b8[0] + a8[1]*b8[1] + a8[2]*b8[2] + a8[3]*b8[3]; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A || defined(GGML_USE_MUSA) #endif // defined(GGML_USE_HIP) } static __device__ __forceinline__ void ggml_cuda_mad(float & acc, const float v, const float u) { acc += v*u; } static __device__ __forceinline__ void ggml_cuda_mad(float & acc, const float2 v, const float2 u) { acc += v.x*u.x; acc += v.y*u.y; } #if defined(GGML_USE_HIP) && (defined(RDNA2) || defined(RDNA3) || defined(RDNA4) || defined(__gfx906__) || 
defined(CDNA)) #define V_DOT2_F32_F16_AVAILABLE #endif // defined(GGML_USE_HIP) && (defined(RDNA2) || defined(RDNA3) || defined(RDNA4) || defined(__gfx906__) || defined(CDNA)) static __device__ __forceinline__ void ggml_cuda_mad(float & acc, const half2 v, const half2 u) { #ifdef V_DOT2_F32_F16_AVAILABLE asm volatile("v_dot2_f32_f16 %0, %1, %2, %0" : "+v"(acc) : "v"(v), "v"(u)); #else #ifdef FAST_FP16_AVAILABLE const float2 tmp = __half22float2(v*u); acc += tmp.x + tmp.y; #else const float2 tmpv = __half22float2(v); const float2 tmpu = __half22float2(u); acc += tmpv.x * tmpu.x; acc += tmpv.y * tmpu.y; #endif // FAST_FP16_AVAILABLE #endif // V_DOT2_F32_F16_AVAILABLE } static __device__ __forceinline__ void ggml_cuda_mad(half2 & acc, const half2 v, const half2 u) { #ifdef FAST_FP16_AVAILABLE acc += v*u; #else const float2 tmpv = __half22float2(v); const float2 tmpu = __half22float2(u); float2 tmpacc = __half22float2(acc); tmpacc.x += tmpv.x * tmpu.x; tmpacc.y += tmpv.y * tmpu.y; acc = make_half2(tmpacc.x, tmpacc.y); #endif // FAST_FP16_AVAILABLE } // Aligned memory transfers of 8/16 bytes can be faster than 2 transfers with 4 bytes, especially on AMD. // Important: do not use this function if dst and src both point at registers. // Due to the strict aliasing rule the compiler can do incorrect optimizations if src and dst have different types. // The function is intended for copies between registers and SRAM/VRAM to make the compiler emit the right instructions. // If dst and src point at different address spaces then they are guaranteed to not be aliased. template static __device__ __forceinline__ void ggml_cuda_memcpy_1(void * __restrict__ dst, const void * __restrict__ src) { static_assert( nbytes <= ggml_cuda_get_max_cpy_bytes() || alignment == 0, "You are misusing the alignment parameter for ggml_cuda_memcpy_1. " "The intent is for the parameter is only as a workaround if either one of the pointers is not properly aligned. " "If you use it to do more bytes per copy than ggml_cuda_max_cpy_bytes() the reads and writes may not be coalesced. " "Call ggml_cuda_memcpy_1 in a loop instead."); if constexpr (alignment != 0) { static_assert(nbytes % alignment == 0, "bad alignment"); } constexpr int nb_per_cpy = alignment == 0 ? 
nbytes : alignment; #pragma unroll for (int i = 0; i < nbytes/nb_per_cpy; ++i) { if constexpr (nb_per_cpy == 1) { ((char *) dst)[i] = ((const char *) src)[i]; } else if constexpr (nb_per_cpy == 2) { ((short *) dst)[i] = ((const short *) src)[i]; } else if constexpr (nb_per_cpy == 4) { ((int *) dst)[i] = ((const int *) src)[i]; } else if constexpr (nb_per_cpy == 8) { ((int2 *) dst)[i] = ((const int2 *) src)[i]; } else if constexpr (nb_per_cpy == 16) { ((int4 *) dst)[i] = ((const int4 *) src)[i]; } else { static_assert(nbytes == 0 && nbytes == -1, "bad nbytes"); } } } static __device__ __forceinline__ float ggml_cuda_e8m0_to_fp32(uint8_t x) { #if CUDART_VERSION >= 12080 const nv_bfloat16 e = __nv_cvt_e8m0_to_bf16raw(x); return (float) e; #else uint32_t bits; if (x == 0) { bits = 0x00400000; } else { bits = (uint32_t) x << 23; } float result; memcpy(&result, &bits, sizeof(float)); return result; #endif // CUDART_VERSION >= 12050 } __device__ __forceinline__ uint8_t ggml_cuda_float_to_fp4_e2m1(float x, float e) { const uint8_t sign_bit = (x < 0.0f) << 3; float ax = fabsf(x) * e; // Positive LUT static constexpr float pos_lut[8] = { 0.0f, 0.5f, 1.0f, 1.5f, 2.0f, 3.0f, 4.0f, 6.0f }; int best_i = 0; float best_err = fabsf(ax - pos_lut[0]); #pragma unroll for (int i = 1; i < 8; ++i) { const float err = fabsf(ax - pos_lut[i]); if (err < best_err) { best_err = err; best_i = i; } } return static_cast(best_i | sign_bit); } // See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1. // Precompute mp (m' in the paper) and L such that division // can be computed using a multiply (high 32b of 64b result) // and a shift: // // n/d = (mulhi(n, mp) + n) >> L; static const uint3 init_fastdiv_values(uint64_t d_64) { GGML_ASSERT(d_64 != 0); GGML_ASSERT(d_64 <= std::numeric_limits::max()); uint32_t d = (uint32_t)d_64; // compute L = ceil(log2(d)); uint32_t L = 0; while (L < 32 && (uint32_t{ 1 } << L) < d) { L++; } uint32_t mp = (uint32_t) ((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d) / d + 1); // pack divisor as well to reduce error surface return make_uint3(mp, L, d); } static __device__ __forceinline__ uint32_t fastdiv(uint32_t n, const uint3 fastdiv_values) { // expects fastdiv_values to contain in // fastdiv_values.z is unused and optimized away by the compiler. // Compute high 32 bits of n * mp const uint32_t hi = __umulhi(n, fastdiv_values.x); // add n, apply bit shift return (hi + n) >> fastdiv_values.y; } static __device__ __forceinline__ uint32_t fastmodulo(uint32_t n, const uint3 fastdiv_values) { // expects fastdiv_values to contain in (see init_fastdiv_values) return n - fastdiv(n, fastdiv_values) * fastdiv_values.z; } // Calculate both division and modulo at once, returns static __device__ __forceinline__ uint2 fast_div_modulo(uint32_t n, const uint3 fastdiv_values) { // expects fastdiv_values to contain in (see init_fastdiv_values) const uint32_t div_val = fastdiv(n, fastdiv_values); const uint32_t mod_val = n - div_val * fastdiv_values.z; return make_uint2(div_val, mod_val); } typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, float2 & v); static __device__ __forceinline__ float get_alibi_slope( const float max_bias, const uint32_t h, const uint32_t n_head_log2, const float m0, const float m1 ) { if (max_bias <= 0.0f) { return 1.0f; } const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? 
h + 1 : 2*(h - n_head_log2) + 1; return powf(base, exph); } template struct ggml_cuda_type_traits; template<> struct ggml_cuda_type_traits { static constexpr int qk = 1; static constexpr int qr = 1; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK4_0; static constexpr int qr = QR4_0; static constexpr int qi = QI4_0; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK4_1; static constexpr int qr = QR4_1; static constexpr int qi = QI4_1; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK5_0; static constexpr int qr = QR5_0; static constexpr int qi = QI5_0; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK5_1; static constexpr int qr = QR5_1; static constexpr int qi = QI5_1; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK8_0; static constexpr int qr = QR8_0; static constexpr int qi = QI8_0; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_MXFP4; static constexpr int qr = QR_MXFP4; static constexpr int qi = QI_MXFP4; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR2_K; static constexpr int qi = QI2_K; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR3_K; static constexpr int qi = QI3_K; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR4_K; static constexpr int qi = QI4_K; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR5_K; static constexpr int qi = QI5_K; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR6_K; static constexpr int qi = QI6_K; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR2_XXS; static constexpr int qi = QI2_XXS; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR2_XS; static constexpr int qi = QI2_XS; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR2_S; static constexpr int qi = QI2_S; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR3_XXS; static constexpr int qi = QI3_XXS; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR1_S; static constexpr int qi = QI1_S; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR1_M; static constexpr int qi = QI1_M; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK4_NL; static constexpr int qr = QR4_NL; static constexpr int qi = QI4_NL; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR4_XS; static constexpr int qi = QI4_XS; }; template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; static constexpr int qr = QR3_S; static constexpr int qi = QI3_S; }; ////////////////////// struct ggml_cuda_device_info { int device_count; struct cuda_device_info { int cc; // compute capability int nsm; // number of streaming multiprocessors size_t smpb; // max. shared memory per block size_t smpbo; // max. 
shared memory per block (with opt-in) bool integrated; // Device is integrated as opposed to discrete bool vmm; // virtual memory support size_t vmm_granularity; // granularity of virtual memory size_t total_vram; int warp_size; // Number of threads in a dispatch }; cuda_device_info devices[GGML_CUDA_MAX_DEVICES] = {}; std::array default_tensor_split = {}; }; const ggml_cuda_device_info & ggml_cuda_info(); void ggml_cuda_set_device(int device); int ggml_cuda_get_device(); struct ggml_cuda_pool { virtual ~ggml_cuda_pool() = default; virtual void * alloc(size_t size, size_t * actual_size) = 0; virtual void free(void * ptr, size_t size) = 0; }; template struct ggml_cuda_pool_alloc { ggml_cuda_pool * pool = nullptr; T * ptr = nullptr; size_t actual_size = 0; ggml_cuda_pool_alloc() = default; explicit ggml_cuda_pool_alloc(ggml_cuda_pool & pool) : pool(&pool) { } ggml_cuda_pool_alloc(ggml_cuda_pool & pool, size_t size) : pool(&pool) { alloc(size); } ~ggml_cuda_pool_alloc() { if (ptr != nullptr) { pool->free(ptr, actual_size); } } // size is in number of elements T * alloc(size_t size) { GGML_ASSERT(pool != nullptr); GGML_ASSERT(ptr == nullptr); ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size); return ptr; } T * alloc(ggml_cuda_pool & pool, size_t size) { this->pool = &pool; return alloc(size); } T * get() { return ptr; } ggml_cuda_pool_alloc(const ggml_cuda_pool_alloc &) = delete; ggml_cuda_pool_alloc(ggml_cuda_pool_alloc &&) = delete; ggml_cuda_pool_alloc& operator=(const ggml_cuda_pool_alloc &) = delete; ggml_cuda_pool_alloc& operator=(ggml_cuda_pool_alloc &&) = delete; }; // backend interface struct ggml_tensor_extra_gpu { void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors cudaEvent_t events[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; // events for synchronizing multiple GPUs }; #if (defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS)) || defined(GGML_MUSA_GRAPHS) #define USE_CUDA_GRAPH #endif struct ggml_graph_node_properties { void * node_address; ggml_op node_op; int64_t ne[GGML_MAX_DIMS]; size_t nb[GGML_MAX_DIMS]; void * src_address[GGML_MAX_SRC]; int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)]; }; struct ggml_cuda_graph { #ifdef USE_CUDA_GRAPH ~ggml_cuda_graph() { if (instance != nullptr) { CUDA_CHECK(cudaGraphExecDestroy(instance)); } if (graph != nullptr) { CUDA_CHECK(cudaGraphDestroy(graph)); } } cudaGraph_t graph = nullptr; cudaGraphExec_t instance = nullptr; size_t num_nodes = 0; std::vector nodes; std::vector params; bool disable_due_to_gpu_arch = false; bool disable_due_to_too_many_updates = false; bool disable_due_to_failed_graph_capture = false; int number_consecutive_updates = 0; std::vector ggml_graph_properties; #endif }; struct ggml_cuda_concurrent_event { std::vector join_events; cudaEvent_t fork_event = nullptr; int n_streams = 0; std::unordered_map stream_mapping; // Original order of nodes in this concurrent region (before interleaving) // Used to restore grouping for fusion within streams std::vector original_order; const ggml_tensor * join_node; ggml_cuda_concurrent_event() = default; ggml_cuda_concurrent_event(const ggml_cuda_concurrent_event &) = delete; ggml_cuda_concurrent_event & operator=(const ggml_cuda_concurrent_event &) = delete; explicit ggml_cuda_concurrent_event(int n_streams) : n_streams(n_streams) { join_events.resize(n_streams); for (size_t i = 0; i < join_events.size(); ++i) { CUDA_CHECK(cudaEventCreateWithFlags(&join_events[i], cudaEventDisableTiming)); } 
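// [illustrative usage sketch; the function below is hypothetical]
// ggml_cuda_pool_alloc above is an RAII helper: the allocation is handed back to the pool
// when the wrapper goes out of scope, so temporary device buffers need no explicit free.
static void hypothetical_tmp_buffer_example(ggml_cuda_pool & pool, int64_t nelements) {
    ggml_cuda_pool_alloc<float> tmp(pool, nelements); // size is in elements, not bytes
    float * buf = tmp.get();
    // ... enqueue kernels that read/write buf ...
    (void) buf;
} // <- tmp's destructor returns the buffer to the pool here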
CUDA_CHECK(cudaEventCreateWithFlags(&fork_event, cudaEventDisableTiming)); } ggml_cuda_concurrent_event(ggml_cuda_concurrent_event && other) noexcept : join_events(std::move(other.join_events)) , fork_event(other.fork_event) , n_streams(other.n_streams) , stream_mapping(std::move(other.stream_mapping)) , original_order(std::move(other.original_order)) , join_node(other.join_node) { other.fork_event = nullptr; } // 1. check if any branches write to overlapping memory ranges (except the join node) // 2. check whether all srcs are either within the branch or outside the nodes covered by ggml_cuda_concurrent_event // we assume all nodes have the same buffer bool is_valid() const { std::vector>> write_ranges; write_ranges.resize(n_streams); // get join_node's memory range to exclude from overlap checking. // multiple nodes can use join_node's buffer; we synchronize on the join node. const ggml_tensor * join_t = join_node->view_src ? join_node->view_src : join_node; const int64_t join_start = (int64_t) join_t->data; const int64_t join_end = join_start + ggml_nbytes(join_t); for (const auto & [tensor, stream] : stream_mapping) { const ggml_tensor * t = tensor->view_src ? tensor->view_src : tensor; const int64_t t_start = (int64_t) t->data; const int64_t t_end = t_start + ggml_nbytes(t); // skip tensors that overlap with join_node's buffer. if ((t_start <= join_start && join_start < t_end) || (join_start <= t_start && t_start < join_end)) { continue; } // concurrent streams begin from 1 write_ranges[stream - 1].emplace_back(t_start, t_end); } for (int i = 0; i < n_streams; ++i) { // sorts first by start then by end of write range std::sort(write_ranges[i].begin(), write_ranges[i].end()); } bool writes_overlap = false; bool dependent_srcs = false; for (const auto & [tensor, stream] : stream_mapping) { const ggml_tensor * t = tensor->view_src ? tensor->view_src : tensor; const int64_t t_start = (int64_t) t->data; const int64_t t_end = t_start + ggml_nbytes(t); // skip tensors that overlap with join_node's buffer if ((t_start <= join_start && join_start < t_end) || (join_start <= t_start && t_start < join_end)) { continue; } // check if this buffer's write data overlaps with another stream's std::pair data_range = std::make_pair(t_start, t_end); for (int i = 0; i < n_streams; ++i) { if (i == stream - 1) { continue; } auto it = std::lower_bound(write_ranges[i].begin(), write_ranges[i].end(), data_range); if (it != write_ranges[i].end()) { const std::pair & other = *it; // std::lower_bound returns the first element where other >= data_range (lexicographically). // This guarantees other.first >= data_range.first. // Therefore, overlap occurs iff other.first < data_range.second // (i.e., the other range starts before this range ends). 
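// [illustrative host-side sketch of the interval check used by is_valid() above]
// Each stream's write ranges are sorted by (start, end); std::lower_bound then yields the
// first range whose start is >= the query's start, and that candidate overlaps iff its start
// is < the query's end. Ranges that begin before the query are caught in another iteration
// of the outer loop, when the earlier-starting tensor is itself the query.
#include <algorithm>
#include <utility>
#include <vector>

static bool overlaps_forward(const std::vector<std::pair<int64_t, int64_t>> & sorted_ranges,
                             const std::pair<int64_t, int64_t> & q) {
    auto it = std::lower_bound(sorted_ranges.begin(), sorted_ranges.end(), q);
    return it != sorted_ranges.end() && it->first < q.second;
}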
if (other.first < data_range.second) { GGML_LOG_DEBUG("Writes overlap for %s", tensor->name); writes_overlap = true; break; } } } //check if all srcs are either in branch or don't have a branch for (int i = 0; i < GGML_MAX_SRC; ++i) { if (!tensor->src[i]) { continue; } auto it = stream_mapping.find(tensor->src[i]); if (it == stream_mapping.end()) { continue; } if (it->second != stream) { dependent_srcs = true; break; } } if (dependent_srcs || writes_overlap) { break; } } return !writes_overlap && !dependent_srcs; } ~ggml_cuda_concurrent_event() { if (fork_event != nullptr) { CUDA_CHECK(cudaEventDestroy(fork_event)); } for (cudaEvent_t e : join_events) { if (e != nullptr) { CUDA_CHECK(cudaEventDestroy(e)); } } } }; struct ggml_cuda_stream_context { std::unordered_map concurrent_events; void reset() { concurrent_events.clear(); } }; struct ggml_backend_cuda_context { int device; std::string name; cudaEvent_t copy_event = nullptr; cudaStream_t streams[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { { nullptr } }; cublasHandle_t cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr}; std::unique_ptr cuda_graph; int curr_stream_no = 0; explicit ggml_backend_cuda_context(int device) : device(device), name(GGML_CUDA_NAME + std::to_string(device)) { } ggml_cuda_stream_context concurrent_stream_context; ~ggml_backend_cuda_context(); cudaStream_t stream(int device, int stream) { if (streams[device][stream] == nullptr) { ggml_cuda_set_device(device); CUDA_CHECK(cudaStreamCreateWithFlags(&streams[device][stream], cudaStreamNonBlocking)); } return streams[device][stream]; } cudaStream_t stream() { return stream(device, curr_stream_no); } ggml_cuda_stream_context & stream_context() { return concurrent_stream_context; } cublasHandle_t cublas_handle(int device) { if (cublas_handles[device] == nullptr) { ggml_cuda_set_device(device); CUBLAS_CHECK(cublasCreate(&cublas_handles[device])); CUBLAS_CHECK(cublasSetMathMode(cublas_handles[device], CUBLAS_TF32_TENSOR_OP_MATH)); } return cublas_handles[device]; } cublasHandle_t cublas_handle() { return cublas_handle(device); } // pool std::unique_ptr pools[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; static std::unique_ptr new_pool_for_device(int device, int stream_no); ggml_cuda_pool & pool(int device) { if (pools[device][curr_stream_no] == nullptr) { pools[device][curr_stream_no] = new_pool_for_device(device, curr_stream_no); } return *pools[device][curr_stream_no]; } ggml_cuda_pool & pool() { return pool(device); } }; struct ggml_cuda_mm_fusion_args_host { const ggml_tensor * x_bias = nullptr; const ggml_tensor * gate = nullptr; const ggml_tensor * gate_bias = nullptr; ggml_glu_op glu_op; }; struct ggml_cuda_mm_fusion_args_device { const void * x_bias = nullptr; const void * gate = nullptr; const void * gate_bias = nullptr; ggml_glu_op glu_op; }; ggml-org-ggml-3678254/src/ggml-cuda/concat.cu000066400000000000000000000166261512524704700206260ustar00rootroot00000000000000#include "concat.cuh" // contiguous kernels static __global__ void concat_f32_dim0(const float * x, const float * y, float * dst, const int ne0, const int ne00) { int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { return; } int offset_dst = nidx + blockIdx.y * ne0 + blockIdx.z * ne0 * gridDim.y; if (nidx < ne00) { // src0 int offset_src = nidx + blockIdx.y * ne00 + blockIdx.z * ne00 * gridDim.y; dst[offset_dst] = x[offset_src]; } else { int offset_src = (nidx - ne00) + blockIdx.y * (ne0 - ne00) + blockIdx.z * (ne0 - ne00) * gridDim.y; dst[offset_dst] = y[offset_src]; } } static 
__global__ void concat_f32_dim1(const float * x, const float * y, float * dst, const int ne0, const int ne01) { int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { return; } int offset_dst = nidx + blockIdx.y * ne0 + blockIdx.z * ne0 * gridDim.y; if (blockIdx.y < (unsigned)ne01) { // src0 int offset_src = nidx + blockIdx.y * ne0 + blockIdx.z * ne0 * ne01; dst[offset_dst] = x[offset_src]; } else { int offset_src = nidx + (blockIdx.y - ne01) * ne0 + blockIdx.z * ne0 * (gridDim.y - ne01); dst[offset_dst] = y[offset_src]; } } static __global__ void concat_f32_dim2(const float * x, const float * y, float * dst, const int ne0, const int ne02) { int nidx = threadIdx.x + blockIdx.x * blockDim.x; if (nidx >= ne0) { return; } int offset_dst = nidx + blockIdx.y * ne0 + blockIdx.z * ne0 * gridDim.y; if (blockIdx.z < (unsigned)ne02) { // src0 int offset_src = nidx + blockIdx.y * ne0 + blockIdx.z * ne0 * gridDim.y; dst[offset_dst] = x[offset_src]; } else { int offset_src = nidx + blockIdx.y * ne0 + (blockIdx.z - ne02) * ne0 * gridDim.y; dst[offset_dst] = y[offset_src]; } } static void concat_f32_cuda(const float * x, const float * y, float * dst, int ne00, int ne01, int ne02, int ne0, int ne1, int ne2, int dim, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_CONCAT_BLOCK_SIZE - 1) / CUDA_CONCAT_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2); if (dim == 0) { concat_f32_dim0<<>>(x, y, dst, ne0, ne00); return; } if (dim == 1) { concat_f32_dim1<<>>(x, y, dst, ne0, ne01); return; } concat_f32_dim2<<>>(x, y, dst, ne0, ne02); } // non-contiguous kernel (slow) template static __global__ void __launch_bounds__(CUDA_CONCAT_BLOCK_SIZE) concat_f32_non_cont( const char * src0, const char * src1, char * dst, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, uint64_t nb00, uint64_t nb01, uint64_t nb02, uint64_t nb03, int64_t /*ne10*/, int64_t /*ne11*/, int64_t /*ne12*/, int64_t /*ne13*/, uint64_t nb10, uint64_t nb11, uint64_t nb12, uint64_t nb13, int64_t ne0, int64_t /*ne1*/, int64_t /*ne2*/, int64_t /*ne3*/, uint64_t nb0, uint64_t nb1, uint64_t nb2, uint64_t nb3){ static_assert(dim >= 0 && dim <= 3, "dim must be in [0, 3]"); const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; const int64_t i1 = blockIdx.x; const float * x; for (int64_t i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { x = (const float *)(src0 + (i3 )*nb03 + (i2 )*nb02 + (i1 )*nb01 + (i0 )*nb00); } else { if constexpr (dim == 0) { x = (const float *) (src1 + i3 * nb13 + i2 * nb12 + i1 * nb11 + (i0 - ne00) * nb10); } else if constexpr (dim == 1) { x = (const float *) (src1 + i3 * nb13 + i2 * nb12 + (i1 - ne01) * nb11 + i0 * nb10); } else if constexpr (dim == 2) { x = (const float *) (src1 + i3 * nb13 + (i2 - ne02) * nb12 + i1 * nb11 + i0 * nb10); } else if constexpr (dim == 3) { x = (const float *) (src1 + (i3 - ne03) * nb13 + i2 * nb12 + i1 * nb11 + i0 * nb10); } } float * y = (float *)(dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); *y = *x; } } void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; cudaStream_t stream = ctx.stream(); const int32_t dim = ((int32_t *) dst->op_params)[0]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) { const float * src0_d = (const float *)src0->data; const float * src1_d = (const float 
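// [illustrative host-side reference for the index mapping used by the three contiguous
//  concat kernels above; x/y are the contiguous src0/src1 buffers, ne00..ne02 are src0's
//  extents, ne0/ne1 the destination's. The function name is hypothetical.]
static float concat3d_ref(const float * x, const float * y, int dim,
                          int64_t ne00, int64_t ne01, int64_t ne02,
                          int64_t ne0,  int64_t ne1,
                          int64_t i0, int64_t i1, int64_t i2) {
    switch (dim) {
        case 0:  return i0 < ne00 ? x[(i2*ne1 + i1)*ne00 + i0]
                                  : y[(i2*ne1 + i1)*(ne0 - ne00) + (i0 - ne00)];
        case 1:  return i1 < ne01 ? x[(i2*ne01 + i1)*ne0 + i0]
                                  : y[(i2*(ne1 - ne01) + (i1 - ne01))*ne0 + i0];
        default: return i2 < ne02 ? x[(i2*ne1 + i1)*ne0 + i0]
                                  : y[((i2 - ne02)*ne1 + i1)*ne0 + i0];
    }
}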
*)src1->data; float * dst_d = (float *)dst->data; if (dim != 3) { for (int i3 = 0; i3 < dst->ne[3]; i3++) { concat_f32_cuda( src0_d + i3 * (src0->nb[3] / 4), src1_d + i3 * (src1->nb[3] / 4), dst_d + i3 * ( dst->nb[3] / 4), src0->ne[0], src0->ne[1], src0->ne[2], dst->ne[0], dst->ne[1], dst->ne[2], dim, stream); } } else { const size_t size0 = ggml_nbytes(src0); const size_t size1 = ggml_nbytes(src1); CUDA_CHECK(cudaMemcpyAsync(dst_d, src0_d, size0, cudaMemcpyDeviceToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(dst_d + size0/4, src1_d, size1, cudaMemcpyDeviceToDevice, stream)); } } else { dim3 grid_dim(dst->ne[1], dst->ne[2], dst->ne[3]); auto launch_kernel = [&](auto dim) { concat_f32_non_cont<<>>( (const char *) src0->data, (const char *) src1->data, (char *) dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3]); }; switch (dim) { case 0: launch_kernel(std::integral_constant{}); break; case 1: launch_kernel(std::integral_constant{}); break; case 2: launch_kernel(std::integral_constant{}); break; case 3: launch_kernel(std::integral_constant{}); break; default: GGML_ABORT("Invalid dim: %d", dim); break; } } } ggml-org-ggml-3678254/src/ggml-cuda/concat.cuh000066400000000000000000000002111512524704700207550ustar00rootroot00000000000000#include "common.cuh" #define CUDA_CONCAT_BLOCK_SIZE 256 void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/conv-transpose-1d.cu000066400000000000000000000063671512524704700226430ustar00rootroot00000000000000#include "conv-transpose-1d.cuh" static __global__ void conv_transpose_1d_kernel( const int s0, const int p0, const int d0, const int output_size, const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3, const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3, const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3, const float * src0, const float * src1, float * dst) { int global_index = threadIdx.x + blockIdx.x * blockDim.x; if (global_index >= output_size) { return; } int out_index = global_index / dst_ne0; float accumulator = 0; for (int c = 0; c < src0_ne2; c++) { int idx = global_index % dst_ne0; int kernel_offset = (src0_ne0 * src0_ne1 * c) + (out_index * src0_ne0); int input_offset = src1_ne0 * c; for (int i = 0; i < src1_ne0; i++) { if (!(idx >= i*s0 && idx < i*s0 + src0_ne0)) { continue; } int weight_idx = idx - i*s0; float kernel_weight = src0[kernel_offset + weight_idx]; float input_value = src1[input_offset+i]; accumulator += kernel_weight * input_value; } } dst[global_index] = accumulator; GGML_UNUSED_VARS(p0, d0, src0_ne3, src1_ne3, dst_ne3, src1_ne1, dst_ne1, src1_ne2, dst_ne2); } static void conv_transpose_1d_f32_f32_cuda( const int s0, const int p0, const int d0, const int output_size, const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3, const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3, const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3, const float * src0, const float * src1, float * dst, cudaStream_t stream) { const int num_blocks = (output_size + CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE; conv_transpose_1d_kernel<<>>( s0,p0,d0,output_size, 
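// [illustrative sketch of the runtime-to-compile-time dispatch used above; all names are hypothetical]
// The switch over `dim` maps a runtime value onto std::integral_constant tags so the kernel
// template is instantiated once per dim and the per-element branch disappears. The same
// pattern in miniature:
#include <type_traits>

template <typename F> static void dispatch_small_int(int dim, F && f) {
    switch (dim) {
        case 0: f(std::integral_constant<int, 0>{}); break;
        case 1: f(std::integral_constant<int, 1>{}); break;
        case 2: f(std::integral_constant<int, 2>{}); break;
        case 3: f(std::integral_constant<int, 3>{}); break;
        default: break;
    }
}
// usage: dispatch_small_int(dim, [&](auto d) { some_templated_fn<decltype(d)::value>(); });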
src0_ne0, src0_ne1, src0_ne2, src0_ne3, src1_ne0, src1_ne1, src1_ne2, src1_ne3, dst_ne0, dst_ne1, dst_ne2, dst_ne3, src0,src1, dst); } void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; const ggml_tensor * src1 = dst->src[1]; const float * src1_d = (const float *)src1->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); const int32_t * opts = (const int32_t *)dst->op_params; const int s0 = opts[0]; const int p0 = 0;//opts[3]; const int d0 = 1;//opts[4]; const int64_t output_size = ggml_nelements(dst); conv_transpose_1d_f32_f32_cuda(s0, p0, d0, output_size, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], src0_d, src1_d, dst_d, stream); } ggml-org-ggml-3678254/src/ggml-cuda/conv-transpose-1d.cuh000066400000000000000000000002361512524704700230000ustar00rootroot00000000000000#include "common.cuh" #define CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE 256 void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/conv2d-dw.cu000066400000000000000000000163361512524704700211600ustar00rootroot00000000000000#include "conv2d-dw.cuh" struct conv_params { int in_w, in_h; int out_w, out_h; int kernel_w, kernel_h; int stride_x, stride_y; int padding_x, padding_y; int dilation_x, dilation_y; int channels, batches; }; struct kernel_bounds { int y_min, y_max; int x_min, x_max; }; __device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int out_x, int out_y, const conv_params & params) { kernel_bounds bounds; bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); bounds.y_max = min(params.kernel_h, (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); bounds.x_max = min(params.kernel_w, (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); return bounds; } __device__ __forceinline__ int calculate_input_coord(int out_coord, int kern_coord, int stride, int dilation, int padding) { return out_coord * stride + kern_coord * dilation - padding; } struct whcn_layout { __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) { return n * (params.channels * params.in_w * params.in_h) + c * params.in_w * params.in_h + y * params.in_w + x; } __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) { return c * params.kernel_h * params.kernel_w + ky * params.kernel_w + kx; } __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) { return n * (params.channels * params.out_w * params.out_h) + c * params.out_w * params.out_h + y * params.out_w + x; } __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y, int & out_x) { out_x = global_idx % params.out_w; out_y = (global_idx / params.out_w) % params.out_h; c = (global_idx / (params.out_w * params.out_h)) % params.channels; n = global_idx / (params.out_w * params.out_h * 
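// [illustrative sketch; p0 = 0 and d0 = 1 are hard-coded by the op above]
// Each output position gathers every input position i whose upsampled window
// [i*s0, i*s0 + K) covers it, i.e. the usual transposed-convolution relation. Under these
// fixed parameters the expected output length is given by the standard formula below
// (an assumption about how the graph sizes dst; it is not computed in this file):
static int conv_transpose_1d_out_len(int in_len, int kernel_len, int s0) {
    return (in_len - 1)*s0 + kernel_len;
}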
params.channels); } }; struct cwhn_layout { __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) { return n * (params.channels * params.in_w * params.in_h) + (y * params.in_w + x) * params.channels + c; } __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) { return (ky * params.kernel_w + kx) * params.channels + c; } __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) { return n * (params.channels * params.out_w * params.out_h) + y * (params.out_w * params.channels) + x * params.channels + c; } __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y, int & out_x) { c = global_idx % params.channels; out_x = (global_idx / params.channels) % params.out_w; out_y = (global_idx / (params.channels * params.out_w)) % params.out_h; n = global_idx / (params.channels * params.out_w * params.out_h); } }; template __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restrict__ kernel, T * __restrict__ output, const int in_w, const int in_h, const int out_w, const int out_h, const int kernel_w, const int kernel_h, const int stride_x, const int stride_y, const int padding_x, const int padding_y, const int dilation_x, const int dilation_y, const int channels, const int batches) { const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; const int total_elements = batches * channels * out_h * out_w; if (global_idx >= total_elements) { return; } conv_params params = { in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches }; int batch_idx, channel_idx, out_y_idx, out_x_idx; Layout::unpack_indices(global_idx, params, batch_idx, channel_idx, out_y_idx, out_x_idx); T accumulator = 0; kernel_bounds bounds = calculate_kernel_bounds(out_x_idx, out_y_idx, params); for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { int in_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y); for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { int in_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x); const T input_val = input[Layout::input_index(batch_idx, channel_idx, in_y_idx, in_x_idx, params)]; const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; accumulator += input_val * kernel_val; } } output[Layout::output_index(batch_idx, channel_idx, out_y_idx, out_x_idx, params)] = accumulator; } void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * kernel = dst->src[0]; const ggml_tensor * input = dst->src[1]; GGML_ASSERT(kernel->type == GGML_TYPE_F32 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32); const float * w_d = (const float *) kernel->data; const float * x_d = (const float *) input->data; float * y_d = (float *) dst->data; const int32_t * p = (const int32_t *) dst->op_params; const int stride_x = p[0]; const int stride_y = p[1]; const int padding_x = p[2]; const int padding_y = p[3]; const int dilation_x = p[4]; const int dilation_y = p[5]; const int in_w = input->ne[0]; const int in_h = input->ne[1]; const int kernel_w = kernel->ne[0]; const int kernel_h = kernel->ne[1]; const int out_w = dst->ne[0]; const int out_h = dst->ne[1]; const int channels = dst->ne[2]; const int batches = dst->ne[3]; cudaStream_t st = ctx.stream(); const int 
total = batches * channels * out_h * out_w; const int blocks = (total + CUDA_CONV2D_DW_BLOCK_SIZE - 1) / CUDA_CONV2D_DW_BLOCK_SIZE; if (ggml_is_contiguous(input)) { conv2d_dw_kernel<<>>( x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches); } else if (ggml_is_contiguous_channels(input)) { conv2d_dw_kernel<<>>( x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches); } else { GGML_ABORT("Unsupported memory layout for conv_2d_dw"); } } ggml-org-ggml-3678254/src/ggml-cuda/conv2d-dw.cuh000066400000000000000000000002331512524704700213150ustar00rootroot00000000000000#pragma once #include "common.cuh" #define CUDA_CONV2D_DW_BLOCK_SIZE 256 void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/conv2d-transpose.cu000066400000000000000000000073121512524704700225560ustar00rootroot00000000000000#include #include "conv2d-transpose.cuh" #include "ggml.h" __global__ void conv2d_transpose_kernel(const float * __restrict__ input, const half * __restrict__ kernel, float * __restrict__ output, const int in_w, const int in_h, const int out_w, const int out_h, const int kernel_w, const int kernel_h, const int stride, const int c_in, const int c_out, const int batches) { const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; const int total_elements = out_w * out_h * c_out * batches; if (global_idx >= total_elements) { return; } const int out_x_idx = global_idx % out_w; const int out_y_idx = (global_idx / out_w) % out_h; const int c_idx = (global_idx / (out_w * out_h)) % c_out; const int n_idx = global_idx / (out_w * out_h * c_out); float accumulator = 0; // For each output idx, find the inputs that contribute to it by checking stride alignment and bounds for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { for (int kh = 0; kh < kernel_h; ++kh) { int in_y = out_y_idx - kh; if (in_y < 0 || in_y % stride) continue; in_y /= stride; if (in_y >= in_h) continue; for (int kw = 0; kw < kernel_w; ++kw) { int in_x = out_x_idx - kw; if (in_x < 0 || in_x % stride) continue; in_x /= stride; if (in_x >= in_w) continue; const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; const int kernel_idx = (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; float input_val = input[input_idx]; half kern_val = kernel[kernel_idx]; accumulator += input_val * (float) kern_val; } } } output[(out_w * out_h * c_out) * n_idx + (out_w * out_h) * c_idx + (out_w) *out_y_idx + out_x_idx] = accumulator; } //input is (W, H, C_in, N), Kernel is (W, H, C_out, C_in) void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * kernel = dst->src[0]; const ggml_tensor * input = dst->src[1]; GGML_ASSERT(kernel->type == GGML_TYPE_F16 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32); const float * input_data = (const float *) input->data; float * output_data = (float *) dst->data; const half * kernel_data = (const half *) kernel->data; const int input_w = input->ne[0]; const int input_h = input->ne[1]; const int output_w = dst->ne[0]; const int output_h = dst->ne[1]; const int channels_in = input->ne[2]; const int channels_out = kernel->ne[2]; const int kernel_w = kernel->ne[0]; const int kernel_h = kernel->ne[1]; const int stride = dst->op_params[0]; const int 
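// [illustrative host-side helper mirroring the gather condition in the kernel above; the name is hypothetical]
// An input row contributes to output row out_y through kernel row kh only when (out_y - kh)
// is non-negative, divisible by the stride, and maps inside the input; the same test is
// applied independently along x.
static bool conv2d_transpose_contributes(int out_y, int kh, int stride, int in_h, int * in_y_out) {
    const int t = out_y - kh;
    if (t < 0 || t % stride != 0) {
        return false;
    }
    const int in_y = t / stride;
    if (in_y >= in_h) {
        return false;
    }
    *in_y_out = in_y;
    return true;
}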
batches = input->ne[3]; GGML_ASSERT(channels_in == kernel->ne[3]); GGML_ASSERT(stride > 0); cudaStream_t st = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(input)); GGML_ASSERT(ggml_is_contiguous(kernel)); GGML_ASSERT(ggml_is_contiguous(dst)); const int total = (output_w * output_h * channels_out * batches); const int blocks = (total + CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE - 1) / CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE; conv2d_transpose_kernel<<>>( input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride, channels_in, channels_out, batches); } ggml-org-ggml-3678254/src/ggml-cuda/conv2d-transpose.cuh000066400000000000000000000002351512524704700227230ustar00rootroot00000000000000#include "common.cuh" #define CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE 256 void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/conv2d.cu000066400000000000000000000145711512524704700205470ustar00rootroot00000000000000#include "conv2d.cuh" #include "convert.cuh" struct conv_params { const int64_t IW, IH; const int64_t OW, OH; const int64_t KW, KH; const int64_t ST_X, ST_Y; const int64_t PD_X, PD_Y; const int64_t DL_X, DL_Y; const int64_t IC, OC; const int64_t B; const int64_t TOTAL; }; struct kernel_bounds { int64_t y_min, y_max; int64_t x_min, x_max; }; __device__ __forceinline__ int64_t max64(int64_t a, int64_t b) { return (a > b) ? a : b; } __device__ __forceinline__ int64_t min64(int64_t a, int64_t b) { return (a < b) ? a : b; } __device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int64_t out_x, int64_t out_y, const conv_params & P) { kernel_bounds bounds; bounds.y_min = max64(0, (P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); bounds.y_max = min64(P.KH, (P.IH + P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); bounds.x_min = max64(0, (P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); bounds.x_max = min64(P.KW, (P.IW + P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); return bounds; } __device__ __forceinline__ int calculate_input_coord(int64_t out_coord, int64_t kern_coord, int64_t stride, int64_t dilation, int64_t padding) { return out_coord * stride + kern_coord * dilation - padding; } struct whcn_layout { __device__ static int64_t input_index(int64_t n, int64_t c, int64_t y, int64_t x, const conv_params & P) { return n * (P.IC * P.IW * P.IH) + c * P.IW * P.IH + y * P.IW + x; } __device__ static int64_t kernel_index(int64_t c_out, int64_t c_in, int64_t ky, int64_t kx, const conv_params & P) { return c_out * (P.IC * P.KH * P.KW) + c_in * (P.KH * P.KW) + ky * P.KW + kx; } __device__ static int64_t output_index(int64_t n, int64_t c, int64_t y, int64_t x, const conv_params & P) { return n * (P.OC * P.OW * P.OH) + c * P.OW * P.OH + y * P.OW + x; } __device__ static void unpack_indices(int64_t global_idx, const conv_params & P, int64_t & n, int64_t & c, int64_t & out_y, int64_t & out_x) { out_x = global_idx % P.OW; out_y = (global_idx / P.OW) % P.OH; c = (global_idx / (P.OW * P.OH)) % P.OC; n = global_idx / (P.OW * P.OH * P.OC); } }; template static __global__ void conv2d_kernel(const float * __restrict__ input, const T * __restrict__ kernel, float * __restrict__ output, const conv_params P) { const int64_t global_idx = blockIdx.x * blockDim.x + threadIdx.x; if (global_idx >= P.TOTAL) { return; } int64_t n, c_out, out_y, out_x; Layout::unpack_indices(global_idx, P, n, c_out, out_y, out_x); float acc = 0.0f; for (int64_t c_in = 0; c_in < P.IC; ++c_in) { kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, 
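// [illustrative host-side check of the bounds computed by calculate_kernel_bounds above;
//  std::max/std::min replace the device-only max64/min64]
// The bounds are meant to restrict ky to exactly those kernel rows whose input coordinate
// out_y*ST_Y + ky*DL_Y - PD_Y lands inside [0, IH); a brute-force verification for one row:
#include <algorithm>
#include <cassert>
#include <cstdint>

static void check_conv2d_bounds_y(int64_t out_y, int64_t KH, int64_t IH,
                                  int64_t ST_Y, int64_t DL_Y, int64_t PD_Y) {
    const int64_t y_min = std::max<int64_t>(0,  (PD_Y - out_y*ST_Y + DL_Y - 1) / DL_Y);
    const int64_t y_max = std::min<int64_t>(KH, (IH + PD_Y - out_y*ST_Y + DL_Y - 1) / DL_Y);
    for (int64_t ky = 0; ky < KH; ++ky) {
        const int64_t in_y  = out_y*ST_Y + ky*DL_Y - PD_Y;
        const bool   inside = in_y >= 0 && in_y < IH;
        assert(inside == (ky >= y_min && ky < y_max));
    }
}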
P); for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { const int64_t in_y = calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y); for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { const int64_t in_x = calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X); const float input_val = input[Layout::input_index(n, c_in, in_y, in_x, P)]; const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; acc += (input_val * ggml_cuda_cast(kernel_val)); } } } // [N, OC, OH, OW] output[Layout::output_index(n, c_out, out_y, out_x, P)] = acc; } template static void conv2d_cuda(const float * X_D, const T * K_D, float * Y_D, const conv_params P, cudaStream_t st) { const int blocks = (P.TOTAL + CUDA_CONV2D_BLOCK_SIZE - 1) / CUDA_CONV2D_BLOCK_SIZE; conv2d_kernel<<>>(X_D, K_D, Y_D, P); } static void conv2d_cuda_f16(const float * X_D, const half * K_D, float * Y_D, const conv_params P, cudaStream_t st) { conv2d_cuda(X_D, K_D, Y_D, P, st); } static void conv2d_cuda_f32(const float * X_D, const float * K_D, float * Y_D, const conv_params P, cudaStream_t st) { conv2d_cuda(X_D, K_D, Y_D, P, st); } void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * kernel = dst->src[0]; const ggml_tensor * input = dst->src[1]; float * K_D = (float *) kernel->data; const float * X_D = (const float *) input->data; float * Y_D = (float *) dst->data; GGML_ASSERT(ggml_is_contiguous(kernel)); GGML_ASSERT(kernel->type == GGML_TYPE_F16 || kernel->type == GGML_TYPE_F32); // same number of input channels GGML_ASSERT(input->ne[2] == kernel->ne[2]); cudaStream_t st = ctx.stream(); const int32_t * p = (const int32_t *) dst->op_params; const int ST_X = p[0]; // stride_x const int ST_Y = p[1]; // stride_y const int PD_X = p[2]; // padding_x const int PD_Y = p[3]; // padding_y const int DL_X = p[4]; // dilation_x const int DL_Y = p[5]; // dilation_y // No cwhn GGML_ASSERT(p[6] == false); const int IW = input->ne[0]; // input_w const int IH = input->ne[1]; // input_h const int OW = dst->ne[0]; // output_w const int OH = dst->ne[1]; // output_h const int KW = kernel->ne[0]; // kernel_w const int KH = kernel->ne[1]; // kernel_h const int IC = input->ne[2]; // input_channels const int OC = kernel->ne[3]; // ouptut_chanles const int B = input->ne[3]; // n_batches const int64_t total = B * OC * OH * OW; conv_params params = { IW, IH, OW, OH, KW, KH, ST_X, ST_Y, PD_X, PD_Y, DL_X, DL_Y, IC, OC, B, total }; if (kernel->type == GGML_TYPE_F16) { conv2d_cuda_f16(X_D, (half *) K_D, Y_D, params, st); } else { conv2d_cuda_f32(X_D, K_D, Y_D, params, st); } } ggml-org-ggml-3678254/src/ggml-cuda/conv2d.cuh000066400000000000000000000002251512524704700207060ustar00rootroot00000000000000#pragma once #include "common.cuh" #define CUDA_CONV2D_BLOCK_SIZE 256 void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/convert.cu000066400000000000000000000751121512524704700210320ustar00rootroot00000000000000#include "convert.cuh" #include "dequantize.cuh" #include #define CUDA_Q8_0_NE_ALIGN 2048 template static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t s01, const int64_t s02, const int64_t s03) { const int64_t i00 = 2 * (int64_t(blockDim.x)*blockIdx.x + threadIdx.x); if (i00 >= ne00) { return; } const int64_t i01 = blockIdx.y; const int64_t i02 = blockIdx.z % ne02; const int64_t i03 = blockIdx.z / ne02; const int64_t ibx0 
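// [illustrative note; dst->ne[*] is taken as given by the op above]
// Under the usual convolution sizing with symmetric padding (an assumption, not shown in
// this file), the output extents relate to the parameters as
//   OW = (IW + 2*PD_X - DL_X*(KW - 1) - 1) / ST_X + 1
// and likewise for OH. A helper expressing that relation:
static int64_t conv2d_out_size(int64_t in, int64_t kern, int64_t stride, int64_t pad, int64_t dil) {
    return (in + 2*pad - dil*(kern - 1) - 1) / stride + 1;
}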
= i03*s03 + i02*s02 + i01*s01; const int64_t ib = ibx0 + i00/qk; // block index const int64_t iqs = (i00%qk)/qr; // quant index const int64_t iybs = i00 - i00%qk; // y block start index const int64_t y_offset = qr == 1 ? 1 : qk/2; // dequantize float2 v; dequantize_kernel(vx, ib, iqs, v); const int64_t iy0 = ((i03*ne02 + i02)*ne01 + i01)*ne00 + iybs + iqs; y[iy0 + 0] = ggml_cuda_cast(v.x); y[iy0 + y_offset] = ggml_cuda_cast(v.y); } template static __global__ void dequantize_block_q8_0_f16(const void * __restrict__ vx, half * __restrict__ y, const int64_t k) { #if __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL constexpr int nint = CUDA_Q8_0_NE_ALIGN/sizeof(int) + WARP_SIZE; const int64_t i0 = CUDA_Q8_0_NE_ALIGN*blockIdx.x; const int * x0 = ((int *) vx) + blockIdx.x * nint; half2 * y2 = (half2 *) (y + i0); __shared__ int vals[nint]; #pragma unroll for (int ix0 = 0; ix0 < nint; ix0 += WARP_SIZE) { if (need_check && i0*sizeof(block_q8_0)/QK8_0 + sizeof(int)*(ix0 + threadIdx.x) >= k*sizeof(block_q8_0)/QK8_0) { break; } const int ix = ix0 + threadIdx.x; vals[ix] = x0[ix]; } __syncthreads(); #pragma unroll for (int iy = 0; iy < CUDA_Q8_0_NE_ALIGN; iy += 2*WARP_SIZE) { if (need_check && i0 + iy + 2*threadIdx.x >= k) { return; } const half * b0 = ((const half *) vals) + (sizeof(block_q8_0)/sizeof(half)) * ((iy + 2*threadIdx.x)/QK8_0); const half d = *b0; const char2 qs = ((const char2 *) (b0 + 1))[threadIdx.x % (QK8_0/2)]; y2[iy/2 + threadIdx.x] = __hmul2(make_half2(qs.x, qs.y), __half2half2(d)); } #else GGML_UNUSED_VARS(vx, y, k); NO_DEVICE_CODE; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL } template static __global__ void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int64_t i = blockIdx.x; // assume 32 threads const int64_t tid = threadIdx.x; const int64_t il = tid/8; const int64_t ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_0 * x = (const block_q4_0 *)vx + ib; const float d = __half2float(x->d); const float dm = -8*d; const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d * (q[l] & 0xF) + dm; y[l+16] = d * (q[l] >> 4) + dm; } } template static __global__ void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int64_t i = blockIdx.x; // assume 32 threads const int64_t tid = threadIdx.x; const int64_t il = tid/8; const int64_t ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_1 * x = (const block_q4_1 *)vx + ib; const float2 d = __half22float2(x->dm); const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d.x * (q[l] & 0xF) + d.y; y[l+16] = d.x * (q[l] >> 4) + d.y; } } //================================== k-quants template static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_q2_K * x = (const block_q2_K *) vx; const int64_t tid = threadIdx.x; const int64_t n = tid/32; const int64_t l = tid - 32*n; const int64_t is = 8*n + l/16; const uint8_t q = x[i].qs[32*n + l]; dst_t * y = yy + i*QK_K + 128*n; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * 
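// [illustrative host-side reference for a single block, assuming the block_q4_0 layout from
//  ggml-common.h: one half scale d followed by QK4_0/2 packed nibble pairs]
// Each nibble is an unsigned 4-bit code; subtracting 8 recenters it before scaling, which is
// what the fused `dm = -8*d` term in dequantize_block_q4_0 above does.
static void dequantize_q4_0_block_ref(const block_q4_0 * b, float * out /* QK4_0 values */) {
    const float d = __half2float(b->d);
    for (int j = 0; j < QK4_0/2; ++j) {
        out[j]           = d * ((b->qs[j] & 0x0F) - 8); // low nibbles -> first half of the block
        out[j + QK4_0/2] = d * ((b->qs[j] >>   4) - 8); // high nibbles -> second half
    }
}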
(x[i].scales[is+4] >> 4); y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); } template static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_q3_K * x = (const block_q3_K *) vx; const int64_t r = threadIdx.x/4; const int64_t tid = r/2; const int64_t is0 = r%2; const int64_t l0 = 16*is0 + 4*(threadIdx.x%4); const int64_t n = tid / 4; const int64_t j = tid - 4*n; uint8_t m = 1 << (4*n + j); int64_t is = 8*n + 2*j + is0; int shift = 2*j; int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); float d_all = x[i].d; float dl = d_all * (us - 32); dst_t * y = yy + i*QK_K + 128*n + 32*j; const uint8_t * q = x[i].qs + 32*n; const uint8_t * hm = x[i].hmask; for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); } static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { if (j < 4) { d = q[j] & 63; m = q[j + 4] & 63; } else { d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); } } template static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q4_K * x = (const block_q4_K *) vx; const int64_t i = blockIdx.x; // assume 32 threads const int64_t tid = threadIdx.x; const int64_t il = tid/8; const int64_t ir = tid%8; const int64_t is = 2*il; const int64_t n = 4; dst_t * y = yy + i*QK_K + 64*il + n*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * q = x[i].qs + 32*il + n*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; for (int l = 0; l < n; ++l) { y[l + 0] = d1 * (q[l] & 0xF) - m1; y[l +32] = d2 * (q[l] >> 4) - m2; } } template static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q5_K * x = (const block_q5_K *) vx; const int64_t i = blockIdx.x; // assume 64 threads - this is very slightly better than the one below const int64_t tid = threadIdx.x; const int64_t il = tid/16; // il is in 0...3 const int64_t ir = tid%16; // ir is in 0...15 const int64_t is = 2*il; // is is in 0...6 dst_t * y = yy + i*QK_K + 64*il + 2*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * ql = x[i].qs + 32*il + 2*ir; const uint8_t * qh = x[i].qh + 2*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; uint8_t hm = 1 << (2*il); y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1; hm <<= 1; y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 
16 : 0)) - m2; } template static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q6_K * x = (const block_q6_K *) vx; const int64_t i = blockIdx.x; // assume 64 threads - this is very slightly better than the one below const int64_t tid = threadIdx.x; const int64_t ip = tid/32; // ip is 0 or 1 const int64_t il = tid - 32*ip; // 0...32 const int64_t is = 8*ip + il/16; dst_t * y = yy + i*QK_K + 128*ip + il; const float d = x[i].d; const uint8_t * ql = x[i].ql + 64*ip + il; const uint8_t qh = x[i].qh[32*ip + il]; const int8_t * sc = x[i].scales + is; y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); } template static __global__ void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq2_xxs * x = (const block_iq2_xxs *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint16_t * q2 = x[i].qs + 4*ib; const uint8_t * aux8 = (const uint8_t *)q2; const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[il]); const uint32_t aux32 = q2[2] | (q2[3] << 16); const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f; const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127]; for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); } template static __global__ void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq2_xs * x = (const block_iq2_xs *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint16_t * q2 = x[i].qs + 4*ib; const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511)); const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; const uint8_t signs = ksigns_iq2xs[q2[il] >> 9]; for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); } template static __global__ void dequantize_block_iq2_s(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq2_s * x = (const block_iq2_s *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300))); const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; const uint8_t signs = x[i].qs[QK_K/8+4*ib+il]; for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? 
-1.f : 1.f); } template static __global__ void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq3_xxs * x = (const block_iq3_xxs *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint8_t * q3 = x[i].qs + 8*ib; const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib; const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]); const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]); const uint32_t aux32 = gas[0] | (gas[1] << 16); const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.5f; const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127]; for (int j = 0; j < 4; ++j) { y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); } } template static __global__ void dequantize_block_iq3_s(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq3_s * x = (const block_iq3_s *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint8_t * qs = x[i].qs + 8*ib; const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256))); const float d = (float)x[i].d * (1 + 2*((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf)); const uint8_t signs = x[i].signs[4*ib + il]; for (int j = 0; j < 4; ++j) { y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); } } template static __global__ void dequantize_block_iq1_s(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq1_s * x = (const block_iq1_s *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA; const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1); uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32; grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[ib] >> 3*il) & 7) << 8)]; grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f; grid32[0] &= 0x0f0f0f0f; for (int j = 0; j < 8; ++j) { y[j] = d * (q[j] + delta); } } template static __global__ void dequantize_block_iq1_m(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq1_m * x = (const block_iq1_m *) vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint16_t * sc = (const uint16_t *)x[i].scales; iq1m_scale_t scale; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); const int64_t ib16 = 2*ib + il/2; // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4); const float d = (float)scale.f16 * (2*((sc[ib16/4] >> 3*(ib16%4)) & 0x7) + 1); const float delta = x[i].qh[2*ib+il/2] & (0x08 << 4*(il%2)) ? 
-1 - IQ1M_DELTA : -1 + IQ1M_DELTA; uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32; grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[2*ib+il/2] >> 4*(il%2)) & 7) << 8)]; grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f; grid32[0] &= 0x0f0f0f0f; for (int j = 0; j < 8; ++j) { y[j] = d * (q[j] + delta); } } template static __global__ void dequantize_block_iq4_nl(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL); const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 4*il; const uint8_t * q4 = x[ib].qs + 4*il; const float d = (float)x[ib].d; for (int j = 0; j < 4; ++j) { y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf]; y[j+16] = d * kvalues_iq4nl[q4[j] >> 4]; } } template static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_iq4_xs * x = (const block_iq4_xs *)vx; const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 4*il; const uint8_t * q4 = x[i].qs + 16*ib + 4*il; const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32); for (int j = 0; j < 4; ++j) { y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf]; y[j+16] = d * kvalues_iq4nl[q4[j] >> 4]; } } template static __global__ void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int64_t i = blockIdx.x; const block_mxfp4 * x = (const block_mxfp4 *) vx + i*(QK_K/QK_MXFP4); const int64_t tid = threadIdx.x; const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 4*il; const uint8_t * q4 = x[ib].qs + 4*il; const float d = ggml_cuda_e8m0_to_fp32(x[ib].e); for (int j = 0; j < 4; ++j) { y[j+ 0] = d * kvalues_mxfp4[q4[j] & 0xf]*0.5f; y[j+16] = d * kvalues_mxfp4[q4[j] >> 4]*0.5f; } } template static void dequantize_block_cuda(const void * vx, dst_t * y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t s01, const int64_t s02, const int64_t s03, cudaStream_t stream) { const dim3 num_blocks((ne00 + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE), ne01, ne02*ne03); dequantize_block<<>> (vx, y, ne00, ne01, ne02, s01, s02, s03); } template static void dequantize_block_cont_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, cudaStream_t stream) { dequantize_block_cuda(vx, y, k, 1, 1, 1, k/qk, k/qk, k/qk, stream); } static void dequantize_block_q8_0_f16_cuda(const void * __restrict__ vx, half * __restrict__ y, const int64_t k, cudaStream_t stream) { const int num_blocks = (k + CUDA_Q8_0_NE_ALIGN - 1) / CUDA_Q8_0_NE_ALIGN; if (k % CUDA_Q8_0_NE_ALIGN == 0) { const bool need_check = false; dequantize_block_q8_0_f16<<>>(vx, y, k); } else { const bool need_check = true; dequantize_block_q8_0_f16<<>>(vx, y, k); } } template static void dequantize_row_q2_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_q2_K<<>>(vx, y); } template static void dequantize_row_q3_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_q3_K<<>>(vx, y); } template static void dequantize_row_q4_0_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int 
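// [illustrative host-side sketch]
// An MXFP4 block stores an E8M0 shared scale (a pure power of two, 2^(e-127)) plus 4-bit
// codes looked up in kvalues_mxfp4; the kernel's extra *0.5f compensates for that table
// storing the FP4 magnitudes doubled so they fit in int8. A plain reference for the E8M0
// decode, matching the fallback path of ggml_cuda_e8m0_to_fp32 for e < 255 (0xFF is NaN
// in the E8M0 spec):
#include <cmath>
static float e8m0_to_fp32_ref(uint8_t e) {
    return std::ldexp(1.0f, (int) e - 127); // e == 0 yields the subnormal 2^-127
}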
nb32 = k / 32; const int nb = (k + 255) / 256; dequantize_block_q4_0<<>>(vx, y, nb32); } template static void dequantize_row_q4_1_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb32 = k / 32; const int nb = (k + 255) / 256; dequantize_block_q4_1<<>>(vx, y, nb32); } template static void dequantize_row_q4_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_q4_K<<>>(vx, y); } template static void dequantize_row_q5_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_q5_K<<>>(vx, y); } template static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_q6_K<<>>(vx, y); } template static void dequantize_row_iq2_xxs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq2_xxs<<>>(vx, y); } template static void dequantize_row_iq2_xs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq2_xs<<>>(vx, y); } template static void dequantize_row_iq2_s_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq2_s<<>>(vx, y); } template static void dequantize_row_iq3_xxs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq3_xxs<<>>(vx, y); } template static void dequantize_row_iq3_s_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq3_s<<>>(vx, y); } template static void dequantize_row_iq1_s_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq1_s<<>>(vx, y); } template static void dequantize_row_iq4_nl_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = (k + QK_K - 1) / QK_K; dequantize_block_iq4_nl<<>>(vx, y); } template static void dequantize_row_iq1_m_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = k / QK_K; dequantize_block_iq1_m<<>>(vx, y); } template static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = (k + QK_K - 1) / QK_K; dequantize_block_iq4_xs<<>>(vx, y); } template static void dequantize_row_mxfp4_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { const int nb = (k + QK_K - 1) / QK_K; dequantize_block_mxfp4<<>>(vx, y); } template static __global__ void convert_unary( const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t s01, const int64_t s02, const int64_t s03) { const int64_t i00 = (int64_t)blockDim.x*blockIdx.x + threadIdx.x; if (i00 >= ne00) { return; } const int64_t i01 = blockIdx.y; const int64_t i02 = blockIdx.z % ne02; const int64_t i03 = blockIdx.z / ne02; const src_t * x = (const src_t *) vx; const int64_t ix = i03*s03 + i02*s02 + i01*s01 + i00; const int64_t iy = ((i03*ne02 + i02)*ne01 + i01)*ne00 + i00; y[iy] = ggml_cuda_cast(x[ix]); } template static void convert_unary_cuda(const void * vx, dst_t * y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t s01, const int64_t s02, const int64_t s03, cudaStream_t stream) { const dim3 num_blocks((ne00 + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / 
CUDA_DEQUANTIZE_BLOCK_SIZE, ne01, ne02*ne03); convert_unary<<>> (vx, y, ne00, ne01, ne02, s01, s02, s03); } template static void convert_unary_cont_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { convert_unary_cuda(vx, y, k, 1, 1, 1, k, k, k, stream); } to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) { switch (type) { case GGML_TYPE_F32: return convert_unary_cont_cuda; case GGML_TYPE_F16: return convert_unary_cont_cuda; default: return nullptr; } } to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: return dequantize_row_q4_0_cuda; case GGML_TYPE_Q4_1: return dequantize_row_q4_1_cuda; case GGML_TYPE_Q5_0: return dequantize_block_cont_cuda; case GGML_TYPE_Q5_1: return dequantize_block_cont_cuda; case GGML_TYPE_Q8_0: if (fp16_available(ggml_cuda_info().devices[ggml_cuda_get_device()].cc)) { return dequantize_block_q8_0_f16_cuda; } return dequantize_block_cont_cuda; case GGML_TYPE_Q2_K: return dequantize_row_q2_K_cuda; case GGML_TYPE_Q3_K: return dequantize_row_q3_K_cuda; case GGML_TYPE_Q4_K: return dequantize_row_q4_K_cuda; case GGML_TYPE_Q5_K: return dequantize_row_q5_K_cuda; case GGML_TYPE_Q6_K: return dequantize_row_q6_K_cuda; case GGML_TYPE_IQ2_XXS: return dequantize_row_iq2_xxs_cuda; case GGML_TYPE_IQ2_XS: return dequantize_row_iq2_xs_cuda; case GGML_TYPE_IQ2_S: return dequantize_row_iq2_s_cuda; case GGML_TYPE_IQ3_XXS: return dequantize_row_iq3_xxs_cuda; case GGML_TYPE_IQ1_S: return dequantize_row_iq1_s_cuda; case GGML_TYPE_IQ1_M: return dequantize_row_iq1_m_cuda; case GGML_TYPE_IQ4_NL: return dequantize_row_iq4_nl_cuda; case GGML_TYPE_IQ4_XS: return dequantize_row_iq4_xs_cuda; case GGML_TYPE_IQ3_S: return dequantize_row_iq3_s_cuda; case GGML_TYPE_MXFP4: return dequantize_row_mxfp4_cuda; case GGML_TYPE_F32: return convert_unary_cont_cuda; case GGML_TYPE_BF16: return convert_unary_cont_cuda; default: return nullptr; } } to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: return dequantize_row_q4_0_cuda; case GGML_TYPE_Q4_1: return dequantize_row_q4_1_cuda; case GGML_TYPE_Q5_0: return dequantize_block_cont_cuda; case GGML_TYPE_Q5_1: return dequantize_block_cont_cuda; case GGML_TYPE_Q8_0: return dequantize_block_cont_cuda; case GGML_TYPE_Q2_K: return dequantize_row_q2_K_cuda; case GGML_TYPE_Q3_K: return dequantize_row_q3_K_cuda; case GGML_TYPE_Q4_K: return dequantize_row_q4_K_cuda; case GGML_TYPE_Q5_K: return dequantize_row_q5_K_cuda; case GGML_TYPE_Q6_K: return dequantize_row_q6_K_cuda; case GGML_TYPE_IQ2_XXS: return dequantize_row_iq2_xxs_cuda; case GGML_TYPE_IQ2_XS: return dequantize_row_iq2_xs_cuda; case GGML_TYPE_IQ2_S: return dequantize_row_iq2_s_cuda; case GGML_TYPE_IQ3_XXS: return dequantize_row_iq3_xxs_cuda; case GGML_TYPE_IQ1_S: return dequantize_row_iq1_s_cuda; case GGML_TYPE_IQ1_M: return dequantize_row_iq1_m_cuda; case GGML_TYPE_IQ4_NL: return dequantize_row_iq4_nl_cuda; case GGML_TYPE_IQ4_XS: return dequantize_row_iq4_xs_cuda; case GGML_TYPE_IQ3_S: return dequantize_row_iq3_s_cuda; case GGML_TYPE_MXFP4: return dequantize_row_mxfp4_cuda; case GGML_TYPE_F16: return convert_unary_cont_cuda; case GGML_TYPE_BF16: return convert_unary_cont_cuda; default: return nullptr; } } to_fp16_nc_cuda_t ggml_get_to_fp16_nc_cuda(ggml_type type) { switch (type) { case GGML_TYPE_F32: return convert_unary_cuda; case GGML_TYPE_Q4_0: return dequantize_block_cuda; case GGML_TYPE_Q4_1: return dequantize_block_cuda; case GGML_TYPE_Q5_0: return dequantize_block_cuda; case GGML_TYPE_Q5_1: return 
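// [illustrative usage sketch, assuming valid device buffers and a live CUDA stream;
//  the function name is hypothetical]
// The getters below/above return a conversion launcher for a given ggml_type, or nullptr if
// the type has no such kernel, so callers can dispatch without maintaining their own switch:
static void hypothetical_dequantize_to_f32(ggml_type type, const void * src_q, float * dst_f32,
                                           int64_t nelements, cudaStream_t stream) {
    const to_fp32_cuda_t to_fp32 = ggml_get_to_fp32_cuda(type);
    GGML_ASSERT(to_fp32 != nullptr);
    to_fp32(src_q, dst_f32, nelements, stream);
}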
dequantize_block_cuda; case GGML_TYPE_Q8_0: return dequantize_block_cuda; case GGML_TYPE_BF16: return convert_unary_cuda; default: return nullptr; } } to_bf16_nc_cuda_t ggml_get_to_bf16_nc_cuda(ggml_type type) { switch (type) { case GGML_TYPE_F32: return convert_unary_cuda; case GGML_TYPE_Q4_0: return dequantize_block_cuda; case GGML_TYPE_Q4_1: return dequantize_block_cuda; case GGML_TYPE_Q5_0: return dequantize_block_cuda; case GGML_TYPE_Q5_1: return dequantize_block_cuda; case GGML_TYPE_Q8_0: return dequantize_block_cuda; case GGML_TYPE_F16: return convert_unary_cuda; default: return nullptr; } } to_fp32_nc_cuda_t ggml_get_to_fp32_nc_cuda(ggml_type type) { switch (type) { case GGML_TYPE_F16: return convert_unary_cuda; case GGML_TYPE_Q4_0: return dequantize_block_cuda; case GGML_TYPE_Q4_1: return dequantize_block_cuda; case GGML_TYPE_Q5_0: return dequantize_block_cuda; case GGML_TYPE_Q5_1: return dequantize_block_cuda; case GGML_TYPE_Q8_0: return dequantize_block_cuda; case GGML_TYPE_BF16: return convert_unary_cuda; default: return nullptr; } } ggml-org-ggml-3678254/src/ggml-cuda/convert.cuh000066400000000000000000000036541512524704700212040ustar00rootroot00000000000000#pragma once #include "common.cuh" #define CUDA_DEQUANTIZE_BLOCK_SIZE 256 template using to_t_cuda_t = void (*)(const void * x, T * y, int64_t k, cudaStream_t stream); typedef to_t_cuda_t to_fp32_cuda_t; typedef to_t_cuda_t to_fp16_cuda_t; typedef to_t_cuda_t to_bf16_cuda_t; to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type); to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type); to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type); // TODO more general support for non-contiguous inputs template using to_t_nc_cuda_t = void (*)(const void * x, T * y, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, int64_t s01, int64_t s02, int64_t s03, cudaStream_t stream); typedef to_t_nc_cuda_t to_fp32_nc_cuda_t; typedef to_t_nc_cuda_t to_fp16_nc_cuda_t; typedef to_t_nc_cuda_t to_bf16_nc_cuda_t; to_fp32_nc_cuda_t ggml_get_to_fp32_nc_cuda(ggml_type type); to_fp16_nc_cuda_t ggml_get_to_fp16_nc_cuda(ggml_type type); to_bf16_nc_cuda_t ggml_get_to_bf16_nc_cuda(ggml_type type); template __host__ __device__ inline dst_t ggml_cuda_cast(src_t x) { if constexpr (std::is_same_v) { return x; } else if constexpr(std::is_same_v) { return __float2bfloat16(float(x)); } else if constexpr(std::is_same_v) { return __bfloat162float(x); } else if constexpr(std::is_same_v && std::is_same_v) { return __float22half2_rn(x); } else if constexpr(std::is_same_v && std::is_same_v) { // bypass compile error on cuda 12.0.1 #ifdef GGML_USE_HIP return __float22bfloat162_rn(x); #else return {x.x, x.y}; #endif // GGML_USE_HIP } else if constexpr(std::is_same_v) { return int32_t(x); } else { return float(x); } } ggml-org-ggml-3678254/src/ggml-cuda/count-equal.cu000066400000000000000000000040151512524704700216010ustar00rootroot00000000000000#include "common.cuh" #include "count-equal.cuh" #include template static __global__ void count_equal(const T * __restrict__ x, const T * __restrict__ y, int64_t * __restrict__ dst, const int64_t dk, const int64_t k) { const int64_t i0 = (int64_t) blockIdx.x*dk; const int64_t i1 = min(i0 + dk, k); int nequal = 0; for (int64_t i = i0 + threadIdx.x; i < i1; i += WARP_SIZE) { const T xi = x[i]; const T yi = y[i]; nequal += xi == yi; } nequal = warp_reduce_sum(nequal); if (threadIdx.x != 0) { return; } atomicAdd((int *) dst, nequal); } void ggml_cuda_count_equal(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * 
src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == src1->type); GGML_ASSERT( dst->type == GGML_TYPE_I64); GGML_ASSERT(ggml_are_same_shape(src0, src1)); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); GGML_ASSERT(ggml_is_contiguous(dst)); int64_t * dst_d = (int64_t *) dst->data; cudaStream_t stream = ctx.stream(); const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne < (1 << 30) && "atomicAdd implementation only supports int"); const int64_t dne = GGML_PAD((ne + 4*nsm - 1) / (4*nsm), CUDA_COUNT_EQUAL_CHUNK_SIZE); CUDA_CHECK(cudaMemsetAsync(dst_d, 0, ggml_nbytes(dst), stream)); const dim3 blocks_dim(WARP_SIZE, 1, 1); const dim3 blocks_num(std::min((int64_t)4*nsm, (ne + CUDA_COUNT_EQUAL_CHUNK_SIZE - 1)/CUDA_COUNT_EQUAL_CHUNK_SIZE), 1, 1); switch (src0->type) { case GGML_TYPE_I32: { const int * src0_d = (const int *) src0->data; const int * src1_d = (const int *) src1->data; count_equal<<>>(src0_d, src1_d, dst_d, dne, ne); } break; default: GGML_ASSERT(false); break; } } ggml-org-ggml-3678254/src/ggml-cuda/count-equal.cuh000066400000000000000000000002201512524704700217430ustar00rootroot00000000000000#include "common.cuh" #define CUDA_COUNT_EQUAL_CHUNK_SIZE 128 void ggml_cuda_count_equal(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/cp-async.cuh000066400000000000000000000041231512524704700212310ustar00rootroot00000000000000// Simplified API for asynchronous data loading. #include "common.cuh" static __device__ __forceinline__ unsigned int ggml_cuda_cvta_generic_to_shared(void * generic_ptr) { #ifdef CP_ASYNC_AVAILABLE return __cvta_generic_to_shared(generic_ptr); #else GGML_UNUSED(generic_ptr); NO_DEVICE_CODE; return 0; #endif // CP_ASYNC_AVAILABLE } // Copies data from global to shared memory, cg == cache global. // Both the src and dst pointers must be aligned to 16 bit. // Shared memory uses 32 bit addressing, the pointer is passed as unsigned int. // Generic pointers can be converted to 32 bit shared memory pointers using __cvta_generic_to_shared. // Only the 16 bit copy is exposed because 4 and 8 bit copies did not yield performance improvements. template static __device__ __forceinline__ void cp_async_cg_16(const unsigned int dst, const void * src) { static_assert(preload == 0 || preload == 64 || preload == 128 || preload == 256, "bad preload"); #ifdef CP_ASYNC_AVAILABLE #if CUDART_VERSION >= 11040 if (preload == 256) { asm volatile("cp.async.cg.shared.global.L2::256B [%0], [%1], 16;" : : "r"(dst), "l"(src)); } else if (preload == 128) { asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], 16;" : : "r"(dst), "l"(src)); } else if (preload == 64) { asm volatile("cp.async.cg.shared.global.L2::64B [%0], [%1], 16;" : : "r"(dst), "l"(src)); } else #endif // CUDART_VERSION >= 11040 { asm volatile("cp.async.cg.shared.global [%0], [%1], 16;" : : "r"(dst), "l"(src)); } #else GGML_UNUSED(dst); GGML_UNUSED(src); NO_DEVICE_CODE; #endif // CP_ASYNC_AVAILABLE } // Makes each thread wait until its asynchronous data copies are done. // This does NOT provide any additional synchronization. // In particular, when copying data with multiple warps a call to __syncthreads will be needed. 
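// Example usage (illustrative sketch only, not part of the upstream code): a typical
// multi-warp pattern is to issue the asynchronous copies, wait for them, and then
// synchronize the whole block before any thread reads the shared tile. `src_global`
// is a hypothetical, 16-byte aligned pointer into global memory.
//
//     __shared__ int4 tile[WARP_SIZE];                                      // 16 bytes per thread
//     const unsigned int dst_sh = ggml_cuda_cvta_generic_to_shared(tile + threadIdx.x);
//     cp_async_cg_16<0>(dst_sh, src_global + threadIdx.x);                  // src_global: const int4 *
//     cp_async_wait_all();  // each thread waits for its own outstanding copies
//     __syncthreads();      // still required before other warps read the tile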
static __device__ __forceinline__ void cp_async_wait_all() { #ifdef CP_ASYNC_AVAILABLE asm volatile("cp.async.wait_all;"); #else NO_DEVICE_CODE; #endif // CP_ASYNC_AVAILABLE } ggml-org-ggml-3678254/src/ggml-cuda/cpy-utils.cuh000066400000000000000000000142741512524704700214550ustar00rootroot00000000000000#pragma once #include "ggml-common.h" #include "convert.cuh" static __device__ __forceinline__ int best_index_int8(int n, const int8_t * val, float x) { if (x <= val[0]) return 0; if (x >= val[n-1]) return n-1; int ml = 0, mu = n-1; while (mu-ml > 1) { int mav = (ml+mu)/2; if (x < val[mav]) mu = mav; else ml = mav; } return x - val[mu-1] < val[mu] - x ? mu-1 : mu; } static __device__ void quantize_f32_q4_0_block(const float * __restrict__ x, block_q4_0 * __restrict__ y) { float amax = 0.0f; float vmax = 0.0f; for (int j = 0; j < QK4_0; ++j) { const float v = x[j]; if (amax < fabsf(v)) { amax = fabsf(v); vmax = v; } } const float d = vmax / -8; const float id = d ? 1.0f/d : 0.0f; y->d = d; for (int j = 0; j < QK4_0/2; ++j) { const float x0 = x[0 + j]*id; const float x1 = x[QK4_0/2 + j]*id; const uint8_t xi0 = min(15, (int8_t)(x0 + 8.5f)); const uint8_t xi1 = min(15, (int8_t)(x1 + 8.5f)); y->qs[j] = xi0; y->qs[j] |= xi1 << 4; } } static __device__ void quantize_f32_q4_1_block(const float * __restrict__ x, block_q4_1 * __restrict__ y) { float vmin = FLT_MAX; float vmax = -FLT_MAX; for (int j = 0; j < QK4_1; ++j) { const float v = x[j]; if (v < vmin) vmin = v; if (v > vmax) vmax = v; } const float d = (vmax - vmin) / ((1 << 4) - 1); const float id = d ? 1.0f/d : 0.0f; y->dm.x = d; y->dm.y = vmin; for (int j = 0; j < QK4_1/2; ++j) { const float x0 = (x[0 + j] - vmin)*id; const float x1 = (x[QK4_1/2 + j] - vmin)*id; const uint8_t xi0 = min(15, (int8_t)(x0 + 0.5f)); const uint8_t xi1 = min(15, (int8_t)(x1 + 0.5f)); y->qs[j] = xi0; y->qs[j] |= xi1 << 4; } } static __device__ void quantize_f32_q5_0_block(const float * __restrict__ x, block_q5_0 * __restrict__ y) { float amax = 0.0f; float vmax = 0.0f; for (int j = 0; j < QK5_0; ++j) { const float v = x[j]; if (amax < fabsf(v)) { amax = fabsf(v); vmax = v; } } const float d = vmax / -16; const float id = d ? 1.0f/d : 0.0f; y->d = d; uint32_t qh = 0; for (int j = 0; j < QK5_0/2; ++j) { const float x0 = x[0 + j]*id; const float x1 = x[QK5_0/2 + j]*id; const uint8_t xi0 = min(31, (int8_t)(x0 + 16.5f)); const uint8_t xi1 = min(31, (int8_t)(x1 + 16.5f)); y->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); } memcpy(y->qh, &qh, sizeof(qh)); } static __device__ void quantize_f32_q5_1_block(const float * __restrict__ x, block_q5_1 * __restrict__ y) { float min = x[0]; float max = x[0]; for (int j = 1; j < QK5_1; ++j) { const float v = x[j]; min = v < min ? v : min; max = v > max ? v : max; } const float d = (max - min) / 31; const float id = d ? 
1.0f/d : 0.0f; y->dm.x = d; y->dm.y = min; uint32_t qh = 0; for (int j = 0; j < QK5_1/2; ++j) { const float x0 = (x[0 + j] - min)*id; const float x1 = (x[QK5_1/2 + j] - min)*id; const uint8_t xi0 = (uint8_t)(x0 + 0.5f); const uint8_t xi1 = (uint8_t)(x1 + 0.5f); y->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); } memcpy(y->qh, &qh, sizeof(qh)); } static __device__ void quantize_f32_q8_0_block(const float * __restrict__ x, block_q8_0 * __restrict__ y) { float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { const float v = x[j]; amax = fmaxf(amax, fabsf(v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; y->d = d; for (int j = 0; j < QK8_0; ++j) { const float x0 = x[j]*id; y->qs[j] = roundf(x0); } } static __device__ void quantize_f32_iq4_nl_block(const float * __restrict__ x, block_iq4_nl * __restrict__ y) { float amax = 0.0f; float vmax = 0.0f; for (int j = 0; j < QK4_NL; ++j) { const float v = x[j]; if (amax < fabsf(v)) { amax = fabsf(v); vmax = v; } } float d = vmax / kvalues_iq4nl[0]; const float id = d ? 1.0f/d : 0.0f; float sumqx = 0, sumq2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { const float x0 = x[0 + j]*id; const float x1 = x[QK4_NL/2 + j]*id; const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0); const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1); y->qs[j] = xi0 | (xi1 << 4); const float v0 = kvalues_iq4nl[xi0]; const float v1 = kvalues_iq4nl[xi1]; const float w0 = x[0 + j]*x[0 + j]; const float w1 = x[QK4_NL/2 + j]*x[QK4_NL/2 + j]; sumqx += w0*v0*x[j] + w1*v1*x[QK4_NL/2 + j]; sumq2 += w0*v0*v0 + w1*v1*v1; } y->d = sumq2 > 0 ? sumqx/sumq2 : d; } // Wrapper functions for cpy.cu compatibility static __device__ void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) { quantize_f32_q4_0_block((const float *)cxi, (block_q4_0 *)cdsti); } static __device__ void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) { quantize_f32_q4_1_block((const float *)cxi, (block_q4_1 *)cdsti); } static __device__ void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) { quantize_f32_q5_0_block((const float *)cxi, (block_q5_0 *)cdsti); } static __device__ void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) { quantize_f32_q5_1_block((const float *)cxi, (block_q5_1 *)cdsti); } static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) { quantize_f32_q8_0_block((const float *)cxi, (block_q8_0 *)cdsti); } static __device__ void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) { quantize_f32_iq4_nl_block((const float *)cxi, (block_iq4_nl *)cdsti); } template static __device__ void cpy_1_scalar(const char * cxi, char * cdsti) { *(dst_t *) cdsti = ggml_cuda_cast(*(const src_t *) cxi); } ggml-org-ggml-3678254/src/ggml-cuda/cpy.cu000066400000000000000000000635101512524704700201440ustar00rootroot00000000000000#include "cpy.cuh" #include "dequantize.cuh" #include "cpy-utils.cuh" #if defined(GGML_USE_MUSA) && defined(GGML_MUSA_MUDNN_COPY) #include "ggml-musa/mudnn.cuh" #endif // GGML_USE_MUSA && GGML_MUSA_MUDNN_COPY typedef void (*cpy_kernel_t)(const char * cx, char * cdst); const int CUDA_CPY_TILE_DIM_2D = 32; // 2D tile dimension for transposed blocks const int CUDA_CPY_BLOCK_NM = 8; // block size of 3rd dimension if available const int CUDA_CPY_BLOCK_ROWS = 8; // block dimension for marching through rows template static __global__ void cpy_scalar(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int 
nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13) { const int64_t i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= ne) { return; } // determine indices i03/i13, i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor // then combine those indices with the corresponding byte offsets to get the total offsets const int64_t i03 = i/(ne00 * ne01 * ne02); const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; const int64_t x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; const int64_t i13 = i/(ne10 * ne11 * ne12); const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13 * nb13; cpy_1(cx + x_offset, cdst + dst_offset); } template static __global__ void cpy_scalar_transpose(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13) { const T* src = reinterpret_cast(cx); T* dst = reinterpret_cast(cdst); const int64_t nmat = ne / (ne00 * ne01); const int64_t n = ne00 * ne01; const int x = blockIdx.x * CUDA_CPY_TILE_DIM_2D + threadIdx.x; const int y = blockIdx.y * CUDA_CPY_TILE_DIM_2D + threadIdx.y; const int tx = blockIdx.y * CUDA_CPY_TILE_DIM_2D + threadIdx.x; // transpose block offset const int ty = blockIdx.x * CUDA_CPY_TILE_DIM_2D + threadIdx.y; __shared__ float tile[CUDA_CPY_TILE_DIM_2D][CUDA_CPY_TILE_DIM_2D+1]; #pragma unroll for (int i = 0; i < CUDA_CPY_BLOCK_NM; ++i) { const unsigned int imat = blockIdx.z * CUDA_CPY_BLOCK_NM + i; if (imat >= nmat) break; #pragma unroll for (int j = 0; j < CUDA_CPY_TILE_DIM_2D; j += CUDA_CPY_BLOCK_ROWS) { if(x < ne01 && y + j < ne00){ const int row = threadIdx.y+j; const int col = threadIdx.x * sizeof(float)/sizeof(T); T *tile2 = reinterpret_cast(tile[row]); tile2[col] = src[imat*n + (y+j)*ne01 + x]; } } __syncthreads(); #pragma unroll for (int j = 0; j < CUDA_CPY_TILE_DIM_2D; j += CUDA_CPY_BLOCK_ROWS) { if (ty + j < ne01 && tx < ne00) { const int col = (threadIdx.y+j)*sizeof(float)/sizeof(T); const T *tile2 = reinterpret_cast(tile[threadIdx.x]); dst[imat*n + (ty+j)*ne00 + tx] = tile2[col]; } } } GGML_UNUSED_VARS(ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static __device__ void cpy_blck_q8_0_f32(const char * cxi, char * cdsti) { float * cdstf = (float *)(cdsti); #pragma unroll for (int j = 0; j < QK8_0; j += 2) { float2 dq; dequantize_q8_0(cxi, 0, j, dq); *(cdstf + j) = dq.x; *(cdstf + j + 1) = dq.y; } } template static __device__ void cpy_blck_q_f32(const char * cxi, char * cdsti) { float * cdstf = (float *)(cdsti); #pragma unroll for (int j = 0; j < qk/2; j++) { float2 dq; dequant(cxi, 0, j, dq); *(cdstf + j) = dq.x; *(cdstf + j + qk/2) = dq.y; } } template static __global__ void cpy_f32_q(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13) { 
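    // Illustrative note (added commentary, not upstream documentation): each thread of
    // this kernel quantizes qk contiguous source floats into a single destination block.
    // For example, with qk == QK8_0 == 32 the thread with flat index t covers the source
    // elements [32*t, 32*t + 32) and writes exactly one block_q8_0 at byte offset
    // (i10/qk)*nb10 + i11*nb11 + i12*nb12 + i13*nb13 in dst, mirroring the index
    // decomposition below.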
const int i = (blockDim.x*blockIdx.x + threadIdx.x)*qk; if (i >= ne) { return; } const int i03 = i/(ne00 * ne01 * ne02); const int i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); const int i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; const int i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; const int x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; const int i13 = i/(ne10 * ne11 * ne12); const int i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); const int i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; const int i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; const int dst_offset = (i10/qk)*nb10 + i11*nb11 + i12*nb12 + i13*nb13; cpy_blck(cx + x_offset, cdst + dst_offset); } template static __global__ void cpy_q_f32(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13) { const int i = (blockDim.x*blockIdx.x + threadIdx.x)*qk; if (i >= ne) { return; } const int i03 = i/(ne00 * ne01 * ne02); const int i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); const int i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; const int i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; const int x_offset = (i00/qk)*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; const int i13 = i/(ne10 * ne11 * ne12); const int i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); const int i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; const int i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; const int dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13; cpy_blck(cx + x_offset, cdst + dst_offset); } template static __global__ void cpy_scalar_contiguous(const char * cx, char * cdst, const int64_t ne) { const int64_t i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= ne) { return; } const src_t * x = (const src_t *) cx; dst_t * dst = (dst_t *) cdst; dst[i] = ggml_cuda_cast(x[i]); } template static void ggml_cpy_scalar_contiguous_cuda( const char * cx, char * cdst, const int64_t ne, cudaStream_t stream) { const int64_t num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; cpy_scalar_contiguous<<>> (cx, cdst, ne); } template static void ggml_cpy_scalar_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { if (transposed) { GGML_ASSERT(ne == ne00*ne01*ne02); // ne[3] is 1 assumed int ne00n, ne01n, ne02n; if (nb00 <= nb02) { // most likely safe to handle nb00 = nb02 case here ne00n = ne00; ne01n = ne01; ne02n = ne02; } else { ne00n = ne00; ne01n = ne01*ne02; ne02n = 1; } dim3 dimGrid( (ne01n + CUDA_CPY_TILE_DIM_2D - 1) / CUDA_CPY_TILE_DIM_2D, (ne00n + CUDA_CPY_TILE_DIM_2D - 1) / CUDA_CPY_TILE_DIM_2D, (ne/(ne01n*ne00n) + CUDA_CPY_BLOCK_NM - 1) / CUDA_CPY_BLOCK_NM); dim3 dimBlock(CUDA_CPY_TILE_DIM_2D, CUDA_CPY_BLOCK_ROWS, 1); cpy_scalar_transpose<<>> (cx, cdst, ne, ne00n, ne01n, ne02n, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } else { const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; cpy_scalar><<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } } static void ggml_cpy_f32_q8_0_cuda( const char * cx, char * cdst, const 
int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { GGML_ASSERT(ne % QK8_0 == 0); const int num_blocks = ne / QK8_0; cpy_f32_q<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_q8_0_f32_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { const int num_blocks = ne; cpy_q_f32<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_f32_q4_0_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { GGML_ASSERT(ne % QK4_0 == 0); const int num_blocks = ne / QK4_0; cpy_f32_q<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_q4_0_f32_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { const int num_blocks = ne; cpy_q_f32, QK4_0><<>>( cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_f32_q4_1_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { GGML_ASSERT(ne % QK4_1 == 0); const int num_blocks = ne / QK4_1; cpy_f32_q<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_q4_1_f32_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { const int num_blocks = ne; cpy_q_f32, QK4_1><<>>( cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_f32_q5_0_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { GGML_ASSERT(ne % QK5_0 == 0); const int num_blocks = ne / QK5_0; cpy_f32_q<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_q5_0_f32_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const 
int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { const int num_blocks = ne; cpy_q_f32, QK5_0><<>>( cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_f32_q5_1_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { GGML_ASSERT(ne % QK5_1 == 0); const int num_blocks = ne / QK5_1; cpy_f32_q<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_q5_1_f32_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { const int num_blocks = ne; cpy_q_f32, QK5_1><<>>( cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } static void ggml_cpy_f32_iq4_nl_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) { GGML_ASSERT(ne % QK4_NL == 0); const int num_blocks = ne / QK4_NL; cpy_f32_q<<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); } void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1) { const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne == ggml_nelements(src1)); GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX); GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX); const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; //GGML_ASSERT(src0->ne[3] == 1); const int64_t nb00 = src0->nb[0]; const int64_t nb01 = src0->nb[1]; const int64_t nb02 = src0->nb[2]; const int64_t nb03 = src0->nb[3]; const int64_t ne10 = src1->ne[0]; const int64_t ne11 = src1->ne[1]; const int64_t ne12 = src1->ne[2]; //GGML_ASSERT(src1->ne[3] == 1); const int64_t nb10 = src1->nb[0]; const int64_t nb11 = src1->nb[1]; const int64_t nb12 = src1->nb[2]; const int64_t nb13 = src1->nb[3]; cudaStream_t main_stream = ctx.stream(); char * src0_ddc = (char *) src0->data; char * src1_ddc = (char *) src1->data; const bool contiguous_srcs = ggml_is_contiguous(src0) && ggml_is_contiguous(src1); const bool can_be_transposed = nb01 == (int64_t)ggml_element_size(src0) && src0->ne[3] == 1 && nb02 == ne00 * ne01 * (int64_t)ggml_element_size(src0); if (src0->type == src1->type && contiguous_srcs) { GGML_ASSERT(ggml_nbytes(src0) == ggml_nbytes(src1)); #if defined(GGML_USE_MUSA) && defined(GGML_MUSA_MUDNN_COPY) if (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16) { CUDA_CHECK(mudnnMemcpyAsync(ctx, src1, src0)); } else #endif // GGML_USE_MUSA && GGML_MUSA_MUDNN_COPY { CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream)); } } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { if (can_be_transposed) { ggml_cpy_scalar_cuda (src0_ddc, 
src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_BF16) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { ggml_cpy_f32_q8_0_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_F32) { ggml_cpy_q8_0_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) { ggml_cpy_f32_q4_0_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q4_0 && src1->type == GGML_TYPE_F32) { ggml_cpy_q4_0_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) { ggml_cpy_f32_q4_1_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q4_1 && src1->type == GGML_TYPE_F32) { ggml_cpy_q4_1_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) { ggml_cpy_f32_q5_0_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q5_0 && src1->type == GGML_TYPE_F32) { ggml_cpy_q5_0_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) { ggml_cpy_f32_iq4_nl_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) { ggml_cpy_f32_q5_1_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_F32) { ggml_cpy_q5_1_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { if (can_be_transposed) { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, 
nb12, nb13, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_BF16) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16) { if (can_be_transposed) { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F16) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) { if (can_be_transposed) { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_I32) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_F32) { if (contiguous_srcs) { ggml_cpy_scalar_contiguous_cuda (src0_ddc, src1_ddc, ne, main_stream); } else { ggml_cpy_scalar_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } } else { GGML_ABORT("%s: unsupported type combination (%s to %s)\n", __func__, ggml_type_name(src0->type), ggml_type_name(src1->type)); } } void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; ggml_cuda_cpy(ctx, src0, dst); } ggml-org-ggml-3678254/src/ggml-cuda/cpy.cuh000066400000000000000000000003431512524704700203070ustar00rootroot00000000000000#include "common.cuh" #define CUDA_CPY_BLOCK_SIZE 64 void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1); void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst); 
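// Sizing note (illustrative only, added commentary): in the element-wise scalar copy path
// each thread handles one element, so a tensor with ne elements is launched with
// ceil(ne / CUDA_CPY_BLOCK_SIZE) blocks of CUDA_CPY_BLOCK_SIZE threads, e.g.
//
//     const int64_t ne         = 1000;                                                 // hypothetical size
//     const int64_t num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; // == 16
//
// matching the grid computation in ggml_cpy_scalar_cuda / ggml_cpy_scalar_contiguous_cuda.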
ggml-org-ggml-3678254/src/ggml-cuda/cross-entropy-loss.cu000066400000000000000000000136741512524704700231640ustar00rootroot00000000000000#include "common.cuh" #include "cross-entropy-loss.cuh" #include "sum.cuh" #include #include template static __global__ void cross_entropy_loss_f32( const float * __restrict__ logits, const float * __restrict__ labels, float * __restrict__ dst, const int nclasses, const int k) { extern __shared__ float tmp[]; logits += int64_t(blockIdx.x)*nclasses; labels += int64_t(blockIdx.x)*nclasses; // Find maximum for softmax: float max_logit = -INFINITY; for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) { const float val = logits[i]; max_logit = fmaxf(max_logit, val); if (use_shared) { tmp[i] = val; } } max_logit = warp_reduce_max(max_logit); // Calculate log(softmax(logits)) which is just logits - max: float sum = 0.0f; for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) { const float logit_i = use_shared ? tmp[i] : logits[i]; sum += expf(logit_i - max_logit); } sum = warp_reduce_sum(sum); sum = logf(sum); // log(exp(logits - max) / sum) = (logits - max) - log(sum) float loss = 0.0f; for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) { const float logit_i = use_shared ? tmp[i] : logits[i]; loss += (logit_i - max_logit - sum) * labels[i]; } loss = -warp_reduce_sum(loss) / (float)k; if (threadIdx.x != 0) { return; } dst[blockIdx.x] = loss; } template static __global__ void cross_entropy_loss_back_f32( const float * __restrict__ grad, const float * __restrict__ logits, const float * __restrict__ labels, float * __restrict__ dst, const int nclasses) { extern __shared__ float tmp[]; logits += int64_t(blockIdx.x)*nclasses; labels += int64_t(blockIdx.x)*nclasses; dst += int64_t(blockIdx.x)*nclasses; float maxval = -INFINITY; for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) { const float val = logits[i]; maxval = fmaxf(maxval, val); if (use_shared) { tmp[i] = val; } } maxval = warp_reduce_max(maxval); float sum = 0.0f; for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) { const float val = expf((use_shared ? tmp[i] : logits[i]) - maxval); sum += val; if (use_shared) { tmp[i] = val; } else { dst[i] = val; } } sum = warp_reduce_sum(sum); const float sm_scale = 1.0f/sum; const float d_by_nrows = *grad/gridDim.x; for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) { const float val = use_shared ? 
tmp[i] : dst[i]; dst[i] = (val*sm_scale - labels[i])*d_by_nrows; } } void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); GGML_ASSERT(ggml_is_contiguous(dst)); const int64_t ne00 = src0->ne[0]; const int64_t nrows = ggml_nrows(src0); const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; ggml_cuda_pool & pool = ctx.pool(); cudaStream_t stream = ctx.stream(); const dim3 blocks_dim(WARP_SIZE, 1, 1); const dim3 blocks_num(nrows, 1, 1); const size_t nbytes_shared = ne00*sizeof(float); const int id = ggml_cuda_get_device(); const size_t smpbo = ggml_cuda_info().devices[id].smpbo; ggml_cuda_pool_alloc dst_tmp(pool, blocks_num.x); if (nbytes_shared <= smpbo) { CUDA_SET_SHARED_MEMORY_LIMIT((cross_entropy_loss_f32), smpbo); cross_entropy_loss_f32<<>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows); } else { cross_entropy_loss_f32<<>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows); } CUDA_CHECK(cudaGetLastError()); // Combine results from individual blocks: sum_f32_cuda(pool, dst_tmp.ptr, dst_d, blocks_num.x, stream); } void ggml_cuda_cross_entropy_loss_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * grad = dst->src[0]; const ggml_tensor * src0f = dst->src[1]; const ggml_tensor * src1f = dst->src[2]; GGML_ASSERT(src0f->type == GGML_TYPE_F32); GGML_ASSERT(src1f->type == GGML_TYPE_F32); GGML_ASSERT( grad->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_scalar(grad)); GGML_ASSERT(ggml_is_contiguous(src0f)); GGML_ASSERT(ggml_is_contiguous(src1f)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_are_same_shape(src0f, src1f)); GGML_ASSERT(ggml_are_same_shape(src0f, dst)); const int64_t ne00 = src0f->ne[0]; const int64_t nrows = ggml_nrows(src0f); const float * grad_d = (const float *) grad->data; const float * src0f_d = (const float *) src0f->data; const float * src1f_d = (const float *) src1f->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); const dim3 blocks_dim(WARP_SIZE, 1, 1); const dim3 blocks_num(nrows, 1, 1); const size_t nbytes_shared = ne00*sizeof(float); const int id = ggml_cuda_get_device(); const size_t smpbo = ggml_cuda_info().devices[id].smpbo; if (nbytes_shared <= smpbo) { CUDA_SET_SHARED_MEMORY_LIMIT((cross_entropy_loss_back_f32), smpbo); cross_entropy_loss_back_f32<<>>(grad_d, src0f_d, src1f_d, dst_d, ne00); } else { cross_entropy_loss_back_f32<<>>(grad_d, src0f_d, src1f_d, dst_d, ne00); } } ggml-org-ggml-3678254/src/ggml-cuda/cross-entropy-loss.cuh000066400000000000000000000003731512524704700233240ustar00rootroot00000000000000#include "common.cuh" #define CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE 256 void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_cross_entropy_loss_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/cumsum.cu000066400000000000000000000222721512524704700206620ustar00rootroot00000000000000#include #include "cumsum.cuh" #include "convert.cuh" #include "ggml-cuda/common.cuh" #include "ggml.h" #ifdef GGML_CUDA_USE_CUB # include #endif // GGML_CUDA_USE_CUB template static __global__ void 
cumsum_cub_kernel( const T * __restrict__ src, T * __restrict__ dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t s1, const int64_t s2, const int64_t s3) { #ifdef GGML_CUDA_USE_CUB using BlockScanT = cub::BlockScan; __shared__ typename BlockScanT::TempStorage temp_storage; __shared__ T block_carry; const int tid = threadIdx.x; constexpr int UNROLL_FACTOR = 4; constexpr int TILE_SIZE = BLOCK_SIZE * UNROLL_FACTOR; const int64_t i1 = blockIdx.x; const int64_t i2 = blockIdx.y; const int64_t i3 = blockIdx.z; if (i1 >= ne01 || i2 >= ne02 || i3 >= ne03) { return; } const T * src_row = src + i1 * s01 + i2 * s02 + i3 * s03; T * dst_row = dst + i1 * s1 + i2 * s2 + i3 * s3; if (tid == 0) { block_carry = 0; } __syncthreads(); for (int64_t start = 0; start < ne00; start += TILE_SIZE) { T items[UNROLL_FACTOR]; T thread_sum = T(0); #pragma unroll for (int i = 0; i < UNROLL_FACTOR; i++) { int64_t idx = start + tid * UNROLL_FACTOR + i; T val = (idx < ne00) ? src_row[idx] : T(0); thread_sum += val; items[i] = thread_sum; } // Block-wide scan on thread sums T thread_prefix; T block_total; BlockScanT(temp_storage).InclusiveSum(thread_sum, thread_prefix, block_total); __syncthreads(); // Add offset to each item and store T thread_offset = thread_prefix - thread_sum + block_carry; #pragma unroll for (int i = 0; i < UNROLL_FACTOR; i++) { int64_t idx = start + tid * UNROLL_FACTOR + i; if (idx < ne00) { dst_row[idx] = items[i] + thread_offset; } } __syncthreads(); // Update carry for next tile if (tid == 0) { block_carry += block_total; } } #else NO_DEVICE_CODE; #endif // GGML_CUDA_USE_CUB } // Fallback kernel implementation template static __global__ void cumsum_kernel( const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t s00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t s0, const int64_t s1, const int64_t s2, const int64_t s3) { GGML_UNUSED_VARS(s00, s0); const int tid = threadIdx.x; constexpr int warp_size = ggml_cuda_get_physical_warp_size(); const int lane = tid % warp_size; const int warp = tid / warp_size; const int warps_per_block = blockDim.x / warp_size; extern __shared__ float smem[]; float * s_vals = smem; float * s_warp_sums = smem + blockDim.x; float * s_carry = smem + blockDim.x + warps_per_block; float * s_chunk_total = s_carry + 1; // Initialize carry if (tid == 0) { *s_carry = 0.0f; } __syncthreads(); const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; const int64_t i1 = blockIdx.x; if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { return; } const T * src_row = src + i1 * s01 + i2 * s02 + i3 * s03; T * dst_row = dst + i1 * s1 + i2 * s2 + i3 * s3; // register blocking: process 4 elements per thread to hide latency // and reduce synchronization overhead constexpr int num_unroll = 4; T temp[num_unroll]; for (int64_t i = 0; i < ne00; i += num_unroll * blockDim.x) { int64_t idx = i + tid * num_unroll; // thread local sequential scan temp[0] = (idx < ne00 ? src_row[idx] : T(0)); #pragma unroll for (int64_t j = 1; j < num_unroll; j++) { temp[j] = temp[j - 1]; if (idx + j < ne00) { temp[j] += src_row[idx + j]; } else { temp[j] += 0; } } // last emenent is sum of all values assigned to thread float val = (idx < ne00) ? 
ggml_cuda_cast(temp[num_unroll - 1]) : 0.0f; // Warp inclusive scan val = warp_prefix_inclusive_sum(val); s_vals[tid] = val; if (lane == warp_size - 1) { s_warp_sums[warp] = val; } __syncthreads(); // Exclusive scan of warp sums (warp 0 only) if (warp == 0) { float w = (tid < warps_per_block) ? s_warp_sums[tid] : 0.0f; float inc = warp_prefix_inclusive_sum(w); if (tid < warps_per_block) { s_warp_sums[tid] = inc - w; // exclusive sum } if (tid == warps_per_block - 1) { *s_chunk_total = inc; // total sum of this chunk } } __syncthreads(); // write back results float carry = *s_carry; // calculate sum offset for this thread float final_val_offset = s_vals[tid] + s_warp_sums[warp] + carry - temp[num_unroll - 1]; #pragma unroll for (int32_t j = 0; j < num_unroll; j++) { if (idx + j < ne00) { dst_row[idx + j] = temp[j] + ggml_cuda_cast(final_val_offset); } } __syncthreads(); // Update carry for next chunk if (tid == 0) { *s_carry += *s_chunk_total; } } } template static void cumsum_cuda( const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t nb00, const int64_t nb01, const int64_t nb02, const int64_t nb03, const int64_t nb0, const int64_t nb1, const int64_t nb2, const int64_t nb3, cudaStream_t stream) { const size_t type_size = sizeof(T); bool use_cub = false; #ifdef GGML_CUDA_USE_CUB // Check if we can use CUB (data must be contiguous along innermost dimension) const bool is_contiguous = (nb00 == type_size) && (nb0 == type_size); if (is_contiguous) { use_cub = true; } #endif // GGML_CUDA_USE_CUB dim3 grid_dims(ne01, ne02, ne03); const auto &info = ggml_cuda_info().devices[ggml_cuda_get_device()]; const int warp_size = info.warp_size; const int num_warps = (ne00 + warp_size - 1) / warp_size; int block_size = num_warps * warp_size; block_size = std::min(block_size, CUDA_CUMSUM_BLOCK_SIZE); dim3 block_dims(block_size, 1, 1); const int warps_per_block = block_size / warp_size; const size_t shmem_size = (block_size + warps_per_block + 2) * sizeof(float); if (use_cub && ne00 >= 1024) { cumsum_cub_kernel<<>>( src, dst, ne00, ne01, ne02, ne03, nb01 / type_size, nb02 / type_size, nb03 / type_size, nb1 / type_size, nb2 / type_size, nb3 / type_size ); } else { cumsum_kernel<<>>( src, dst, ne00, ne01, ne02, ne03, nb00 / type_size, nb01 / type_size, nb02 / type_size, nb03 / type_size, nb0 / type_size, nb1 / type_size, nb2 / type_size, nb3 / type_size ); } } void ggml_cuda_op_cumsum(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == dst->type); switch(src0->type) { case GGML_TYPE_F32: { cumsum_cuda( (const float *)src0->data, (float *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], stream ); } break; // We do not support those on CPU for now anyway, so comment them out because they cause errors on some CI platforms /*case GGML_TYPE_F16: { cumsum_cuda( (const half *)src0->data, (half *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], stream ); } break; case GGML_TYPE_BF16: { cumsum_cuda( (const nv_bfloat16 *)src0->data, (nv_bfloat16 *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], stream ); } break;*/ 
default: GGML_ABORT("fatal error"); } } ggml-org-ggml-3678254/src/ggml-cuda/cumsum.cuh000066400000000000000000000002111512524704700210170ustar00rootroot00000000000000#include "common.cuh" #define CUDA_CUMSUM_BLOCK_SIZE 256 void ggml_cuda_op_cumsum(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/dequantize.cuh000066400000000000000000000040231512524704700216640ustar00rootroot00000000000000#include "common.cuh" static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int64_t ib, const int iqs, float2 & v){ const block_q4_0 * x = (const block_q4_0 *) vx; const float d = x[ib].d; const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; v.x = (v.x - 8.0f) * d; v.y = (v.y - 8.0f) * d; } static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int64_t ib, const int iqs, float2 & v){ const block_q4_1 * x = (const block_q4_1 *) vx; const float2 dm = __half22float2(x[ib].dm); const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; v.x = (v.x * dm.x) + dm.y; v.y = (v.y * dm.x) + dm.y; } static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int64_t ib, const int iqs, float2 & v){ const block_q5_0 * x = (const block_q5_0 *) vx; const float d = x[ib].d; uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); v.x = (v.x - 16.0f) * d; v.y = (v.y - 16.0f) * d; } static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int64_t ib, const int iqs, float2 & v){ const block_q5_1 * x = (const block_q5_1 *) vx; const float2 dm = __half22float2(x[ib].dm); uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); v.x = (v.x * dm.x) + dm.y; v.y = (v.y * dm.x) + dm.y; } static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int64_t ib, const int iqs, float2 & v){ const block_q8_0 * x = (const block_q8_0 *) vx; const float d = x[ib].d; v.x = x[ib].qs[iqs + 0]; v.y = x[ib].qs[iqs + 1]; v.x *= d; v.y *= d; } ggml-org-ggml-3678254/src/ggml-cuda/diag.cu000066400000000000000000000051541512524704700202550ustar00rootroot00000000000000#include "convert.cuh" #include "diag.cuh" #include "ggml.h" template static __global__ void diag_kernel(T * __restrict__ dst, const T * __restrict__ src, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, const int64_t total_elements) { const int64_t global_idx = blockIdx.x * blockDim.x + threadIdx.x; if (global_idx >= total_elements) { return; } const int64_t i0 = global_idx % ne0; const int64_t i1 = (global_idx / ne0) % ne1; const int64_t i2 = (global_idx / (ne0 * ne1)) % ne2; const int64_t i3 = global_idx / (ne0 * ne1 * ne2); const int64_t dst_idx = ((i3 * ne2 + i2) * ne1 + i1) * ne0 + i0; if (i0 == i1) { const int64_t batch_idx = i3 * ne2 + i2; const int64_t src_idx = batch_idx * ne0 + i0; dst[dst_idx] = src[src_idx]; } else { dst[dst_idx] = ggml_cuda_cast(0); } GGML_UNUSED_VARS(ne3); } void ggml_cuda_op_diag(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; void * dst_d = dst->data; const void * src0_d = src0->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_is_contiguous(src0)); const int64_t 
ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; const int64_t ne03 = src0->ne[3]; const int64_t ne0 = dst->ne[0]; const int64_t ne1 = dst->ne[1]; const int64_t ne2 = dst->ne[2]; const int64_t ne3 = dst->ne[3]; GGML_ASSERT(ne00 == ne0); GGML_ASSERT(ne01 == 1); GGML_ASSERT(ne02 == ne2); GGML_ASSERT(ne03 == ne3); const int64_t n_elems = ggml_nelements(dst); const int64_t num_blocks = (n_elems + CUDA_DIAG_BLOCK_SIZE - 1) / CUDA_DIAG_BLOCK_SIZE; switch (dst->type) { case GGML_TYPE_F32: diag_kernel<<>>((float *) dst_d, (const float *) src0_d, ne0, ne1, ne2, ne3, n_elems); break; case GGML_TYPE_F16: diag_kernel<<>>((half *) dst_d, (const half *) src0_d, ne0, ne1, ne2, ne3, n_elems); break; default: GGML_ABORT("unsupported type"); } } ggml-org-ggml-3678254/src/ggml-cuda/diag.cuh000066400000000000000000000002051512524704700204150ustar00rootroot00000000000000#include "common.cuh" #define CUDA_DIAG_BLOCK_SIZE 256 void ggml_cuda_op_diag(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/diagmask.cu000066400000000000000000000033401512524704700211240ustar00rootroot00000000000000#include "diagmask.cuh" static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) { const int col = blockDim.y*blockIdx.y + threadIdx.y; const int row = blockDim.x*blockIdx.x + threadIdx.x; if (col >= ncols) { return; } const int i = row*ncols + col; //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i]; //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX; } static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, cudaStream_t stream) { const dim3 block_dims(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1); const int block_num_x = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE; const dim3 block_nums(nrows_x, block_num_x, 1); diag_mask_inf_f32<<>>(x, dst, ncols_x, rows_per_channel, n_past); } void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int nrows0 = ggml_nrows(src0); const int n_past = ((int32_t *) dst->op_params)[0]; diag_mask_inf_f32_cuda(src0_d, dst_d, ne00, nrows0, ne01, n_past, stream); } ggml-org-ggml-3678254/src/ggml-cuda/diagmask.cuh000066400000000000000000000002261512524704700212740ustar00rootroot00000000000000#include "common.cuh" #define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32 void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/fattn-common.cuh000066400000000000000000001074431512524704700221270ustar00rootroot00000000000000#pragma once #include "common.cuh" #include "convert.cuh" #include "vecdotq.cuh" #include #define FATTN_KQ_STRIDE 256 #define HALF_MAX_HALF __float2half(65504.0f/2) // Use neg. of this instead of -INFINITY to initialize KQ max vals to avoid NaN upon subtraction. #define SOFTMAX_FTZ_THRESHOLD -20.0f // Softmax exp. 
of values smaller than this are flushed to zero to avoid NaNs. // log(2) = 0.6931, by adding this to the KQ maximum used for the softmax the numerical range representable // by the VKQ accumulators is effectively being shifted up by a factor of 8. // This reduces issues with numerical overflow but also causes larger values to be flushed to zero. // However, as the output from FlashAttention will usually be used as an input for a matrix multiplication this should be negligible. #define FATTN_KQ_MAX_OFFSET 0.6931f typedef void (* fattn_kernel_t)( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, const char * __restrict__ sinks, const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, const float max_bias, const float m0, const float m1, const uint32_t n_head_log2, const float logit_softcap, const int32_t ne00, const uint3 ne01, const int32_t ne02, const int32_t ne03, const int32_t nb01, const int32_t nb02, const int32_t nb03, const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, const int32_t nb11, const int32_t nb12, const int64_t nb13, const int32_t nb21, const int32_t nb22, const int64_t nb23, const int32_t ne31, const int32_t ne32, const int32_t ne33, const int32_t nb31, const int32_t nb32, const int64_t nb33); typedef float (*vec_dot_KQ_t)( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds); template static __device__ __forceinline__ float vec_dot_fattn_vec_KQ_f16( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds_v) { const half2 * K_h2 = (const half2 *) K_c; GGML_UNUSED(Q_q8); GGML_UNUSED(Q_ds_v); constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; float sum = 0.0f; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += nthreads*cpy_ne) { half2 tmp[cpy_ne]; ggml_cuda_memcpy_1(tmp, K_h2 + k_KQ_0 + (threadIdx.x % nthreads)*cpy_ne); #pragma unroll for (int k_KQ_1 = 0; k_KQ_1 < cpy_ne; ++k_KQ_1) { #ifdef V_DOT2_F32_F16_AVAILABLE ggml_cuda_mad(sum, tmp[k_KQ_1] , ((const half2 *) Q_v)[k_KQ_0/nthreads + k_KQ_1]); #else ggml_cuda_mad(sum, __half22float2(tmp[k_KQ_1]), ((const float2 *) Q_v)[k_KQ_0/nthreads + k_KQ_1]); #endif // V_DOT2_F32_F16_AVAILABLE } } return sum; } template static __device__ __forceinline__ float vec_dot_fattn_vec_KQ_q4_0( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q4_0 * K_q4_0 = (const block_q4_0 *) K_c; GGML_UNUSED(Q_v); float sum = 0.0f; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += nthreads) { const int k_KQ = k_KQ_0 + (nthreads == WARP_SIZE ? 
threadIdx.x : threadIdx.x % nthreads); const int ib = k_KQ / QI8_1; const int iqs4 = k_KQ % QI4_0; const int shift = k_KQ & (QI8_1/2); int v; ggml_cuda_memcpy_1(&v, K_q4_0[ib].qs + sizeof(int)*iqs4); v = (v >> shift) & 0x0F0F0F0F; const int u = Q_q8[k_KQ_0/nthreads]; const int sumi = ggml_cuda_dp4a(v, u, 0); const float2 Q_ds = ((const float2 *) Q_ds_v)[k_KQ_0/nthreads]; sum += __half2float(K_q4_0[ib].d) * (sumi*Q_ds.x - (8/QI8_1)*Q_ds.y); } return sum; } template static __device__ __forceinline__ float vec_dot_fattn_vec_KQ_q4_1( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q4_1 * K_q4_1 = (const block_q4_1 *) K_c; GGML_UNUSED(Q_v); float sum = 0.0f; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += nthreads) { const int k_KQ = k_KQ_0 + (nthreads == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads); const int ib = k_KQ / QI8_1; const int iqs4 = k_KQ % QI4_1; const int shift = k_KQ & (QI8_1/2); int v; ggml_cuda_memcpy_1(&v, K_q4_1[ib].qs + sizeof(int)*iqs4); v = (v >> shift) & 0x0F0F0F0F; const int u = Q_q8[k_KQ_0/nthreads]; const int sumi = ggml_cuda_dp4a(v, u, 0); const float2 K_dm = __half22float2(K_q4_1[ib].dm); const float2 Q_ds = ((const float2 *) Q_ds_v)[k_KQ_0/nthreads]; sum += K_dm.x*Q_ds.x*sumi + K_dm.y*Q_ds.y/QI8_1; } return sum; } template static __device__ __forceinline__ float vec_dot_fattn_vec_KQ_q5_0( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q5_0 * K_q5_0 = (const block_q5_0 *) K_c; GGML_UNUSED(Q_v); float sum = 0.0f; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += nthreads) { const int k_KQ = k_KQ_0 + (nthreads == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads); const int ib = k_KQ / QI8_1; const int iqs4 = k_KQ % QI5_0; const int iqs8 = k_KQ % QI8_1; const int shift = k_KQ & (QI8_1/2); int v; ggml_cuda_memcpy_1(&v, K_q5_0[ib].qs + sizeof(int)*iqs4); v = (v >> shift) & 0x0F0F0F0F; { int vh; ggml_cuda_memcpy_1(&vh, K_q5_0[ib].qh); vh >>= iqs8 * QI5_0; v |= (vh << 4) & 0x00000010; // 0 -> 4 v |= (vh << 11) & 0x00001000; // 1 -> 12 v |= (vh << 18) & 0x00100000; // 2 -> 20 v |= (vh << 25) & 0x10000000; // 3 -> 28 } const int u = Q_q8[k_KQ_0/nthreads]; const int sumi = ggml_cuda_dp4a(v, u, 0); const float2 Q_ds = ((const float2 *) Q_ds_v)[k_KQ_0/nthreads]; sum += __half2float(K_q5_0[ib].d) * (sumi*Q_ds.x - (16/QI8_1)*Q_ds.y); } return sum; } template static __device__ __forceinline__ float vec_dot_fattn_vec_KQ_q5_1( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q5_1 * K_q5_1 = (const block_q5_1 *) K_c; GGML_UNUSED(Q_v); float sum = 0.0f; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += nthreads) { const int k_KQ = k_KQ_0 + (nthreads == WARP_SIZE ? 
threadIdx.x : threadIdx.x % nthreads); const int ib = k_KQ / QI8_1; const int iqs4 = k_KQ % QI5_1; const int iqs8 = k_KQ % QI8_1; const int shift = k_KQ & (QI8_1/2); int v; ggml_cuda_memcpy_1(&v, K_q5_1[ib].qs + sizeof(int)*iqs4); v = (v >> shift) & 0x0F0F0F0F; { int vh; ggml_cuda_memcpy_1(&vh, K_q5_1[ib].qh); vh >>= iqs8 * QI5_0; v |= (vh << 4) & 0x00000010; // 0 -> 4 v |= (vh << 11) & 0x00001000; // 1 -> 12 v |= (vh << 18) & 0x00100000; // 2 -> 20 v |= (vh << 25) & 0x10000000; // 3 -> 28 } const int u = Q_q8[k_KQ_0/nthreads]; const int sumi = ggml_cuda_dp4a(v, u, 0); const float2 K_dm = __half22float2(K_q5_1[ib].dm); const float2 Q_ds = ((const float2 *) Q_ds_v)[k_KQ_0/nthreads]; sum += K_dm.x*Q_ds.x*sumi + K_dm.y*Q_ds.y/QI8_1; } return sum; } template static __device__ __forceinline__ float vec_dot_fattn_vec_KQ_q8_0( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) { const block_q8_0 * K_q8_0 = (const block_q8_0 *) K_c; GGML_UNUSED(Q_v); float sum = 0.0f; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += nthreads) { const int k_KQ = k_KQ_0 + (nthreads == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads); const int ib = k_KQ / QI8_0; const int iqs = k_KQ % QI8_0; int v; ggml_cuda_memcpy_1(&v, K_q8_0[ib].qs + 4*iqs); const float2 * Q_ds = (const float2 *) Q_ds_v; const float Q_d = Q_ds[k_KQ_0/nthreads].x; sum += vec_dot_q8_0_q8_1_impl(&v, &Q_q8[k_KQ_0/nthreads], K_q8_0[ib].d, Q_d); } return sum; } template static __device__ __forceinline__ void quantize_q8_1_to_shared( const float * __restrict__ x, const float scale, int * __restrict__ yq32, void * __restrict__ yds) { float vals[sizeof(int)] = {0.0f}; #pragma unroll for (int l = 0; l < int(sizeof(int)); ++l) { vals[l] = (ni == WARP_SIZE || threadIdx.x < ni) ? 
scale * x[4*threadIdx.x + l] : 0.0f; } float amax = fabsf(vals[0]); float sum = vals[0]; #pragma unroll for (int l = 1; l < int(sizeof(int)); ++l) { amax = fmaxf(amax, fabsf(vals[l])); sum += vals[l]; } #pragma unroll for (int mask = QI8_1/2; mask > 0; mask >>= 1) { amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, mask, 32)); sum += __shfl_xor_sync(0xFFFFFFFF, sum, mask, 32); } const float d = amax / 127; int q32 = 0; int8_t * q8 = (int8_t *) &q32; if (d != 0.0f) { #pragma unroll for (int l = 0; l < int(sizeof(int)); ++l) { q8[l] = roundf(vals[l] / d); } } yq32[threadIdx.x] = q32; if (threadIdx.x % QI8_1 == 0 && (ni == WARP_SIZE || threadIdx.x < ni)) { if (std::is_same::value) { ((half2 *) yds)[threadIdx.x/QI8_1] = make_half2(d, sum); } else { ((float2 *) yds)[threadIdx.x/QI8_1] = make_float2(d, sum); } } } typedef void (*dequantize_V_t)(const void *, void *, const int64_t); template static __device__ __forceinline__ void dequantize_V_f16(const void * __restrict__ vx, void * __restrict__ dst, const int64_t i0) { if constexpr (std::is_same_v) { ggml_cuda_memcpy_1(dst, (const half *) vx + i0); } else if constexpr (std::is_same_v) { static_assert(ne % 2 == 0, "bad ne"); half2 tmp[ne/2]; ggml_cuda_memcpy_1(tmp, (const half *) vx + i0); float2 * dst_f2 = (float2 *) dst; #pragma unroll for (int l = 0; l < ne/2; ++l) { dst_f2[l] = __half22float2(tmp[l]); } } else { static_assert(std::is_same_v, "unsupported type"); } } template static __device__ __forceinline__ void dequantize_V_q4_0(const void * __restrict__ vx, void * __restrict__ dst, const int64_t i0) { const block_q4_0 * x = (const block_q4_0 *) vx; const int64_t ib = i0 / QK4_0; const int iqs = i0 % (QK4_0/2); const int shift = (i0 % QK4_0) / (QK4_0/2); int q; static_assert(ne == 2 || ne == 4, "bad ne"); ggml_cuda_memcpy_1(&q, x[ib].qs + iqs); q >>= 4*shift; q &= 0x0F0F0F0F; q = __vsubss4(q, 0x08080808); const int8_t * q8 = (const int8_t *) &q; #ifdef FP16_AVAILABLE if constexpr (std::is_same_v) { const half2 d = __half2half2(x[ib].d); #pragma unroll for (int l0 = 0; l0 < ne; l0 += 2) { ((half2 *) dst)[l0/2] = d * make_half2(q8[l0 + 0], q8[l0 + 1]); } } else #endif // FP16_AVAILABLE if constexpr (std::is_same_v) { const float d = x[ib].d; #pragma unroll for (int l = 0; l < ne; ++l) { ((float *) dst)[l] = d * q8[l]; } } else { static_assert(std::is_same_v, "bad type"); } } template static __device__ __forceinline__ void dequantize_V_q4_1(const void * __restrict__ vx, void * __restrict__ dst, const int64_t i0) { const block_q4_1 * x = (const block_q4_1 *) vx; const int64_t ib = i0 / QK4_1; const int iqs = i0 % (QK4_1/2); const int shift = (i0 % QK4_1) / (QK4_1/2); int q; static_assert(ne == 2 || ne == 4, "bad ne"); ggml_cuda_memcpy_1(&q, x[ib].qs + iqs); q >>= 4*shift; q &= 0x0F0F0F0F; const int8_t * q8 = (const int8_t *) &q; #ifdef FP16_AVAILABLE if constexpr (std::is_same_v) { const half2 dm = x[ib].dm; const half2 d = __half2half2( __low2half(dm)); const half2 m = __half2half2(__high2half(dm)); #pragma unroll for (int l0 = 0; l0 < ne; l0 += 2) { ((half2 *) dst)[l0/2] = d * make_half2(q8[l0 + 0], q8[l0 + 1]) + m; } } else #endif // FP16_AVAILABLE if constexpr (std::is_same_v) { const float2 dm = __half22float2(x[ib].dm); #pragma unroll for (int l = 0; l < ne; ++l) { ((float *) dst)[l] = dm.x * q8[l] + dm.y; } } else { static_assert(std::is_same_v, "bad type"); } } template static __device__ __forceinline__ void dequantize_V_q5_0(const void * __restrict__ vx, void * __restrict__ dst, const int64_t i0) { const block_q5_0 * x = (const 
block_q5_0 *) vx; const int64_t ib = i0 / QK5_0; const int idq = i0 % QK5_0; const int iqs = i0 % (QK5_0/2); const int shift = (i0 % QK5_0) / (QK5_0/2); int q; static_assert(ne == 2 || ne == 4, "bad ne"); ggml_cuda_memcpy_1(&q, x[ib].qs + iqs); q >>= 4*shift; q &= 0x0F0F0F0F; { int qh; ggml_cuda_memcpy_1(&qh, x[ib].qh); #pragma unroll for (int l = 0; l < ne; ++l) { q |= ((qh >> (idq + l)) & 0x00000001) << (8*l + 4); } } q = __vsubss4(q, 0x10101010); const int8_t * q8 = (const int8_t *) &q; #ifdef FP16_AVAILABLE if constexpr (std::is_same_v) { const half2 d = __half2half2(x[ib].d); #pragma unroll for (int l0 = 0; l0 < ne; l0 += 2) { ((half2 *) dst)[l0/2] = d * make_half2(q8[l0 + 0], q8[l0 + 1]); } } else #endif // FP16_AVAILABLE if constexpr (std::is_same_v) { const float d = x[ib].d; #pragma unroll for (int l = 0; l < ne; ++l) { ((float *) dst)[l] = d * q8[l]; } } else { static_assert(std::is_same_v, "bad type"); } } template static __device__ __forceinline__ void dequantize_V_q5_1(const void * __restrict__ vx, void * __restrict__ dst, const int64_t i0) { const block_q5_1 * x = (const block_q5_1 *) vx; const int64_t ib = i0 / QK5_1; const int idq = i0 % QK5_1; const int iqs = i0 % (QK5_1/2); const int shift = (i0 % QK5_1) / (QK5_1/2); int q; static_assert(ne == 2 || ne == 4, "bad ne"); ggml_cuda_memcpy_1(&q, x[ib].qs + iqs); q >>= 4*shift; q &= 0x0F0F0F0F; { int qh; ggml_cuda_memcpy_1(&qh, x[ib].qh); #pragma unroll for (int l = 0; l < ne; ++l) { q |= ((qh >> (idq + l)) & 0x00000001) << (8*l + 4); } } const int8_t * q8 = (const int8_t *) &q; #ifdef FP16_AVAILABLE if constexpr (std::is_same_v) { const half2 dm = x[ib].dm; const half2 d = __half2half2( __low2half(dm)); const half2 m = __half2half2(__high2half(dm)); #pragma unroll for (int l0 = 0; l0 < ne; l0 += 2) { ((half2 *) dst)[l0/2] = d * make_half2(q8[l0 + 0], q8[l0 + 1]) + m; } } else #endif // FP16_AVAILABLE if constexpr (std::is_same_v) { const float2 dm = __half22float2(x[ib].dm); #pragma unroll for (int l = 0; l < ne; ++l) { ((float *) dst)[l] = dm.x * q8[l] + dm.y; } } else { static_assert(std::is_same_v, "bad type"); } } template static __device__ __forceinline__ void dequantize_V_q8_0(const void * __restrict__ vx, void * __restrict__ dst, const int64_t i0) { const block_q8_0 * x = (const block_q8_0 *) vx; const int64_t ib = i0 / QK8_0; const int iqs = i0 % QK8_0; static_assert(ne % 2 == 0, "bad ne"); int8_t qs[ne]; ggml_cuda_memcpy_1(qs, x[ib].qs + iqs); #ifdef FP16_AVAILABLE if constexpr (std::is_same::value) { const half2 d = __half2half2(x[ib].d); #pragma unroll for (int l0 = 0; l0 < ne; l0 += 2) { ((half2 *) dst)[l0/2] = d * make_half2(qs[l0 + 0], qs[l0 + 1]); } } else #endif // FP16_AVAILABLE if constexpr (std::is_same::value) { const float d = x[ib].d; #pragma unroll for (int l = 0; l < ne; ++l) { ((float *) dst)[l] = d * qs[l]; } } else { static_assert(std::is_same_v, "unsupported type"); } } template constexpr __device__ vec_dot_KQ_t get_vec_dot_KQ() { if constexpr (type_K == GGML_TYPE_F16) { return vec_dot_fattn_vec_KQ_f16; } else if constexpr (type_K == GGML_TYPE_Q4_0) { return vec_dot_fattn_vec_KQ_q4_0; } else if constexpr (type_K == GGML_TYPE_Q4_1) { return vec_dot_fattn_vec_KQ_q4_1; } else if constexpr (type_K == GGML_TYPE_Q5_0) { return vec_dot_fattn_vec_KQ_q5_0; } else if constexpr (type_K == GGML_TYPE_Q5_1) { return vec_dot_fattn_vec_KQ_q5_1; } else if constexpr (type_K == GGML_TYPE_Q8_0) { return vec_dot_fattn_vec_KQ_q8_0; } else { static_assert(type_K == -1, "bad type"); return nullptr; } } template constexpr 
__device__ dequantize_V_t get_dequantize_V() { if constexpr (type_V == GGML_TYPE_F16) { return dequantize_V_f16; } else if constexpr (type_V == GGML_TYPE_Q4_0) { return dequantize_V_q4_0; } else if constexpr (type_V == GGML_TYPE_Q4_1) { return dequantize_V_q4_1; } else if constexpr (type_V == GGML_TYPE_Q5_0) { return dequantize_V_q5_0; } else if constexpr (type_V == GGML_TYPE_Q5_1) { return dequantize_V_q5_1; } else if constexpr (type_V == GGML_TYPE_Q8_0) { return dequantize_V_q8_0; } else { static_assert(type_V == -1, "bad type"); return nullptr; } } template __launch_bounds__(FATTN_KQ_STRIDE/2, 1) static __global__ void flash_attn_mask_to_KV_max( const half2 * __restrict__ mask, int * __restrict__ KV_max, const int ne30, const int s31, const int s33) { const int ne31 = gridDim.x; const int tid = threadIdx.x; const int sequence = blockIdx.y; const int jt = blockIdx.x; mask += sequence*s33 + jt*ncols1*s31; __shared__ int buf_iw[WARP_SIZE]; if (tid < WARP_SIZE) { buf_iw[tid] = 1; } __syncthreads(); int KV_max_sj = (ne30 - 1) * FATTN_KQ_STRIDE; for (; KV_max_sj >= 0; KV_max_sj -= FATTN_KQ_STRIDE) { int all_inf = 1; #pragma unroll for (int j = 0; j < ncols1; ++j) { const float2 tmp = __half22float2(mask[j*s31 + KV_max_sj/2 + tid]); all_inf = all_inf && int(isinf(tmp.x)) && int(isinf(tmp.y)); } all_inf = warp_reduce_all(all_inf); if (tid % WARP_SIZE == 0) { buf_iw[tid / WARP_SIZE] = all_inf; } __syncthreads(); all_inf = buf_iw[tid % WARP_SIZE]; __syncthreads(); all_inf = warp_reduce_all(all_inf); if (!all_inf) { break; } } // If the break in the loop was not triggered, KV_max_sj is now -FATTN_KQ_STRIDE. // If the break was triggered it's the lower edge of the tile with the first non-masked values. // In either case, walk back the decrementation by FATTN_KQ_STRIDE. KV_max_sj += FATTN_KQ_STRIDE; if (threadIdx.x != 0) { return; } KV_max[sequence*ne31 + jt] = KV_max_sj; } template // D == head size __launch_bounds__(D, 1) static __global__ void flash_attn_stream_k_fixup( float * __restrict__ dst, const float2 * __restrict__ dst_fixup, const int ne01, const int ne02, const int ne03, const int ne11, const int nbatch_fa) { constexpr int ncols = ncols1*ncols2; const int bidx0 = blockIdx.x; const int j = blockIdx.y; const int c = blockIdx.z; const int jc = j*ncols2 + c; const int tid = threadIdx.x; const float * dst_fixup_data = ((const float *) dst_fixup) + gridDim.x*(2*2*ncols); const int iter_k = (ne11 + (nbatch_fa - 1)) / nbatch_fa; const int iter_j = (ne01 + (ncols1 - 1)) / ncols1; const int kbc0 = int64_t(bidx0 + 0)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; const int kbc0_stop = int64_t(bidx0 + 1)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; const bool did_not_have_any_data = kbc0 == kbc0_stop; const bool wrote_beginning_of_tile = kbc0 % iter_k == 0; const bool did_not_write_last = kbc0/iter_k == kbc0_stop/iter_k && kbc0_stop % iter_k != 0; if (did_not_have_any_data || wrote_beginning_of_tile || did_not_write_last) { return; } const int sequence = kbc0 / (iter_k*iter_j*(ne02/ncols2)); const int head = (kbc0 - iter_k*iter_j*(ne02/ncols2)*sequence) / (iter_k*iter_j); const int jt = (kbc0 - iter_k*iter_j*(ne02/ncols2)*sequence - iter_k*iter_j*head) / iter_k; // j index of current tile. 
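// Worked example of the indexing above (illustrative numbers, not taken from the source): assume
// iter_k = 4, iter_j = 2, ne02/ncols2 = 3, ne03 = 1 and gridDim.x = 5. The total number of work units is
// iter_k*iter_j*(ne02/ncols2)*ne03 = 24 and the blocks get the half-open ranges [0,4), [4,9), [9,14),
// [14,19), [19,24). For block 1 (kbc0 = 4): sequence = 4/24 = 0, head = 4/(4*2) = 0, jt = (4 - 0 - 0)/4 = 1;
// since kbc0 % iter_k == 0 it wrote the beginning of its KV tile and returns without a fixup. Block 2
// (kbc0 = 9) continued the tile that block 1 started, so it wrote its own partial output to dst and merges
// block 1's contribution from dst_fixup here.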
if (jt*ncols1 + j >= ne01) { return; } dst += sequence*ne02*ne01*D + jt*ne02*(ncols1*D) + head*(ncols2*D) + (j*ne02 + c)*D + tid; // Load the partial result that needs a fixup: float dst_val = 0.0f; float max_val = 0.0f; float rowsum = 0.0f; { dst_val = *dst; const float2 tmp = dst_fixup[bidx0*ncols + jc]; max_val = tmp.x; rowsum = tmp.y; } // Iterate over previous blocks and compute the combined results. // All CUDA blocks that get here must have a previous block that needs a fixup. int bidx = bidx0 - 1; int kbc_stop = kbc0; while(true) { const int kbc = int64_t(bidx)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; if (kbc == kbc_stop) { // Did not have any data. bidx--; kbc_stop = kbc; continue; } const float dst_add = dst_fixup_data[bidx*ncols*D + jc*D + tid]; const float2 tmp = dst_fixup[(gridDim.x + bidx)*ncols + jc]; // Scale the current and new value accumulators depending on the max. values. const float max_val_new = fmaxf(max_val, tmp.x); const float diff_val = max_val - max_val_new; const float diff_add = tmp.x - max_val_new; const float scale_val = diff_val >= SOFTMAX_FTZ_THRESHOLD ? expf(diff_val) : 0.0f; const float scale_add = diff_add >= SOFTMAX_FTZ_THRESHOLD ? expf(diff_add) : 0.0f; dst_val = scale_val*dst_val + scale_add*dst_add; rowsum = scale_val*rowsum + scale_add*tmp.y; max_val = max_val_new; // If this block started in a previous tile we are done and don't need to combine additional partial results. if (kbc % iter_k == 0 || kbc/iter_k < kbc0/iter_k) { break; } bidx--; kbc_stop = kbc; } // Write back final result: *dst = dst_val / rowsum; } template // D == head size __launch_bounds__(D, 1) static __global__ void flash_attn_combine_results( const float * __restrict__ VKQ_parts, const float2 * __restrict__ VKQ_meta, float * __restrict__ dst, const int parallel_blocks) { // Dimension 0: threadIdx.x // Dimension 1: blockIdx.x // Dimension 2: blockIdx.y // Dimension 3: blockIdx.z // Memory layout is permuted with [0, 2, 1, 3] const int ne01 = gridDim.x; const int ne02 = gridDim.y; const int col = blockIdx.x; const int head = blockIdx.y; const int sequence = blockIdx.z; const int j_dst_unrolled = (sequence*ne01 + col)*ne02 + head; VKQ_parts += j_dst_unrolled * parallel_blocks*D; VKQ_meta += j_dst_unrolled * parallel_blocks; dst += j_dst_unrolled * D; const int tid = threadIdx.x; __builtin_assume(tid < D); extern __shared__ float2 meta[]; for (int i = tid; i < 2*parallel_blocks; i += D) { ((float *) meta)[i] = ((const float *)VKQ_meta) [i]; } __syncthreads(); float kqmax = meta[0].x; for (int l = 1; l < parallel_blocks; ++l) { kqmax = max(kqmax, meta[l].x); } float VKQ_numerator = 0.0f; float VKQ_denominator = 0.0f; for (int l = 0; l < parallel_blocks; ++l) { const float KQ_max_scale = expf(meta[l].x - kqmax); VKQ_numerator += KQ_max_scale * VKQ_parts[l*D + tid]; VKQ_denominator += KQ_max_scale * meta[l].y; } dst[tid] = VKQ_numerator / VKQ_denominator; } template void launch_fattn( ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, const int nwarps, const size_t nbytes_shared, const int nbatch_fa, const bool need_f16_K, const bool need_f16_V, const bool stream_k, const int warp_size = WARP_SIZE ) { constexpr int ncols = ncols1 * ncols2; const bool is_mla = DV == 512; // TODO better parameterization const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; const ggml_tensor * V = dst->src[2]; GGML_ASSERT(V || is_mla); const ggml_tensor * mask = dst->src[3]; const ggml_tensor * sinks = dst->src[4]; ggml_tensor * KQV = dst; 
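// Reference sketch of how partial results are merged (illustration only, no extra functionality): when
// launch_fattn below runs with parallel_blocks > 1, each block b produces an unnormalized accumulator V_b
// plus its per-row KQ maximum m_b and rowsum s_b. flash_attn_combine_results above computes, per output
// element,
//     M   = max_b m_b
//     out = (sum_b exp(m_b - M) * V_b) / (sum_b exp(m_b - M) * s_b)
// which is the numerically stable way to merge independently computed softmax pieces; the stream-k fixup
// above uses essentially the same rescaling, with an additional flush-to-zero threshold.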
GGML_ASSERT(Q->type == GGML_TYPE_F32); GGML_ASSERT(KQV->type == GGML_TYPE_F32); GGML_ASSERT( Q->nb[0] == ggml_element_size(Q)); GGML_ASSERT( K->nb[0] == ggml_element_size(K)); GGML_ASSERT(!V || V->nb[0] == ggml_element_size(V)); GGML_ASSERT(!mask || mask->type == GGML_TYPE_F16); ggml_cuda_pool & pool = ctx.pool(); cudaStream_t main_stream = ctx.stream(); const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; const int nsm = ggml_cuda_info().devices[id].nsm; ggml_cuda_pool_alloc K_f16(pool); ggml_cuda_pool_alloc V_f16(pool); ggml_cuda_pool_alloc KV_max(pool); ggml_cuda_pool_alloc dst_tmp(pool); ggml_cuda_pool_alloc dst_tmp_meta(pool); const char * K_data = (const char *) K->data; size_t nb11 = K->nb[1]; size_t nb12 = K->nb[2]; size_t nb13 = K->nb[3]; const char * V_data = V ? (const char *) V->data : nullptr; size_t nb21 = V ? V->nb[1] : nb11; size_t nb22 = V ? V->nb[2] : nb12; size_t nb23 = V ? V->nb[3] : nb13; if (need_f16_K && K->type != GGML_TYPE_F16) { const size_t bs = ggml_blck_size(K->type); const size_t ts = ggml_type_size(K->type); K_f16.alloc(ggml_nelements(K)); if (ggml_is_contiguously_allocated(K)) { to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(K->type); to_fp16(K_data, K_f16.ptr, ggml_nelements(K), main_stream); nb11 = nb11*bs*sizeof(half)/ts; nb12 = nb12*bs*sizeof(half)/ts; nb13 = nb13*bs*sizeof(half)/ts; } else { GGML_ASSERT(K->nb[0] == ts); to_fp16_nc_cuda_t to_fp16 = ggml_get_to_fp16_nc_cuda(K->type); const int64_t s01 = nb11 / ts; const int64_t s02 = nb12 / ts; const int64_t s03 = nb13 / ts; to_fp16(K_data, K_f16.ptr, K->ne[0], K->ne[1], K->ne[2], K->ne[3], s01, s02, s03, main_stream); nb11 = K->ne[0] * sizeof(half); nb12 = K->ne[1] * nb11; nb13 = K->ne[2] * nb12; } K_data = (char *) K_f16.ptr; } if (V && need_f16_V && V->type != GGML_TYPE_F16) { const size_t bs = ggml_blck_size(V->type); const size_t ts = ggml_type_size(V->type); V_f16.alloc(ggml_nelements(V)); if (ggml_is_contiguously_allocated(V)) { to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(V->type); to_fp16(V_data, V_f16.ptr, ggml_nelements(V), main_stream); V_data = (char *) V_f16.ptr; nb21 = nb21*bs*sizeof(half)/ts; nb22 = nb22*bs*sizeof(half)/ts; nb23 = nb23*bs*sizeof(half)/ts; } else { GGML_ASSERT(V->nb[0] == ts); to_fp16_nc_cuda_t to_fp16 = ggml_get_to_fp16_nc_cuda(V->type); const int64_t s01 = nb21 / ts; const int64_t s02 = nb22 / ts; const int64_t s03 = nb23 / ts; to_fp16(V_data, V_f16.ptr, V->ne[0], V->ne[1], V->ne[2], V->ne[3], s01, s02, s03, main_stream); nb21 = V->ne[0] * sizeof(half); nb22 = V->ne[1] * nb21; nb23 = V->ne[2] * nb22; } V_data = (char *) V_f16.ptr; } const int ntiles_x = ((Q->ne[1] + ncols1 - 1) / ncols1); const int ntiles_total = ntiles_x * (Q->ne[2] / ncols2) * Q->ne[3]; // Optional optimization where the mask is scanned to determine whether part of the calculation can be skipped. // Only worth the overhead if there is at lease one FATTN_KQ_STRIDE x FATTN_KQ_STRIDE square to be skipped or // multiple sequences of possibly different lengths. 
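// Worked example (illustrative numbers): with FATTN_KQ_STRIDE = 256, a batch of two sequences padded to
// K->ne[1] = 4096, and one sequence that only occupies the first 1280 KV cells, the mask rows of that
// sequence are -INFINITY for all columns >= 1280. flash_attn_mask_to_KV_max then finds the highest
// 256-wide tile that still contains a finite mask value and stores KV_max = 1280 for the corresponding
// Q tiles, so the main kernel iterates over 5 instead of 16 KV tiles and skips the fully masked remainder.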
if (mask && K->ne[1] % FATTN_KQ_STRIDE == 0 && (Q->ne[1] >= 1024 || Q->ne[3] > 1)) { const int s31 = mask->nb[1] / sizeof(half2); const int s33 = mask->nb[3] / sizeof(half2); const dim3 blocks_num_KV_max(ntiles_x, Q->ne[3], 1); const dim3 block_dim_KV_max(FATTN_KQ_STRIDE/2, 1, 1); const int ne_KV_max = blocks_num_KV_max.x*blocks_num_KV_max.y; const int iter_k = K->ne[1] / FATTN_KQ_STRIDE; KV_max.alloc(ne_KV_max); flash_attn_mask_to_KV_max<<>> ((const half2 *) mask->data, KV_max.ptr, iter_k, s31, s33); CUDA_CHECK(cudaGetLastError()); } const dim3 block_dim(warp_size, nwarps, 1); int max_blocks_per_sm = 1; // Max. number of active blocks limited by occupancy. CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm, fattn_kernel, block_dim.x * block_dim.y * block_dim.z, nbytes_shared)); GGML_ASSERT(max_blocks_per_sm > 0); int parallel_blocks = max_blocks_per_sm; dim3 blocks_num; if (stream_k) { // For short contexts it can be faster to have the SMs work on whole tiles because this lets us skip the fixup. const int max_blocks = max_blocks_per_sm*nsm; const int tiles_nwaves = (ntiles_total + max_blocks - 1) / max_blocks; const int tiles_efficiency_percent = 100 * ntiles_total / (max_blocks*tiles_nwaves); const int nblocks_stream_k = max_blocks; const bool use_stream_k = cc >= GGML_CUDA_CC_ADA_LOVELACE || tiles_efficiency_percent < 75; blocks_num.x = use_stream_k ? nblocks_stream_k : ntiles_total; blocks_num.y = 1; blocks_num.z = 1; dst_tmp_meta.alloc(blocks_num.x*ncols * (2*2 + DV) * sizeof(float)); } else { const int ntiles_KQ = (K->ne[1] + nbatch_fa - 1) / nbatch_fa; // Max. number of parallel blocks limited by tensor size. // parallel_blocks must not be larger than what the tensor size allows: parallel_blocks = std::min(parallel_blocks, ntiles_KQ); // If ntiles_total % blocks_per_wave != 0 then some efficiency is lost due to tail effects. // Test whether parallel_blocks can be set to a higher value for better efficiency. const int blocks_per_wave = nsm * max_blocks_per_sm; int nwaves_best = 0; int efficiency_percent_best = 0; for (int parallel_blocks_test = parallel_blocks; parallel_blocks_test <= ntiles_KQ; ++parallel_blocks_test) { const int nblocks_total = ntiles_total * parallel_blocks_test; const int nwaves = (nblocks_total + blocks_per_wave - 1) / blocks_per_wave; const int efficiency_percent = 100 * nblocks_total / (nwaves*blocks_per_wave); // Stop trying configurations with more waves if we already have good efficiency to avoid excessive overhead. 
if (efficiency_percent_best >= 95 && nwaves > nwaves_best) { break; } if (efficiency_percent > efficiency_percent_best) { nwaves_best = nwaves; efficiency_percent_best = efficiency_percent; parallel_blocks = parallel_blocks_test; } } blocks_num.x = ntiles_x; blocks_num.y = parallel_blocks; blocks_num.z = (Q->ne[2]/ncols2)*Q->ne[3]; if (parallel_blocks > 1) { dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV)); } } float scale = 1.0f; float max_bias = 0.0f; float logit_softcap = 0.0f; memcpy(&scale, (const float *) KQV->op_params + 0, sizeof(float)); memcpy(&max_bias, (const float *) KQV->op_params + 1, sizeof(float)); memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); if (logit_softcap != 0.0f) { scale /= logit_softcap; } const uint32_t n_head = Q->ne[2]; const uint32_t n_head_log2 = 1u << uint32_t(floorf(log2f(float(n_head)))); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); // TODO other tensor dimensions after removal of WMMA kernel: const uint3 ne01 = init_fastdiv_values(Q->ne[1]); GGML_ASSERT(block_dim.x % warp_size == 0); fattn_kernel<<>>( (const char *) Q->data, K_data, V_data, mask ? ((const char *) mask->data) : nullptr, sinks ? ((const char *) sinks->data) : nullptr, KV_max.ptr, !stream_k && parallel_blocks > 1 ? dst_tmp.ptr : (float *) KQV->data, dst_tmp_meta.ptr, scale, max_bias, m0, m1, n_head_log2, logit_softcap, Q->ne[0], ne01, Q->ne[2], Q->ne[3], Q->nb[1], Q->nb[2], Q->nb[3], K->ne[0], K->ne[1], K->ne[2], K->ne[3], nb11, nb12, nb13, nb21, nb22, nb23, mask ? mask->ne[1] : 0, mask ? mask->ne[2] : 0, mask ? mask->ne[3] : 0, mask ? mask->nb[1] : 0, mask ? mask->nb[2] : 0, mask ? mask->nb[3] : 0 ); CUDA_CHECK(cudaGetLastError()); if (stream_k) { if (ntiles_total % blocks_num.x != 0) { // Fixup is only needed if the SMs work on fractional tiles. const dim3 block_dim_combine(DV, 1, 1); const dim3 blocks_num_combine = {blocks_num.x, ncols1, ncols2}; flash_attn_stream_k_fixup <<>> ((float *) KQV->data, dst_tmp_meta.ptr, Q->ne[1], Q->ne[2], Q->ne[3], K->ne[1], nbatch_fa); } } else if (parallel_blocks > 1) { const dim3 block_dim_combine(DV, 1, 1); const dim3 blocks_num_combine(Q->ne[1], Q->ne[2], Q->ne[3]); const size_t nbytes_shared_combine = parallel_blocks*sizeof(float2); flash_attn_combine_results <<>> (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data, parallel_blocks); } CUDA_CHECK(cudaGetLastError()); } ggml-org-ggml-3678254/src/ggml-cuda/fattn-mma-f16.cuh000066400000000000000000002245161512524704700220040ustar00rootroot00000000000000#include "common.cuh" #include "cp-async.cuh" #include "mma.cuh" #include "fattn-common.cuh" using namespace ggml_cuda_mma; // Config options for the MMA kernel. // Should not affect results, only speed/register pressure/shared memory use. struct fattn_mma_config { int nthreads; // Number of threads per CUDA block. int occupancy; // Targeted occupancy for the MMA kernel. int nbatch_fa; // Number of KV rows per softmax rescaling of KQ rowsums and VKQ accumulators. int nbatch_K2; // Number of K half2 values in direction of DKQ to load in parallel. int nbatch_V2; // Number of V half2 values in direction of DV to load in parallel. int nbatch_combine; // Number of VKQ half2 values in direction of DV to combine in parallel. int nstages_target; // Number of pipeline stages to use ideally, 1 == always load data synchronously, 2 == preload data if there is hardware support. 
bool Q_in_reg; // Whether the Q values should be kept permanently in registers. constexpr __host__ __device__ fattn_mma_config( int nthreads, int occupancy, int nbatch_fa, int nbatch_K2, int nbatch_V2, int nbatch_combine, int nstages_target, bool Q_in_reg) : nthreads(nthreads), occupancy(occupancy), nbatch_fa(nbatch_fa), nbatch_K2(nbatch_K2), nbatch_V2(nbatch_V2), nbatch_combine(nbatch_combine), nstages_target(nstages_target), Q_in_reg(Q_in_reg) {} }; #define GGML_CUDA_FATTN_MMA_CONFIG_CASE(DKQ_, DV_, ncols_, nthreads_, occupancy_, nbatch_fa_, nbatch_K2_, nbatch_V2_, nbatch_combine_, nstages_target_, Q_in_reg_) \ if (DKQ == (DKQ_) && DV == (DV_) && ncols == (ncols_)) { \ static_assert((nthreads_) % 32 == 0 && (nthreads_) <= 512, "bad nthreads"); \ static_assert( (occupancy_) <= 8, "bad occupancy"); \ static_assert((nbatch_fa_) % 32 == 0 && (nbatch_fa_) <= 256, "bad nbatch_fa"); \ static_assert((nbatch_K2_) % 4 == 0 && (nbatch_K2_) <= 512, "bad nbatch_K2"); \ static_assert((nbatch_V2_) % 4 == 0 && (nbatch_V2_) <= 256, "bad nbatch_V2"); \ static_assert((nbatch_combine_) % 4 == 0 && (nbatch_combine_) <= 128, "bad nbatch_combine"); \ static_assert((nstages_target_) >= 1 && (nstages_target_) <= 2, "bad nstages_target"); \ return fattn_mma_config{(nthreads_), (occupancy_), (nbatch_fa_), (nbatch_K2_), (nbatch_V2_), (nbatch_combine_), (nstages_target_), (Q_in_reg_)}; \ } \ static constexpr __host__ __device__ fattn_mma_config ggml_cuda_fattn_mma_get_config_ampere(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_MMA_CONFIG_CASE( 64, 64, 8, 128, 2, 128, 32, 32, 32, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 64, 64, 16, 128, 2, 64, 32, 32, 32, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 64, 64, 32, 128, 2, 64, 32, 32, 32, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 64, 64, 64, 128, 2, 64, 32, 32, 32, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 80, 80, 8, 128, 2, 128, 40, 40, 40, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 80, 80, 16, 128, 2, 64, 40, 40, 40, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 80, 80, 32, 128, 2, 64, 40, 40, 40, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 80, 80, 64, 128, 2, 64, 40, 40, 40, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 96, 96, 8, 128, 2, 128, 48, 48, 48, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 96, 96, 16, 128, 2, 64, 48, 48, 48, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 96, 96, 32, 128, 2, 64, 48, 48, 48, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE( 96, 96, 64, 128, 2, 64, 48, 48, 48, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(112, 112, 8, 128, 2, 128, 56, 56, 56, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(112, 112, 16, 128, 2, 64, 56, 56, 56, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(112, 112, 32, 128, 2, 64, 56, 56, 56, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(112, 112, 64, 128, 2, 64, 56, 56, 56, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(128, 128, 8, 128, 2, 128, 64, 64, 64, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(128, 128, 16, 128, 2, 64, 64, 64, 64, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(128, 128, 32, 128, 2, 64, 64, 64, 64, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(128, 128, 64, 128, 2, 64, 64, 64, 64, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 8, 64, 4, 64, 128, 128, 128, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 16, 64, 4, 32, 128, 128, 128, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 32, 128, 2, 32, 128, 128, 128, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 64, 128, 2, 32, 128, 128, 128, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 8, 64, 4, 32, 288, 256, 128, 1, false); 
GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 16, 64, 4, 32, 288, 256, 128, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 32, 128, 2, 32, 160, 128, 128, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 64, 256, 1, 32, 160, 128, 128, 1, false); return fattn_mma_config(32, 1, 0, 0, 0, 0, 0, false); } static constexpr __host__ __device__ fattn_mma_config ggml_cuda_fattn_mma_get_config_turing(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 8, 128, 2, 64, 128, 128, 128, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 16, 128, 2, 64, 128, 128, 128, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 32, 128, 2, 64, 128, 128, 64, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(256, 256, 64, 128, 2, 64, 128, 128, 64, 2, true); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 8, 64, 4, 32, 96, 64, 128, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 16, 64, 4, 32, 96, 64, 128, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 32, 128, 2, 32, 160, 128, 128, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 64, 256, 1, 32, 160, 128, 128, 1, false); return ggml_cuda_fattn_mma_get_config_ampere(DKQ, DV, ncols); } static constexpr __host__ __device__ fattn_mma_config ggml_cuda_fattn_mma_get_config_volta(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 8, 64, 4, 32, 288, 256, 64, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 16, 64, 4, 32, 288, 256, 64, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 32, 128, 2, 32, 160, 128, 64, 1, false); GGML_CUDA_FATTN_MMA_CONFIG_CASE(576, 512, 64, 256, 1, 32, 160, 128, 64, 1, false); // TODO tune specifically for Volta return ggml_cuda_fattn_mma_get_config_ampere(DKQ, DV, ncols); } static __host__ fattn_mma_config ggml_cuda_fattn_mma_get_config(const int DKQ, const int DV, const int ncols, const int cc) { if (ampere_mma_available(cc)) { return ggml_cuda_fattn_mma_get_config_ampere(DKQ, DV, ncols); } if (turing_mma_available(cc)) { return ggml_cuda_fattn_mma_get_config_turing(DKQ, DV, ncols); } GGML_ASSERT(volta_mma_available(cc)); return ggml_cuda_fattn_mma_get_config_volta(DKQ, DV, ncols); } static constexpr __device__ fattn_mma_config ggml_cuda_fattn_mma_get_config(const int DKQ, const int DV, const int ncols) { #if defined(AMPERE_MMA_AVAILABLE) return ggml_cuda_fattn_mma_get_config_ampere(DKQ, DV, ncols); #elif defined(TURING_MMA_AVAILABLE) return ggml_cuda_fattn_mma_get_config_turing(DKQ, DV, ncols); #elif defined(VOLTA_MMA_AVAILABLE) return ggml_cuda_fattn_mma_get_config_volta(DKQ, DV, ncols); #else GGML_UNUSED_VARS(DKQ, DV, ncols); return fattn_mma_config(32, 1, 0, 0, 0, 0, 0, false); #endif // defined(AMPERE_MMA_AVAILABLE) } static __host__ int ggml_cuda_fattn_mma_get_nthreads(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).nthreads; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nthreads(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).nthreads; } static __host__ int ggml_cuda_fattn_mma_get_occupancy(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).occupancy; } static constexpr __device__ int ggml_cuda_fattn_mma_get_occupancy(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).occupancy; } static __host__ int ggml_cuda_fattn_mma_get_nbatch_fa(const int DKQ, const int DV, const int ncols, const int cc) { 
return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).nbatch_fa; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nbatch_fa(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).nbatch_fa; } static __host__ int ggml_cuda_fattn_mma_get_nbatch_K2(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).nbatch_K2; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nbatch_K2(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).nbatch_K2; } static __host__ int ggml_cuda_fattn_mma_get_nbatch_V2(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).nbatch_V2; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nbatch_V2(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).nbatch_V2; } static __host__ int ggml_cuda_fattn_mma_get_nbatch_combine(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).nbatch_combine; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nbatch_combine(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).nbatch_combine; } static __host__ int ggml_cuda_fattn_mma_get_nstages_target(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).nstages_target; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nstages_target(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).nstages_target; } static __host__ bool ggml_cuda_fattn_mma_get_Q_in_reg(const int DKQ, const int DV, const int ncols, const int cc) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols, cc).Q_in_reg; } static constexpr __device__ bool ggml_cuda_fattn_mma_get_Q_in_reg(const int DKQ, const int DV, const int ncols) { return ggml_cuda_fattn_mma_get_config(DKQ, DV, ncols).Q_in_reg; } // ------------------------------------------------------------------------------------------------------------------ static __host__ int ggml_cuda_fattn_mma_get_nstages(const int DKQ, const int DV, const int ncols1, const int ncols2, const int cc) { return cp_async_available(cc) && ncols2 >= 2 ? ggml_cuda_fattn_mma_get_nstages_target(DKQ, DV, ncols1*ncols2, cc) : 0; } static constexpr __device__ int ggml_cuda_fattn_mma_get_nstages(const int DKQ, const int DV, const int ncols1, const int ncols2) { #ifdef CP_ASYNC_AVAILABLE return ncols2 >= 2 ? ggml_cuda_fattn_mma_get_nstages_target(DKQ, DV, ncols1*ncols2) : 0; #else GGML_UNUSED_VARS(DKQ, DV, ncols1, ncols2); return 0; #endif // CP_ASYNC_AVAILABLE } // ------------------------------------------------------------------------------------------------------------------ template static __device__ __forceinline__ void flash_attn_ext_f16_load_tile( const half2 * const __restrict__ KV, half2 * const __restrict__ tile_KV, const int D2, const int stride_KV, const int i_sup) { // K/V data is loaded with decreasing granularity for D for better memory bandwidth. // The minimum granularity with cp.async is 16 bytes, with synchronous data loading it's 4 bytes. 
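// Worked example (illustrative, assuming D = 80 and the cp.async path below): D2 = 40 half2 values per row
// correspond to 10 chunks of 16 bytes. The unrolled passes use stride_k = 32, 16, 8, 4, 2, 1 threads per
// row; for 10 chunks only the stride_k = 8 pass (chunks 0..7, 4 rows per warp) and the stride_k = 2 pass
// (chunks 8..9, 16 rows per warp) actually issue loads, the other passes have k0_start == k0_stop and
// return immediately. The bulk of each row is thus loaded with the widest, best coalesced access pattern.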
if constexpr (use_cp_async) { static_assert(!oob_check, "OOB check not compatible with cp_async"); constexpr int preload = 64; constexpr int h2_per_chunk = 16/sizeof(half2); const int chunks_per_row = D2 / h2_per_chunk; const unsigned int tile_KV_32 = ggml_cuda_cvta_generic_to_shared(tile_KV); auto load = [&] __device__ (auto n) { const int stride_k = WARP_SIZE >> n; const int k0_start = stride_k == WARP_SIZE ? 0 : chunks_per_row - chunks_per_row % (2*stride_k); const int k0_stop = chunks_per_row - chunks_per_row % (1*stride_k); const int stride_i = WARP_SIZE / stride_k; if (k0_start == k0_stop) { return; } #pragma unroll for (int i0 = 0; i0 < nbatch_fa; i0 += nwarps*stride_i) { const int i = i0 + threadIdx.y*stride_i + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); if (i0 + nwarps*stride_i > nbatch_fa && i >= nbatch_fa) { break; } #pragma unroll for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); cp_async_cg_16(tile_KV_32 + i*(stride_tile*sizeof(half2)) + k*16, KV + i*stride_KV + k*h2_per_chunk); } } }; // 1: max 32*16=512 bytes, 256 half // 2: max 16*16=256 bytes, 128 half // 3: max 8*16=128 bytes, 64 half // 4: max 4*16= 64 bytes, 32 half // 5: max 2*16= 32 bytes, 16 half // 6: max 1*16= 16 bytes, 8 half ggml_cuda_unroll<6>{}(load); } else { // TODO use ggml_cuda_memcpy_1 auto load = [&] __device__ (const int n) { const int stride_k = WARP_SIZE >> n; const int k0_start = stride_k == WARP_SIZE ? 0 : D2 - D2 % (2*stride_k); const int k0_stop = D2 - D2 % (1*stride_k); const int stride_i = WARP_SIZE / stride_k; if (k0_start == k0_stop) { return; } #pragma unroll for (int i0 = 0; i0 < nbatch_fa; i0 += nwarps*stride_i) { const int i = i0 + threadIdx.y*stride_i + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); if (i0 + nwarps*stride_i > nbatch_fa && i >= nbatch_fa) { break; } #pragma unroll for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); tile_KV[i*stride_tile + k] = !oob_check || i < i_sup ? KV[i*stride_KV + k] : make_half2(0.0f, 0.0f); } } }; // 1: max 32* 4=128 bytes, 64 half // 2: max 16* 4= 64 bytes, 32 half // 3: max 8* 4= 32 bytes, 16 half // 4: max 4* 4= 16 bytes, 8 half ggml_cuda_unroll<4>{}(load); } } template static __device__ __forceinline__ void flash_attn_ext_f16_load_mask( const half * const __restrict__ mask_h, half * const __restrict__ tile_mask, const int stride_mask, const int i_sup, const int j0, const uint3 ne01) { if constexpr (use_cp_async) { static_assert(nbatch_fa <= 8*WARP_SIZE && nbatch_fa % 8 == 0, "bad nbatch_fa"); static_assert(!oob_check, "OOB check incompatible with cp_async"); constexpr int preload = nbatch_fa >= 32 ? 
nbatch_fa * sizeof(half) : 64; constexpr int cols_per_warp = 8*WARP_SIZE/nbatch_fa; constexpr int stride_j = nwarps * cols_per_warp; const unsigned int tile_mask_32 = ggml_cuda_cvta_generic_to_shared(tile_mask); #pragma unroll for (int j1 = 0; j1 < ncols1; j1 += stride_j) { const int j_sram = j1 + threadIdx.y*cols_per_warp + threadIdx.x / (WARP_SIZE/cols_per_warp); const int j_vram = fastmodulo(j0 + j_sram, ne01); if (j1 + stride_j > ncols1 && j_sram >= ncols1) { break; } const int i = 8 * (threadIdx.x % (nbatch_fa/8)); cp_async_cg_16(tile_mask_32 + j_sram*(nbatch_fa*sizeof(half) + 16) + i*sizeof(half), mask_h + j_vram*stride_mask + i); } } else if constexpr (oob_check) { #pragma unroll for (int j1 = 0; j1 < ncols1; j1 += nwarps) { const int j_sram = j1 + threadIdx.y; const int j_vram = fastmodulo(j0 + j_sram, ne01); if (j1 + nwarps > ncols1 && j_sram >= ncols1) { break; } #pragma unroll for (int i0 = 0; i0 < nbatch_fa; i0 += WARP_SIZE) { const int i = i0 + threadIdx.x; tile_mask[j_sram*(nbatch_fa + 8) + i] = i < i_sup ? mask_h[j_vram*stride_mask + i] : half(0.0f); } } } else if constexpr (nbatch_fa < 2*WARP_SIZE) { constexpr int cols_per_warp = 2*WARP_SIZE/nbatch_fa; constexpr int stride_j = nwarps * cols_per_warp; #pragma unroll for (int j1 = 0; j1 < ncols1; j1 += stride_j) { const int j_sram = j1 + threadIdx.y*cols_per_warp + threadIdx.x / (WARP_SIZE/cols_per_warp); const int j_vram = fastmodulo(j0 + j_sram, ne01); if (j1 + stride_j > ncols1 && j_sram >= ncols1) { break; } const int i = threadIdx.x % (WARP_SIZE/cols_per_warp); ggml_cuda_memcpy_1(tile_mask + j_sram*(nbatch_fa + 8) + 2*i, mask_h + j_vram*stride_mask + 2*i); } } else { #pragma unroll for (int j1 = 0; j1 < ncols1; j1 += nwarps) { const int j_sram = j1 + threadIdx.y; const int j_vram = fastmodulo(j0 + j_sram, ne01); if (j1 + nwarps > ncols1 && j_sram >= ncols1) { break; } #pragma unroll for (int i0 = 0; i0 < nbatch_fa; i0 += 2*WARP_SIZE) { const int i = i0 + 2*threadIdx.x; ggml_cuda_memcpy_1(tile_mask + j_sram*(nbatch_fa + 8) + i, mask_h + j_vram*stride_mask + i); } } } } template static __device__ __forceinline__ void flash_attn_ext_f16_iter( const float2 * const __restrict__ Q_f2, const half2 * const __restrict__ K_h2, const half2 * const __restrict__ V_h2, const half * const __restrict__ mask_h, float2 * const __restrict__ dstk, float2 * const __restrict__ dstk_fixup, const float scale, const float slope, const float logit_softcap, const uint3 ne01, const int ne02, const int stride_K, const int stride_V, const int stride_mask, half2 * const __restrict__ tile_Q, half2 * const __restrict__ tile_K, half2 * const __restrict__ tile_V, half * const __restrict__ tile_mask, T_B_KQ * const __restrict__ Q_B, T_C_VKQ * const __restrict__ VKQ_C, float * const __restrict__ KQ_max, float * const __restrict__ KQ_rowsum, const int jt, const int kb0, const int k_VKQ_sup) { #if defined(VOLTA_MMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) constexpr int ncols = ncols1 * ncols2; constexpr int cols_per_warp = T_B_KQ::I; constexpr int cols_per_thread = 2; // This is specifically KQ columns, Volta only has a single VKQ column. constexpr int np = nwarps * (cols_per_warp/ncols2) / ncols1; // Number of parallel CUDA warps per Q column. 
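// Example values (an illustrative configuration, not a statement about which tile sizes get picked):
// with nwarps = 4, ncols1 = 8, ncols2 = 2 and wide tiles of cols_per_warp = 16, each warp covers
// cols_per_warp/ncols2 = 8 Q columns, so np = 4*8/8 = 4 warps end up working on the same Q columns
// and split the nbatch_fa KV rows of the current tile between them.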
constexpr int nbatch_fa = ggml_cuda_fattn_mma_get_nbatch_fa(DKQ, DV, ncols); constexpr int nbatch_K2 = ggml_cuda_fattn_mma_get_nbatch_K2(DKQ, DV, ncols); constexpr int nbatch_V2 = ggml_cuda_fattn_mma_get_nbatch_V2(DKQ, DV, ncols); constexpr bool Q_in_reg = ggml_cuda_fattn_mma_get_Q_in_reg (DKQ, DV, ncols); constexpr int nstages = ggml_cuda_fattn_mma_get_nstages (DKQ, DV, ncols1, ncols2); constexpr int stride_tile_Q = DKQ/2 + 4; constexpr int stride_tile_K = nbatch_K2 + 4; static_assert(!mla || nbatch_K2 >= nbatch_V2, "bad nbatch_K2, nbatch_V2 for MLA"); constexpr int stride_tile_V = mla ? stride_tile_K : nbatch_V2 + 4; const int k_VKQ_0 = kb0 * nbatch_fa; #if defined(TURING_MMA_AVAILABLE) T_C_KQ KQ_C[nbatch_fa/(np*(cols_per_warp == 8 ? T_C_KQ::I : T_C_KQ::J))]; #else // Volta T_C_KQ KQ_C[nbatch_fa/(np*T_C_KQ::J)]; #endif // defined(TURING_MMA_AVAILABLE) if constexpr (nstages > 1) { static_assert(!oob_check, "OOB check incompatible with multi-stage pipeline"); static_assert(!mla, "multi-stage loading not implemented for MLA"); static_assert(nbatch_K2 == DKQ/2, "batching not implemented for multi stage loading"); constexpr bool use_cp_async = true; cp_async_wait_all(); __syncthreads(); flash_attn_ext_f16_load_tile (V_h2 + int64_t(k_VKQ_0)*stride_V, tile_V, nbatch_V2, stride_V, k_VKQ_sup); } else { constexpr bool use_cp_async = nstages == 1; if (ncols2 > 1 || mask_h) { flash_attn_ext_f16_load_mask (mask_h + k_VKQ_0, tile_mask, stride_mask, k_VKQ_sup, jt*ncols1, ne01); } } #pragma unroll for (int k0_start = 0; k0_start < DKQ/2; k0_start += nbatch_K2) { const int k0_stop = k0_start + nbatch_K2 < DKQ/2 ? k0_start + nbatch_K2 : DKQ/2; const int k0_diff = k0_stop - k0_start; if constexpr (nstages <= 1) { constexpr bool use_cp_async = nstages == 1; flash_attn_ext_f16_load_tile (K_h2 + int64_t(k_VKQ_0)*stride_K + k0_start, tile_K, k0_diff, stride_K, k_VKQ_sup); if (use_cp_async) { cp_async_wait_all(); } __syncthreads(); } // Calculate tile of KQ: if constexpr (Q_in_reg) { #pragma unroll for (int i_KQ_00 = 0; i_KQ_00 < nbatch_fa; i_KQ_00 += np*T_A_KQ::I) { const int i_KQ_0 = i_KQ_00 + (threadIdx.y % np)*T_A_KQ::I; #pragma unroll for (int k_KQ_0 = k0_start; k_KQ_0 < k0_stop; k_KQ_0 += T_A_KQ::J) { T_A_KQ K_A; load_ldmatrix(K_A, tile_K + i_KQ_0*stride_tile_K + (k_KQ_0 - k0_start), stride_tile_K); if constexpr (cols_per_warp == 8) { mma(KQ_C[i_KQ_00/(np*T_A_KQ::I)], K_A, Q_B[k_KQ_0/T_A_KQ::J]); } else { // Wide version of KQ_C is column-major => swap A and B. mma(KQ_C[i_KQ_00/(np*T_A_KQ::I)], Q_B[k_KQ_0/T_A_KQ::J], K_A); } } } } else { static_assert(cols_per_warp != 8, "cols_per_warp == 8 not implemented"); #pragma unroll for (int k_KQ_0 = k0_start; k_KQ_0 < k0_stop; k_KQ_0 += T_A_KQ::J) { load_ldmatrix(Q_B[0], tile_Q + (threadIdx.y / np)*(T_B_KQ::I*stride_tile_Q) + k_KQ_0, stride_tile_Q); #pragma unroll for (int i_KQ_00 = 0; i_KQ_00 < nbatch_fa; i_KQ_00 += np*T_A_KQ::I) { const int i_KQ_0 = i_KQ_00 + (threadIdx.y % np)*T_A_KQ::I; T_A_KQ K_A; load_ldmatrix(K_A, tile_K + i_KQ_0*stride_tile_K + (k_KQ_0 - k0_start), stride_tile_K); // Wide version of KQ_C is column-major => swap A and B. mma(KQ_C[i_KQ_00/(np*T_A_KQ::I)], Q_B[0], K_A); } } } if constexpr (nstages <= 1) { __syncthreads(); // Only needed if tile_K == tile_V. } } if (use_logit_softcap) { constexpr int stride = cols_per_warp == 8 ? 
np*T_C_KQ::I : np*T_C_KQ::J; static_assert(nbatch_fa % stride == 0, "bad loop size"); #pragma unroll for (int i = 0; i < nbatch_fa/stride; ++i) { #pragma unroll for (int l = 0; l < T_C_KQ::ne; ++l) { KQ_C[i].x[l] = logit_softcap*tanhf(KQ_C[i].x[l]); } } } float KQ_max_new[cols_per_thread]; #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { KQ_max_new[col] = KQ_max[col]; } float KQ_rowsum_add[cols_per_thread] = {0.0f}; if constexpr (cols_per_warp == 8) { if (ncols2 > 1 || mask_h) { #pragma unroll for (int i00 = 0; i00 < nbatch_fa; i00 += np*T_C_KQ::I) { const int i0 = i00 + (threadIdx.y % np)*T_C_KQ::I; #pragma unroll for (int l = 0; l < T_C_KQ::ne; ++l) { const int i = i0 + T_C_KQ::get_i(l); const int j = ((threadIdx.y / np)*T_C_KQ::J + T_C_KQ::get_j(l)) / ncols2; KQ_C[i00/(np*T_C_KQ::I)].x[l] += slope * __half2float(tile_mask[j*(nbatch_fa + 8) + i]); } } } // Calculate softmax for each KQ column using the current max. value. // The divisor is stored in KQ_rowsum and will be applied at the end. static_assert(nbatch_fa % (np*T_C_KQ::I) == 0, "bad loop size"); #pragma unroll for (int k0 = 0; k0 < nbatch_fa; k0 += np*T_C_KQ::I) { #pragma unroll for (int l = 0; l < T_C_KQ::ne; ++l) { if (!oob_check || k0 + (threadIdx.y % np)*T_C_KQ::I + T_C_KQ::get_i(l) < k_VKQ_sup) { KQ_max_new[l % 2] = fmaxf(KQ_max_new[l % 2], KQ_C[k0/(np*T_C_KQ::I)].x[l] + FATTN_KQ_MAX_OFFSET); } } } // Values per KQ column are spread across 8 threads: #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { #pragma unroll for (int offset = 16; offset >= 4; offset >>= 1) { KQ_max_new[col] = fmaxf(KQ_max_new[col], __shfl_xor_sync(0xFFFFFFFF, KQ_max_new[col], offset, WARP_SIZE)); } } static_assert(nbatch_fa % (np*T_C_KQ::I) == 0, "bad loop size"); #pragma unroll for (int k0 = 0; k0 < nbatch_fa; k0 += np*T_C_KQ::I) { #pragma unroll for (int l = 0; l < T_C_KQ::ne; ++l) { if (!oob_check || k0 + (threadIdx.y % np)*T_C_KQ::I + T_C_KQ::get_i(l) < k_VKQ_sup) { KQ_C[k0/(np*T_C_KQ::I)].x[l] = expf(KQ_C[k0/(np*T_C_KQ::I)].x[l] - KQ_max_new[l % 2]); KQ_rowsum_add[l % 2] += KQ_C[k0/(np*T_C_KQ::I)].x[l]; } else { KQ_C[k0/(np*T_C_KQ::I)].x[l] = 0.0f; } } } } else { // not Turing mma or T_B_KQ::I > 8 if (ncols2 > 1 || mask_h) { #pragma unroll for (int i00 = 0; i00 < nbatch_fa; i00 += np*T_C_KQ::J) { const int i0 = i00 + (threadIdx.y % np)*T_C_KQ::J; #pragma unroll for (int l0 = 0; l0 < T_C_KQ::ne; l0 += 2) { const int i = (i0 + T_C_KQ::get_j(l0)) / 2; const int j = ((threadIdx.y / np)*cols_per_warp + T_C_KQ::get_i(l0)) / ncols2; const float2 tmp = __half22float2(((const half2 *)tile_mask)[j*(nbatch_fa/2 + 4) + i]); KQ_C[i00/(np*T_C_KQ::J)].x[l0 + 0] += slope*tmp.x; KQ_C[i00/(np*T_C_KQ::J)].x[l0 + 1] += slope*tmp.y; } } } // Calculate softmax for each KQ column using the current max. value. // The divisor is stored in KQ_rowsum and will be applied at the end. 
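// Sketch of the online-softmax update implemented below, per KQ column (FATTN_KQ_MAX_OFFSET is only
// added to the max candidates):
//     m_new     = max(m_old, max_i(KQ_i + FATTN_KQ_MAX_OFFSET))
//     P_i       = exp(KQ_i - m_new)
//     KQ_rowsum = exp(m_old - m_new) * KQ_rowsum + sum_i P_i
//     VKQ       = exp(m_old - m_new) * VKQ + P*V
// Only the final division by KQ_rowsum is deferred; the rescaling of KQ_rowsum and VKQ by
// exp(m_old - m_new) happens in the block that follows.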
static_assert(nbatch_fa % (np*T_C_KQ::J) == 0, "bad loop size"); #pragma unroll for (int k0 = 0; k0 < nbatch_fa; k0 += np*T_C_KQ::J) { #pragma unroll for (int l = 0; l < T_C_KQ::ne; ++l) { if (!oob_check || k0 + (threadIdx.y % np)*T_C_KQ::J + T_C_KQ::get_j(l) < k_VKQ_sup) { // Turing + Volta: KQ_max_new[(l/2) % 2] = fmaxf(KQ_max_new[(l/2) % 2], KQ_C[(k0/(np*T_C_KQ::J))].x[l] + FATTN_KQ_MAX_OFFSET); } } } #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { #if defined(TURING_MMA_AVAILABLE) // Values per KQ column are spread across 4 threads: constexpr int offset_first = 2; constexpr int offset_last = 1; #else // Values per KQ column are spread across 2 threads: constexpr int offset_first = 2; constexpr int offset_last = 2; #endif // defined(TURING_MMA_AVAILABLE) #pragma unroll for (int offset = offset_first; offset >= offset_last; offset >>= 1) { KQ_max_new[col] = fmaxf(KQ_max_new[col], __shfl_xor_sync(0xFFFFFFFF, KQ_max_new[col], offset, WARP_SIZE)); } } static_assert(nbatch_fa % (np*T_C_KQ::J) == 0, "bad loop size"); #pragma unroll for (int k0 = 0; k0 < nbatch_fa; k0 += np*T_C_KQ::J) { #pragma unroll for (int l = 0; l < T_C_KQ::ne; ++l) { // Turing + Volta: if (!oob_check || k0 + (threadIdx.y % np)*T_C_KQ::J + T_C_KQ::get_j(l) < k_VKQ_sup) { KQ_C[(k0/(np*T_C_KQ::J))].x[l] = expf(KQ_C[(k0/(np*T_C_KQ::J))].x[l] - KQ_max_new[(l/2) % 2]); KQ_rowsum_add[(l/2) % 2] += KQ_C[(k0/(np*T_C_KQ::J))].x[l]; } else { KQ_C[(k0/(np*T_C_KQ::J))].x[l] = 0.0f; } } } } { float KQ_max_scale[cols_per_thread]; #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { const float KQ_max_diff = KQ_max[col] - KQ_max_new[col]; KQ_max_scale[col] = expf(KQ_max_diff); KQ_max[col] = KQ_max_new[col]; *((uint32_t *) &KQ_max_scale[col]) *= KQ_max_diff >= SOFTMAX_FTZ_THRESHOLD; // Scale previous KQ_rowsum to account for a potential increase in KQ_max: KQ_rowsum[col] = KQ_max_scale[col]*KQ_rowsum[col] + KQ_rowsum_add[col]; } #if defined(TURING_MMA_AVAILABLE) if constexpr (cols_per_warp == 8) { const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[0], KQ_max_scale[1]); #pragma unroll for (int i = 0; i < DV/T_C_VKQ::I; ++i) { #pragma unroll for (int l = 0; l < T_C_VKQ::ne; ++l) { VKQ_C[i].x[l] *= KQ_max_scale_h2; } } } else { #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[col], KQ_max_scale[col]); #pragma unroll for (int i = 0; i < (DV/2)/T_C_VKQ::J; ++i) { #pragma unroll for (int l0 = 0; l0 < T_C_VKQ::ne; l0 += 2) { VKQ_C[i].x[l0 + col] *= KQ_max_scale_h2; } } } } #else // Volta const half2 KQ_max_scale_h2 = make_half2( KQ_max_scale[(threadIdx.x / 2) % 2], KQ_max_scale[(threadIdx.x / 2) % 2]); #pragma unroll for (int i = 0; i < (DV/2)/T_C_VKQ::J; ++i) { #pragma unroll for (int l = 0; l < T_C_VKQ::ne; ++l) { VKQ_C[i].x[l] *= KQ_max_scale_h2; } } #endif // defined(TURING_MMA_AVAILABLE) } // Convert KQ C tiles into B tiles for VKQ calculation: T_B_VKQ B[nbatch_fa/(np*2*T_B_VKQ::J)]; static_assert(nbatch_fa % (np*2*T_B_VKQ::J) == 0, "bad loop size"); if constexpr (cols_per_warp == 8) { #pragma unroll for (int k = 0; k < nbatch_fa/(np*2*T_B_VKQ::J); ++k) { B[k] = get_transposed(get_half2(KQ_C[k])); } } else { for (int k = 0; k < nbatch_fa/(np*2*T_B_VKQ::J); ++k) { B[k] = get_half2(KQ_C[k]); } } if constexpr (nstages > 1) { // Preload K tile for next iteration: constexpr bool use_cp_async = true; cp_async_wait_all(); __syncthreads(); if (!last_iter) { if (ncols2 > 1 || mask_h) { flash_attn_ext_f16_load_mask (mask_h + k_VKQ_0 + nbatch_fa, 
tile_mask, stride_mask, k_VKQ_sup, jt*ncols1, ne01); } flash_attn_ext_f16_load_tile (K_h2 + int64_t(k_VKQ_0 + nbatch_fa)*stride_K, tile_K, nbatch_K2, stride_K, k_VKQ_sup); } } // For MLA K and V have the same data. // Therefore, iterate over V in reverse and re-use the data if possible. static_assert(!mla || nstages <= 1, "combination of MLA and multi-stage loading not implemented"); constexpr int reusable_cutoff = mla ? (DKQ - 1) - (DKQ - 1) % (2*nbatch_K2) - (DKQ - DV) : DV; // Calculate VKQ tile, need to use logical rather than physical elements for i0 due to transposition of V: #pragma unroll for (int i0_stop = DV; i0_stop > 0; i0_stop -= 2*nbatch_V2) { const int i0_start = i0_stop - 2*nbatch_V2 > 0 ? i0_stop - 2*nbatch_V2 : 0; const int i0_diff = i0_stop - i0_start; if constexpr (nstages <= 1) { if (i0_start < reusable_cutoff) { constexpr bool use_cp_async = nstages == 1; flash_attn_ext_f16_load_tile (V_h2 + int64_t(k_VKQ_0)*stride_V + i0_start/2, tile_V, i0_diff/2, stride_V, k_VKQ_sup); if (use_cp_async) { cp_async_wait_all(); } __syncthreads(); } } const half2 * tile_V_i = i0_start < reusable_cutoff ? tile_V : tile_V + (i0_start - reusable_cutoff)/2; #if defined(TURING_MMA_AVAILABLE) constexpr int i0_stride = cols_per_warp == 8 ? T_C_VKQ::I : 2*T_C_VKQ::J; #pragma unroll for (int i_VKQ_0 = i0_start; i_VKQ_0 < i0_stop; i_VKQ_0 += i0_stride) { static_assert((nbatch_fa/2) % (np*T_A_VKQ::J) == 0, "bad loop size"); #pragma unroll for (int k00 = 0; k00 < nbatch_fa/2; k00 += np*T_A_VKQ::J) { const int k0 = k00 + (threadIdx.y % np)*T_A_VKQ::J; T_A_VKQ A; // Transposed in SRAM but not in registers, gets transposed on load. load_ldmatrix_trans(A, tile_V_i + 2*k0*stride_tile_V + (i_VKQ_0 - i0_start)/2, stride_tile_V); if constexpr (T_B_KQ::I == 8) { mma(VKQ_C[i_VKQ_0/i0_stride], A, B[k00/(np*T_A_VKQ::J)]); } else { // Wide version of VKQ_C is column-major => swap A and B. mma(VKQ_C[i_VKQ_0/i0_stride], B[k00/(np*T_A_VKQ::J)], A); } } } #else // Volta constexpr int i0_stride = 2*T_C_VKQ::J; #pragma unroll for (int i_VKQ_0 = i0_start; i_VKQ_0 < i0_stop; i_VKQ_0 += i0_stride) { static_assert(nbatch_fa % (np*T_A_VKQ::I) == 0, "bad loop size"); static_assert(2*T_B_VKQ::J == T_A_VKQ::I, "bad tile sizes"); #pragma unroll for (int k00 = 0; k00 < nbatch_fa; k00 += np*T_A_VKQ::I) { const int k0 = k00 + (threadIdx.y % np)*T_A_VKQ::I; T_A_VKQ A; // Transposed in both SRAM and registers, load normally. load_ldmatrix(A, tile_V_i + k0*stride_tile_V + (i_VKQ_0 - i0_start)/2, stride_tile_V); mma(VKQ_C[i_VKQ_0/i0_stride], B[k00/(np*T_A_VKQ::I)], A); } } #endif // defined(TURING_MMA_AVAILABLE) if constexpr (nstages <= 1) { __syncthreads(); // Only needed if tile_K == tile_V. 
} } #else GGML_UNUSED_VARS(Q_f2, K_h2, V_h2, mask_h, dstk, dstk_fixup, scale, slope, logit_softcap, ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, kb0); NO_DEVICE_CODE; #endif // defined(VOLTA_MMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } #if defined(TURING_MMA_AVAILABLE) template struct mma_tile_sizes { using T_A_KQ = tile<16, 8, half2>; // row-major using T_B_KQ = tile<16, 8, half2>; // column-major using T_C_KQ = tile<16, 16, float>; // column-major using T_A_VKQ = tile<16, 8, half2>; // row-major using T_B_VKQ = tile<16, 8, half2>; // column-major using T_C_VKQ = tile<16, 8, half2>; // column-major }; template<> struct mma_tile_sizes<8> { using T_A_KQ = tile<16, 8, half2>; // row-major using T_B_KQ = tile< 8, 8, half2>; // column-major using T_C_KQ = tile<16, 8, float>; // row-major using T_A_VKQ = tile<16, 8, half2>; // row-major using T_B_VKQ = tile< 8, 8, half2>; // column-major using T_C_VKQ = tile<16, 4, half2>; // row-major }; #else // Volta template struct mma_tile_sizes { using T_A_KQ = tile< 8, 4, half2, DATA_LAYOUT_I_MAJOR_MIRRORED>; // row-major using T_B_KQ = tile<32, 4, half2, DATA_LAYOUT_I_MAJOR>; // column-major using T_C_KQ = tile<32, 8, float, DATA_LAYOUT_I_MAJOR>; // column-major using T_A_VKQ = tile< 8, 4, half2, DATA_LAYOUT_J_MAJOR_MIRRORED>; // column-major using T_B_VKQ = tile<32, 4, half2, DATA_LAYOUT_I_MAJOR>; // column-major using T_C_VKQ = tile<32, 4, half2, DATA_LAYOUT_I_MAJOR>; // column-major }; #endif // defined(TURING_MMA_AVAILABLE) template static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( const float2 * const __restrict__ Q_f2, const half2 * const __restrict__ K_h2, const half2 * const __restrict__ V_h2, const half * const __restrict__ mask_h, const float * const __restrict__ sinks_f, float2 * const __restrict__ dstk, float2 * const __restrict__ dstk_fixup, const float scale, const float slope, const float logit_softcap, const uint3 ne01, const int ne02, const int ne11, const int stride_Q1, const int stride_Q2, const int stride_K, const int stride_V, const int stride_mask, const int jt, const int kb0_start, const int kb0_stop) { #if defined(VOLTA_MMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) //In this kernel Q, K, V are matrices while i, j, k are matrix indices. constexpr int ncols = ncols1 * ncols2; using T_A_KQ = typename mma_tile_sizes::T_A_KQ; using T_B_KQ = typename mma_tile_sizes::T_B_KQ; using T_C_KQ = typename mma_tile_sizes::T_C_KQ; using T_A_VKQ = typename mma_tile_sizes::T_A_VKQ; using T_B_VKQ = typename mma_tile_sizes::T_B_VKQ; using T_C_VKQ = typename mma_tile_sizes::T_C_VKQ; constexpr int cols_per_warp = T_B_KQ::I; constexpr int cols_per_thread = 2; // This is specifically KQ columns, Volta only has a single VKQ column. constexpr int np = nwarps * (cols_per_warp/ncols2) / ncols1; // Number of parallel CUDA warps per Q column. 
constexpr int nbatch_fa = ggml_cuda_fattn_mma_get_nbatch_fa (DKQ, DV, ncols); constexpr int nbatch_K2 = ggml_cuda_fattn_mma_get_nbatch_K2 (DKQ, DV, ncols); constexpr int nbatch_V2 = ggml_cuda_fattn_mma_get_nbatch_V2 (DKQ, DV, ncols); constexpr int nbatch_combine = ggml_cuda_fattn_mma_get_nbatch_combine(DKQ, DV, ncols); constexpr bool Q_in_reg = ggml_cuda_fattn_mma_get_Q_in_reg (DKQ, DV, ncols); constexpr int nstages = ggml_cuda_fattn_mma_get_nstages (DKQ, DV, ncols1, ncols2); if (cols_per_warp > ncols) { NO_DEVICE_CODE; return; } static_assert(nwarps * (cols_per_warp/ncols2) % ncols1 == 0, "bad nwarps"); constexpr int stride_tile_Q = DKQ/2 + 4; constexpr int stride_tile_K = nbatch_K2 + 4; static_assert(!mla || nbatch_K2 >= nbatch_V2, "bad nbatch_K2, nbatch_V2 for MLA"); constexpr int stride_tile_V = mla ? stride_tile_K : nbatch_V2 + 4; constexpr int stride_tile_KV_max = stride_tile_K > stride_tile_V ? stride_tile_K : stride_tile_V; extern __shared__ half2 tile_Q[]; half2 * tile_K = Q_in_reg ? tile_Q : tile_Q + ncols * stride_tile_Q; half2 * tile_V = nstages > 1 ? tile_K + nbatch_fa * stride_tile_K : tile_K; half * tile_mask = (half *) (nstages > 1 ? tile_V + nbatch_fa * stride_tile_V : tile_V + nbatch_fa * stride_tile_KV_max); T_B_KQ Q_B[(Q_in_reg ? DKQ/(2*T_B_KQ::J) : 1)]; #if defined(TURING_MMA_AVAILABLE) T_C_VKQ VKQ_C[cols_per_warp == 8 ? DV/T_C_VKQ::I : DV/(2*T_C_VKQ::J)]; #else // Volta T_C_VKQ VKQ_C[ DV/(2*T_C_VKQ::J)]; #endif // defined(TURING_MMA_AVAILABLE) float KQ_rowsum[cols_per_thread] = {0.0f}; float KQ_max[cols_per_thread]; #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { KQ_max[col] = -FLT_MAX/2.0f; } // Load Q data into tile_Q, either temporarily or permanently. // Q in registers is faster, but register pressure is the biggest bottleneck. // The loading is done with decreasing granularity for D for better memory bandwidth. const half2 scale_h2 = make_half2(scale, scale); #pragma unroll for (int stride_k : {WARP_SIZE, WARP_SIZE/2, WARP_SIZE/4}) { const int k0_start = stride_k == WARP_SIZE ? 0 : DKQ/2 - (DKQ/2) % (2*stride_k); const int k0_stop = DKQ/2 - (DKQ/2) % (1*stride_k); const int stride_jc = WARP_SIZE / stride_k; if (k0_start == k0_stop) { continue; } #pragma unroll for (int jc0 = 0; jc0 < ncols; jc0 += nwarps*stride_jc) { const int jc = jc0 + threadIdx.y*stride_jc + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); if (jc0 + nwarps*stride_jc > ncols && jc >= ncols) { break; } const int j = jc / ncols2; const int c = jc % ncols2; if (jt*ncols1 + j < int(ne01.z)) { #pragma unroll for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); const float2 tmp = Q_f2[(jt*ncols1 + j)*stride_Q1 + c*stride_Q2 + k]; tile_Q[jc*stride_tile_Q + k] = scale_h2 * make_half2(tmp.x, tmp.y); } } else { #pragma unroll for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { const int k = k0 + (stride_k == WARP_SIZE ? 
threadIdx.x : threadIdx.x % stride_k); tile_Q[jc*stride_tile_Q + k] = make_half2(0.0f, 0.0f); } } } } __syncthreads(); if (Q_in_reg) { const int j0 = (threadIdx.y / np) * cols_per_warp; #pragma unroll for (int k0 = 0; k0 < DKQ/2; k0 += T_B_KQ::J) { load_ldmatrix(Q_B[k0/T_B_KQ::J], tile_Q + j0*stride_tile_Q + k0, stride_tile_Q); } } __syncthreads(); int kb0 = kb0_start; // Preload mask and K data for first iteration when using cp_async with multiple stages: if constexpr (nstages > 1) { static_assert(nbatch_K2 == DKQ/2, "batching not implemented for multi-stage pipeline"); constexpr bool use_cp_async = true; constexpr bool oob_check = false; constexpr int k_VKQ_sup = nbatch_fa; if (ncols2 > 1 || mask_h) { flash_attn_ext_f16_load_mask (mask_h + kb0*nbatch_fa, tile_mask, stride_mask, k_VKQ_sup, jt*ncols1, ne01); } flash_attn_ext_f16_load_tile (K_h2 + int64_t(kb0)*nbatch_fa*stride_K, tile_K, nbatch_K2, stride_K, k_VKQ_sup); } // kb0_start is always < kb0_stop so the last iter can be executed unconditionally. if constexpr (ncols2 == 1) { constexpr bool oob_check = true; for (; kb0 < kb0_stop-1; ++kb0) { constexpr bool last_iter = false; constexpr int k_VKQ_sup = nbatch_fa; flash_attn_ext_f16_iter (Q_f2, K_h2, V_h2, mask_h, dstk, dstk_fixup, scale, slope, logit_softcap, ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, jt, kb0, k_VKQ_sup); } constexpr bool last_iter = true; const int k_VKQ_sup = ne11 - kb0*nbatch_fa; flash_attn_ext_f16_iter (Q_f2, K_h2, V_h2, mask_h, dstk, dstk_fixup, scale, slope, logit_softcap, ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, jt, kb0, k_VKQ_sup); } else { constexpr bool oob_check = false; for (; kb0 < kb0_stop-1; ++kb0) { constexpr bool last_iter = false; constexpr int k_VKQ_sup = nbatch_fa; flash_attn_ext_f16_iter (Q_f2, K_h2, V_h2, mask_h, dstk, dstk_fixup, scale, slope, logit_softcap, ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, jt, kb0, k_VKQ_sup); } constexpr bool last_iter = true; constexpr int k_VKQ_sup = nbatch_fa; flash_attn_ext_f16_iter (Q_f2, K_h2, V_h2, mask_h, dstk, dstk_fixup, scale, slope, logit_softcap, ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, jt, kb0, k_VKQ_sup); } // With multi-stage loading there is no __syncthreads at the end of the iter, // there can be a race condition on shared memory access for combining/writing back results. if constexpr (nstages > 1 && nwarps*cols_per_warp > nbatch_fa) { __syncthreads(); } // Finally, sum up partial KQ rowsums. { #if defined(TURING_MMA_AVAILABLE) // The partial sums are spread across 8/4 threads. constexpr int offset_first = cols_per_warp == 8 ? 16 : 2; constexpr int offset_last = cols_per_warp == 8 ? 4 : 1; #else // Volta // The partial sums are spread across 2 threads. constexpr int offset_first = 2; constexpr int offset_last = 2; #endif // defined(TURING_MMA_AVAILABLE) #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { #pragma unroll for (int offset = offset_first; offset >= offset_last; offset >>= 1) { KQ_rowsum[col] += __shfl_xor_sync(0xFFFFFFFF, KQ_rowsum[col], offset, WARP_SIZE); } } } // If attention sinks are used, potentially re-scale if KQ_max is small. // Also add the sink as a value to KQ_rowsum, this is done after synchonization of KQ_rowsum // so it's being done unconditionally for every thread. 
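    // [Editor's note, illustrative only] The sink is handled like one extra logit s appended to the row:
    // with running maximum m and running rowsum l, the update below computes
    //     m' = max(m, s),   l' = exp(m - m') * l + exp(s - m'),
    // and the VKQ accumulators are rescaled by exp(m - m') (the sink contributes no value vector).
    // As in the regular softmax path, exp(m - m') is flushed to zero when m - m' falls below SOFTMAX_FTZ_THRESHOLD.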
if (!is_fixup && (np == 1 || threadIdx.y % np == 0) && sinks_f) { float KQ_max_scale[cols_per_thread]; #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { const int jc = cols_per_warp == 8 ? T_C_KQ::get_j(col) : T_C_KQ::get_i(2*col); const float sink = sinks_f[jc % ncols2]; const float KQ_max_new = fmaxf(KQ_max[col], sink); const float KQ_max_diff = KQ_max[col] - KQ_max_new; KQ_max_scale[col] = expf(KQ_max_diff); KQ_max[col] = KQ_max_new; *((uint32_t *) &KQ_max_scale[col]) *= KQ_max_diff >= SOFTMAX_FTZ_THRESHOLD; const float KQ_max_add = expf(sink - KQ_max_new); KQ_rowsum[col] = KQ_max_scale[col]*KQ_rowsum[col] + KQ_max_add; } #if defined(TURING_MMA_AVAILABLE) if constexpr (cols_per_warp == 8) { const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[0], KQ_max_scale[1]); #pragma unroll for (int i = 0; i < DV/T_C_VKQ::I; ++i) { #pragma unroll for (int l = 0; l < T_C_VKQ::ne; ++l) { VKQ_C[i].x[l] *= KQ_max_scale_h2; } } } else { #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[col], KQ_max_scale[col]); #pragma unroll for (int i = 0; i < (DV/2)/T_C_VKQ::J; ++i) { #pragma unroll for (int l0 = 0; l0 < T_C_VKQ::ne; l0 += 2) { VKQ_C[i].x[l0 + col] *= KQ_max_scale_h2; } } } } #else // Volta const int col = (threadIdx.x / 2) % 2; const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[col], KQ_max_scale[col]); #pragma unroll for (int i = 0; i < (DV/2)/T_C_VKQ::J; ++i) { #pragma unroll for (int l = 0; l < T_C_VKQ::ne; ++l) { VKQ_C[i].x[l] *= KQ_max_scale_h2; } } #endif // defined(TURING_MMA_AVAILABLE) } // Combine VKQ accumulator values if np > 1. // It's also faster to do small writes to shared memory, then large write to VRAM than to do small writes to VRAM. // So also write VKQ accumulators to shared memory in column-major format if np == 1. constexpr int tile_stride = nbatch_combine + 4; static_assert((DV/2) % nbatch_combine == 0, "bad nbatch_combine"); if constexpr (cols_per_warp == 8) { const int jc_cwmo = (threadIdx.x % (2*T_C_VKQ::J)) / T_C_VKQ::J; // jc combine write meta offset const int jc_cwm = threadIdx.y*(2*T_C_VKQ::J) + 2*T_C_VKQ::get_j(-1) + jc_cwmo; // jc combine write meta const float2 KQ_cmr = make_float2(KQ_max[jc_cwmo], KQ_rowsum[jc_cwmo]); // KQ combine max rowsum if (((!needs_fixup && !is_fixup) || np > 1) && threadIdx.x < 2*T_C_VKQ::J) { // Use the 16 bytes of padding in each row to store the meta data: KQ max, KQ rowsum, KQ max scale. ((float2 *) tile_Q)[jc_cwm*(tile_stride/2) + nbatch_combine/2] = KQ_cmr; } __syncthreads(); if (np == 1) { // No combination is needed, the meta data can be directly written from registers to VRAM. if (needs_fixup && threadIdx.x < T_B_KQ::I) { float2 * dstk_fixup_meta = dstk_fixup + blockIdx.x*ncols; dstk_fixup_meta[jc_cwm] = KQ_cmr; } if (is_fixup && threadIdx.x < T_B_KQ::I) { float2 * dstk_fixup_meta = dstk_fixup + (gridDim.x + blockIdx.x)*ncols; dstk_fixup_meta[jc_cwm] = KQ_cmr; } } } else { // jc_cwm = jc combine write meta // KQ_cmr = KQ combine max rowsum // Use the 16 bytes of padding in each Q column to store the meta data: KQ max, KQ rowsum, KQ max scale. 
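        // [Editor's note, illustrative only] Layout of that padding: each row of tile_Q is tile_stride ==
        // nbatch_combine + 4 half2 wide, so the last 4 half2 (16 bytes) of every row carry no VKQ data.
        // Reinterpreted as float2, a row is tile_stride/2 elements wide and the VKQ data occupies the first
        // nbatch_combine/2 of them, so storing KQ_cmr at index nbatch_combine/2 lands in the first 8 bytes
        // of that per-row padding.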
#if defined(TURING_MMA_AVAILABLE) const int jc_cwm = threadIdx.y*cols_per_warp + T_C_VKQ::get_i(threadIdx.x % 4); const float2 KQ_cmr = make_float2(KQ_max[threadIdx.x % cols_per_thread], KQ_rowsum[threadIdx.x % cols_per_thread]); const bool thread_should_write = threadIdx.x % 4 < cols_per_thread; #else // Volta const int jc_cwm = threadIdx.y*cols_per_warp + T_C_KQ::get_i(threadIdx.x & 2); const float2 KQ_cmr = make_float2(KQ_max[(threadIdx.x & 2) / 2], KQ_rowsum[(threadIdx.x & 2) / 2]); const bool thread_should_write = T_C_KQ::J == 8 || T_C_KQ::get_j(threadIdx.x & 2) < 8; #endif // defined(TURING_MMA_AVAILABLE) if (((!needs_fixup && !is_fixup) || np > 1) && thread_should_write) { ((float2 *) tile_Q)[jc_cwm*(tile_stride/2) + nbatch_combine/2] = KQ_cmr; } __syncthreads(); if (np == 1) { // No combination is needed, the meta data can be directly written from registers to VRAM. if (needs_fixup && thread_should_write) { float2 * dstk_fixup_meta = dstk_fixup + blockIdx.x*ncols; dstk_fixup_meta[jc_cwm] = KQ_cmr; } if (is_fixup && thread_should_write) { float2 * dstk_fixup_meta = dstk_fixup + (gridDim.x + blockIdx.x)*ncols; dstk_fixup_meta[jc_cwm] = KQ_cmr; } } } if (np > 1 && threadIdx.y % np == 0) { // Combine the meta data for parallel warps via shared memory. // Warps with threadIdx.y % np != 0 must NOT return early. // All threads must return simultaneously to avoid race conditions with work on the next tile. constexpr int nmeta = np*cols_per_warp >= WARP_SIZE ? np*cols_per_warp/WARP_SIZE : 1; const int jc_meta = threadIdx.y*cols_per_warp + (np*cols_per_warp < WARP_SIZE ? threadIdx.x % (np*cols_per_warp) : threadIdx.x); float2 * const meta_ptr = ((float2 *) tile_Q) + jc_meta*(tile_stride/2) + nbatch_combine/2; float2 meta[nmeta]; #pragma unroll for (int imeta = 0; imeta < nmeta; ++imeta) { meta[imeta] = meta_ptr[imeta * WARP_SIZE * tile_stride/2]; } float KQ_cmn = meta[0].x; // KQ combine max new, max between all parallel warps. #pragma unroll for (int imeta = 1; imeta < nmeta; ++imeta) { KQ_cmn = fmaxf(KQ_cmn, meta[imeta].x); } #pragma unroll for (int offset = np*cols_per_warp/2; offset >= cols_per_warp; offset >>= 1) { if (offset < WARP_SIZE) { KQ_cmn = fmaxf(KQ_cmn, __shfl_xor_sync(0xFFFFFFFF, KQ_cmn, offset, WARP_SIZE)); } } float KQ_cms[nmeta]; // KQ combine max scale per warp. #pragma unroll for (int imeta = 0; imeta < nmeta; ++imeta) { KQ_cms[imeta] = expf(meta[imeta].x - KQ_cmn); } float KQ_crs = KQ_cms[0]*meta[0].y; // KQ combine rowsum, scaled sum of all parallel warps. #pragma unroll for (int imeta = 1; imeta < nmeta; ++imeta) { KQ_crs += KQ_cms[imeta]*meta[imeta].y; } #pragma unroll for (int offset = np*cols_per_warp/2; offset >= cols_per_warp; offset >>= 1) { if (offset < WARP_SIZE) { KQ_crs += __shfl_xor_sync(0xFFFFFFFF, KQ_crs, offset, WARP_SIZE); } } __syncthreads(); // Write back combined meta data: #pragma unroll for (int imeta = 0; imeta < nmeta; ++imeta) { if (np*cols_per_warp >= WARP_SIZE || threadIdx.x < np*cols_per_warp) { // Combined KQ max scale + rowsum. meta_ptr[imeta * WARP_SIZE * tile_stride/2] = make_float2(KQ_cms[imeta], KQ_crs); } } // Combined KQ max + rowsum. 
static_assert(cols_per_warp <= WARP_SIZE); if (needs_fixup && (cols_per_warp == WARP_SIZE || threadIdx.x < cols_per_warp)) { float2 * dstk_fixup_meta = dstk_fixup + blockIdx.x*ncols; dstk_fixup_meta[(threadIdx.y/np)*cols_per_warp + threadIdx.x] = make_float2(KQ_cmn, KQ_crs); } if (is_fixup && (cols_per_warp == WARP_SIZE || threadIdx.x < cols_per_warp)) { float2 * dstk_fixup_meta = dstk_fixup + (gridDim.x + blockIdx.x)*ncols; dstk_fixup_meta[(threadIdx.y/np)*cols_per_warp + threadIdx.x] = make_float2(KQ_cmn, KQ_crs); } } else if (np > 1) { // Warps with threadIdx.y % np == 0 execute a __syncthreads() in the if branch. // Therefore, all other warps also need to execute a __syncthreads(). // Otherwise the points at which warps synchronize with each other would become misaligned. __syncthreads(); } #pragma unroll for (int k00 = 0; k00 < DV/2; k00 += nbatch_combine) { if constexpr (cols_per_warp == 8) { const int jc_cwd = threadIdx.y*T_B_KQ::I + T_B_KQ::get_i(-1); // jc combine write data #pragma unroll for (int k1 = 0; k1 < nbatch_combine; k1 += T_B_KQ::J) { const T_B_KQ B = get_transposed(VKQ_C[(k00 + k1)/T_B_KQ::J]); // Conversion of C to B matrix puts it in column-major format. #pragma unroll for (int l = 0; l < T_B_KQ::ne; ++l) { const int k = k1 + T_B_KQ::get_j(l); tile_Q[jc_cwd*tile_stride + k] = B.x[l]; } } } else { const int j0 = threadIdx.y*cols_per_warp; #pragma unroll for (int k1 = 0; k1 < nbatch_combine; k1 += T_C_VKQ::J) { #pragma unroll for (int l = 0; l < T_C_VKQ::ne; ++l) { const int j = j0 + T_C_VKQ::get_i(l); const int k = k1 + T_C_VKQ::get_j(l); tile_Q[j*tile_stride + k] = VKQ_C[(k00 + k1)/T_C_VKQ::J].x[l]; } } } __syncthreads(); if (np == 1 || threadIdx.y % np == 0) { // The first 2*2*gridDim.x*ncols floats in dstk_fixup are for storing max. values and row sums. // The values after that are for the partial results of the individual blocks. float2 * dstk_fixup_data = dstk_fixup + gridDim.x*(2*ncols) + blockIdx.x*(ncols*(DV/2)); #pragma unroll for (int stride_k : {WARP_SIZE, WARP_SIZE/2, WARP_SIZE/4}) { const int k0_start = stride_k == WARP_SIZE ? 0 : nbatch_combine - nbatch_combine % (2*stride_k); const int k0_stop = nbatch_combine - nbatch_combine % (1*stride_k); const int stride_jc = WARP_SIZE / stride_k; if (k0_start == k0_stop) { continue; } #pragma unroll for (int jc0_dst = 0; jc0_dst < ncols; jc0_dst += (nwarps/np)*stride_jc) { const int jc_dst = jc0_dst + (threadIdx.y/np)*stride_jc + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); if (jc0_dst + (nwarps/np)*stride_jc > ncols && jc_dst >= ncols) { break; } const int jc_tile_K = (jc_dst/cols_per_warp)*(np*cols_per_warp) + jc_dst % cols_per_warp; const int j_dst = jc_dst / ncols2; const int c_dst = jc_dst % ncols2; if (!is_fixup && jt*ncols1 + j_dst >= int(ne01.z)) { continue; } const float * meta_j = (const float *) tile_Q + jc_tile_K*tile_stride + nbatch_combine; #pragma unroll for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); float2 dstk_val = make_float2(0.0f, 0.0f); #pragma unroll for (int ip = 0; ip < np; ++ip) { const float KQ_crs = np == 1 ? 
1.0f : meta_j[ip*cols_per_warp * tile_stride + 0]; const float2 dstk_val_add = __half22float2(tile_Q[(jc_tile_K + ip*cols_per_warp) * tile_stride + k]); dstk_val.x += dstk_val_add.x*KQ_crs; dstk_val.y += dstk_val_add.y*KQ_crs; } if (!needs_fixup && !is_fixup) { const float KQ_rowsum_j = meta_j[1]; dstk_val.x /= KQ_rowsum_j; dstk_val.y /= KQ_rowsum_j; } if (is_fixup) { dstk_fixup_data[jc_dst*(DV/2) + k00 + k] = dstk_val; } else { dstk[((jt*ncols1 + j_dst)*ne02 + c_dst)*(DV/2) + k00 + k] = dstk_val; } } } } } if (np > 1) { __syncthreads(); } } #else GGML_UNUSED_VARS(Q_f2, K_h2, V_h2, mask_h, sinks_f, dstk, dstk_fixup, scale, slope, logit_softcap, ne01, ne02, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start, kb0_stop); NO_DEVICE_CODE; #endif // defined(VOLTA_MMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } template __launch_bounds__(ggml_cuda_fattn_mma_get_nthreads(DKQ, DV, ncols1*ncols2), ggml_cuda_fattn_mma_get_occupancy(DKQ, DV, ncols1*ncols2)) static __global__ void flash_attn_ext_f16( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, const char * __restrict__ sinks, const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, const float max_bias, const float m0, const float m1, const uint32_t n_head_log2, const float logit_softcap, const int32_t ne00, const uint3 ne01, const int32_t ne02, const int32_t ne03, const int32_t nb01, const int32_t nb02, const int32_t nb03, const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, const int32_t nb11, const int32_t nb12, const int64_t nb13, const int32_t nb21, const int32_t nb22, const int64_t nb23, const int32_t ne31, const int32_t ne32, const int32_t ne33, const int32_t nb31, const int32_t nb32, const int64_t nb33) { #if defined(FLASH_ATTN_AVAILABLE) && (defined(VOLTA_MMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)) // Skip unused kernel variants for faster compilation: if (use_logit_softcap && !(DKQ == 128 || DKQ == 256)) { NO_DEVICE_CODE; return; } #if __CUDA_ARCH__ == GGML_CUDA_CC_TURING if (ncols1*ncols2 > 32) { NO_DEVICE_CODE; return; } #endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV"); constexpr int ncols = ncols1 * ncols2; constexpr int nbatch_fa = ggml_cuda_fattn_mma_get_nbatch_fa(DKQ, DV, ncols); constexpr int nthreads = ggml_cuda_fattn_mma_get_nthreads(DKQ, DV, ncols); constexpr int nwarps = nthreads / WARP_SIZE; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. const int stride_Q1 = nb01 / sizeof(float2); const int stride_Q2 = nb02 / sizeof(float2); const int stride_K = nb11 / sizeof(half2); const int stride_mask = nb31 / sizeof(half); const int stride_V = mla ? stride_K : nb21 / sizeof(half2); const int iter_k = (ne11 + (nbatch_fa - 1)) / nbatch_fa; const int iter_j = (ne01.z + (ncols1 - 1)) / ncols1; // kbc == k block continuous, current index in continuous ijk space. int kbc = int64_t(blockIdx.x + 0)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; const int kbc_stop = int64_t(blockIdx.x + 1)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; // If the seams of 2 CUDA blocks fall within an output tile their results need to be combined. // For this we need to track both the block that starts the tile (needs_fixup) and the block that finishes the tile (is_fixup). // In the most general case >2 seams can fall into the same tile. 
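    // [Editor's note, illustrative only; iter_k == 4 is a hypothetical value.] A CUDA block assigned
    // kbc in [5, 10) first processes kb0 = 1..3 of output tile 1 (needs_fixup, because a different block
    // produced kb0 == 0 of that tile) and then kb0 = 0..1 of output tile 2 (is_fixup, because it does not
    // finish that tile and therefore writes its partial result to the fixup buffer instead of dst).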
// kb0 == k start index when in the output tile. int kb0_start = kbc % iter_k; int kb0_stop = min(iter_k, kb0_start + kbc_stop - kbc); while (kbc < kbc_stop && kb0_stop == iter_k) { const int sequence = kbc / (iter_k*iter_j*(ne02/ncols2)); const int zt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence) / (iter_k*iter_j); // head in units of ncols2 const int jt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence - iter_k*iter_j*zt) / iter_k; // j index of current tile. const int head0 = zt * ncols2; const float2 * Q_f2 = (const float2 *) (Q + nb03*sequence + nb02* head0); const half2 * K_h2 = (const half2 *) (K + nb13*sequence + nb12*(head0 / gqa_ratio)); const half * mask_h = ncols2 == 1 && !mask ? nullptr : (const half *) (mask + nb33*(sequence % ne33)); float2 * dstk = ((float2 *) dst) + (sequence*ne01.z*ne02 + head0) * (DV/2); const half2 * V_h2 = mla ? K_h2 + (DKQ/2 - DV/2) : (const half2 *) (V + nb23*sequence + nb22*(head0 / gqa_ratio)); const float * sinks_f = sinks ? (const float *) sinks + head0 : nullptr; const float slope = ncols2 == 1 ? get_alibi_slope(max_bias, head0, n_head_log2, m0, m1) : 1.0f; if (KV_max) { kb0_stop = min(kb0_stop, KV_max[sequence*iter_j + jt] / nbatch_fa); } constexpr bool is_fixup = false; // All but (potentially) the last iterations write their data to dst rather than the fixup buffer. if (kb0_start == 0) { constexpr bool needs_fixup = false; // CUDA block is working on an entire tile. flash_attn_ext_f16_process_tile (Q_f2, K_h2, V_h2, mask_h, sinks_f, dstk, dst_meta, scale, slope, logit_softcap, ne01, ne02, ne11, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start, kb0_stop); } else { constexpr bool needs_fixup = true; // CUDA block is missing the beginning of a tile. flash_attn_ext_f16_process_tile (Q_f2, K_h2, V_h2, mask_h, sinks_f, dstk, dst_meta, scale, slope, logit_softcap, ne01, ne02, ne11, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start, kb0_stop); } kbc += iter_k; kbc -= kbc % iter_k; kb0_start = 0; kb0_stop = min(iter_k, kbc_stop - kbc); } if (kbc >= kbc_stop) { return; } const int sequence = kbc / (iter_k*iter_j*(ne02/ncols2)); const int zt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence) / (iter_k*iter_j); // head in units of ncols2 const int jt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence - iter_k*iter_j*zt) / iter_k; // j index of current tile. const int head0 = zt * ncols2; const float2 * Q_f2 = (const float2 *) (Q + nb03*sequence + nb02* head0); const half2 * K_h2 = (const half2 *) (K + nb13*sequence + nb12*(head0 / gqa_ratio)); const half * mask_h = ncols2 == 1 && !mask ? nullptr : (const half *) (mask + nb33*(sequence % ne33)); float2 * dstk = ((float2 *) dst) + (sequence*ne01.z*ne02 + head0) * (DV/2); const half2 * V_h2 = mla ? K_h2 + (DKQ/2 - DV/2) : (const half2 *) (V + nb23*sequence + nb22*(head0 / gqa_ratio)); const float * sinks_f = sinks ? (const float *) sinks + head0 : nullptr; const float slope = ncols2 == 1 ? get_alibi_slope(max_bias, head0, n_head_log2, m0, m1) : 1.0f; if (KV_max) { kb0_stop = min(kb0_stop, KV_max[sequence*iter_j + jt] / nbatch_fa); } constexpr bool is_fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks. 
constexpr bool needs_fixup = false; flash_attn_ext_f16_process_tile (Q_f2, K_h2, V_h2, mask_h, sinks_f, dstk, dst_meta, scale, slope, logit_softcap, ne01, ne02, ne11, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start, kb0_stop); #else GGML_UNUSED_VARS(Q, K, V, mask, sinks, KV_max, dst, dst_meta, scale, max_bias, m0, m1, n_head_log2, logit_softcap, ne00, ne01, ne02, ne03, nb01, nb02, nb03, ne10, ne11, ne12, ne13, nb11, nb12, nb13, nb21, nb22, nb23, ne31, ne32, ne33, nb31, nb32, nb33); NO_DEVICE_CODE; #endif // defined(FLASH_ATTN_AVAILABLE) && (defined(VOLTA_MMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)) } template void ggml_cuda_flash_attn_ext_mma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; constexpr int ncols = ncols1 * ncols2; const int nthreads = ggml_cuda_fattn_mma_get_nthreads (DKQ, DV, ncols, cc); const int nbatch_fa = ggml_cuda_fattn_mma_get_nbatch_fa (DKQ, DV, ncols, cc); const int nbatch_K2 = ggml_cuda_fattn_mma_get_nbatch_K2 (DKQ, DV, ncols, cc); const int nbatch_V2 = ggml_cuda_fattn_mma_get_nbatch_V2 (DKQ, DV, ncols, cc); const int nbatch_combine = ggml_cuda_fattn_mma_get_nbatch_combine(DKQ, DV, ncols, cc); const bool Q_in_reg = ggml_cuda_fattn_mma_get_Q_in_reg (DKQ, DV, ncols, cc); const int nstages = ggml_cuda_fattn_mma_get_nstages (DKQ, DV, ncols1, ncols2, cc); const int cols_per_warp = std::min(ncols, turing_mma_available(cc) ? 16 : 32); const int nwarps = nthreads / WARP_SIZE; constexpr bool mla = DKQ == 576; const size_t nbytes_shared_KV_1stage = nbatch_fa * std::max(nbatch_K2 + 4, nbatch_V2 + 4) * sizeof(half2); const size_t nbytes_shared_KV_2stage = nbatch_fa * (nbatch_K2 + 4 + nbatch_V2 + 4) * sizeof(half2); const size_t nbytes_shared_Q = ncols * (DKQ/2 + 4) * sizeof(half2); const size_t nbytes_shared_mask = ncols1 * (nbatch_fa/2 + 4) * sizeof(half2); const size_t nbytes_shared_combine = nwarps*cols_per_warp * (nbatch_combine + 4) * sizeof(half2); const size_t nbytes_shared_KV = nstages <= 1 ? nbytes_shared_KV_1stage : nbytes_shared_KV_2stage; const size_t nbytes_shared_total = std::max(nbytes_shared_combine, Q_in_reg ? 
std::max(nbytes_shared_Q, nbytes_shared_KV + nbytes_shared_mask) : nbytes_shared_Q + nbytes_shared_KV + nbytes_shared_mask); float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); fattn_kernel_t fattn_kernel; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; fattn_kernel = flash_attn_ext_f16; #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false}; if (!shared_memory_limit_raised[id]) { CUDA_CHECK(cudaFuncSetAttribute(fattn_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes_shared_total)); shared_memory_limit_raised[id] = true; } #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) } else { constexpr bool use_logit_softcap = true; fattn_kernel = flash_attn_ext_f16; #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false}; if (!shared_memory_limit_raised[id]) { CUDA_CHECK(cudaFuncSetAttribute(fattn_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes_shared_total)); shared_memory_limit_raised[id] = true; } #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) } launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared_total, nbatch_fa, true, true, true); } #define DECL_FATTN_MMA_F16_CASE(DKQ, DV, ncols1, ncols2) \ template void ggml_cuda_flash_attn_ext_mma_f16_case \ (ggml_backend_cuda_context & ctx, ggml_tensor * dst) \ #define DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(DKQ, DV, ncols) \ extern DECL_FATTN_MMA_F16_CASE(DKQ, DV, (ncols)/ 1, 1); \ extern DECL_FATTN_MMA_F16_CASE(DKQ, DV, (ncols)/ 2, 2); \ extern DECL_FATTN_MMA_F16_CASE(DKQ, DV, (ncols)/ 4, 4); \ extern DECL_FATTN_MMA_F16_CASE(DKQ, DV, (ncols)/ 8, 8); \ extern DECL_FATTN_MMA_F16_CASE(DKQ, DV, (ncols)/16, 16); \ DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 64, 64, 8) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 80, 80, 8) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 96, 96, 8) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(112, 112, 8) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(128, 128, 8) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(256, 256, 8) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 64, 64, 16) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 80, 80, 16) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 96, 96, 16) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(112, 112, 16) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(128, 128, 16) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(256, 256, 16) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 64, 64, 32) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 80, 80, 32) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 96, 96, 32) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(112, 112, 32) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(128, 128, 32) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(256, 256, 32) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 64, 64, 64) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 80, 80, 64) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2( 96, 96, 64) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(112, 112, 64) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(128, 128, 64) DECL_FATTN_MMA_F16_CASE_ALL_NCOLS2(256, 256, 64) // The number of viable configurations for Deepseek is very limited: extern DECL_FATTN_MMA_F16_CASE(576, 512, 1, 16); extern DECL_FATTN_MMA_F16_CASE(576, 512, 2, 16); extern DECL_FATTN_MMA_F16_CASE(576, 512, 4, 16); ggml-org-ggml-3678254/src/ggml-cuda/fattn-tile.cu000066400000000000000000000033021512524704700214110ustar00rootroot00000000000000#include "common.cuh" #include "fattn-tile.cuh" #include "fattn-wmma-f16.cuh" void ggml_cuda_flash_attn_ext_tile(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * K = dst->src[1]; const ggml_tensor * V = 
dst->src[2]; switch (K->ne[0]) { case 40: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case< 40, 40>(ctx, dst); } break; case 64: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case< 64, 64>(ctx, dst); } break; case 72: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case< 72, 72>(ctx, dst); } break; case 80: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case< 80, 80>(ctx, dst); } break; case 96: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case< 96, 96>(ctx, dst); } break; case 112: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case<112, 112>(ctx, dst); } break; case 128: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case<128, 128>(ctx, dst); } break; case 256: { GGML_ASSERT(V->ne[0] == K->ne[0]); ggml_cuda_flash_attn_ext_tile_case<256, 256>(ctx, dst); } break; case 576: { GGML_ASSERT(V->ne[0] == 512); ggml_cuda_flash_attn_ext_tile_case<576, 512>(ctx, dst); } break; default: { GGML_ABORT("Unsupported head size"); } break; } } ggml-org-ggml-3678254/src/ggml-cuda/fattn-tile.cuh000066400000000000000000001571671512524704700216040ustar00rootroot00000000000000#include "common.cuh" #include "fattn-common.cuh" #include "fattn-wmma-f16.cuh" // nbatch_fa == number of KQ rows to process per iteration // nbatch_K == number of K columns to load in parallel for KQ calculation // TODO optimize kernel parameters for FP16 NVIDIA (P100) // TODO optimize kernel parameters for head sizes 40, 72, 80, 96, 112 // The ROCm compiler cannot handle templating in __launch_bounds__. // As a workaround, define a macro to package the kernel parameters as uint32_t: #define GGML_CUDA_FATTN_TILE_CONFIG_CASE(DKQ_, DV_, ncols_, nthreads, occupancy, nbatch_fa, nbatch_K) \ if (DKQ == (DKQ_) && DV == (DV_) && ncols == (ncols_)) { \ static_assert((nthreads) <= 512, "bad nthreads"); \ static_assert((occupancy) <= 8, "bad occupancy"); \ static_assert((nbatch_fa) <= 256, "bad nbatch_fa"); \ static_assert((nbatch_K) <= 256, "bad nbatch_K"); \ return ((nthreads) << 0) | ((occupancy) << 10) | ((nbatch_fa) << 14) | ((nbatch_K) << 23); \ } \ static constexpr __host__ __device__ uint32_t ggml_cuda_fattn_tile_get_config_nvidia_fp16(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 2, 64, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 4, 128, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 8, 256, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 16, 256, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 32, 256, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 2, 64, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 4, 128, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 8, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 16, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 2, 64, 2, 64, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 4, 128, 2, 64, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 8, 256, 2, 64, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 16, 256, 2, 64, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 32, 256, 2, 64, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 2, 64, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 4, 128, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 8, 256, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 16, 256, 2, 64, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 32, 256, 2, 64, 40) 
GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 2, 64, 2, 64, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 4, 128, 2, 64, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 8, 256, 2, 64, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 16, 256, 2, 64, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 32, 256, 2, 64, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 2, 64, 2, 64, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 4, 128, 2, 64, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 8, 256, 2, 64, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 16, 256, 2, 64, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 32, 256, 2, 64, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 2, 64, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 4, 128, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 8, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 16, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 2, 64, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 4, 128, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 8, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 16, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(576, 512, 16, 256, 2, 64, 64) return 0; } static constexpr __host__ __device__ uint32_t ggml_cuda_fattn_tile_get_config_nvidia_fp32(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 2, 64, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 4, 128, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 8, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 16, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 32, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 2, 128, 3, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 4, 128, 3, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 8, 128, 3, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 16, 128, 3, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 2, 64, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 4, 128, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 8, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 16, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 32, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 2, 64, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 4, 128, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 8, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 16, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 32, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 2, 64, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 4, 128, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 8, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 16, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 32, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 2, 64, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 4, 128, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 8, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 16, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 32, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 2, 128, 3, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 4, 128, 3, 32, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 8, 128, 3, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 16, 128, 3, 32, 128) 
GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 2, 128, 3, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 4, 128, 3, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 8, 256, 2, 32, 256) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 16, 256, 2, 32, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 32, 256, 2, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(576, 512, 16, 256, 2, 32, 64) return 0; } static constexpr __host__ __device__ uint32_t ggml_cuda_fattn_tile_get_config_amd(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 2, 64, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 4, 128, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 8, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 16, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 32, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 64, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 2, 64, 3, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 4, 128, 3, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 8, 128, 2, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 16, 256, 2, 128, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 64, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 2, 64, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 4, 128, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 8, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 16, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 32, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 64, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 2, 64, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 4, 128, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 8, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 16, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 32, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 64, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 2, 64, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 4, 128, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 8, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 16, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 32, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 64, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 2, 64, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 4, 128, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 8, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 16, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 32, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 64, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 2, 256, 2, 128, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 4, 128, 2, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 8, 256, 2, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 16, 256, 2, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 32, 256, 2, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 64, 256, 2, 64, 32) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 2, 256, 2, 128, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 4, 256, 2, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 8, 256, 2, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 16, 256, 2, 32, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 32, 256, 2, 32, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(576, 512, 16, 256, 2, 64, 
64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(576, 512, 32, 512, 1, 128, 64) return 0; } static constexpr __host__ __device__ uint32_t ggml_cuda_fattn_tile_get_config_amd_rdna(const int DKQ, const int DV, const int ncols) { GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 2, 64, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 4, 128, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 8, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 16, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 32, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 40, 40, 64, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 2, 64, 8, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 4, 64, 8, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 8, 128, 5, 128, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 16, 128, 5, 128, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 32, 128, 4, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 64, 64, 64, 128, 5, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 2, 64, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 4, 128, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 8, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 16, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 32, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 72, 72, 64, 256, 2, 32, 72) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 2, 64, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 4, 128, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 8, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 16, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 32, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 80, 80, 64, 256, 2, 32, 40) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 2, 64, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 4, 128, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 8, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 16, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 32, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE( 96, 96, 64, 256, 2, 32, 48) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 2, 64, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 4, 128, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 8, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 16, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 32, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(112, 112, 64, 256, 2, 32, 56) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 2, 64, 8, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 4, 128, 8, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 8, 128, 8, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 16, 256, 3, 128, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 32, 256, 3, 128, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(128, 128, 64, 256, 3, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 2, 64, 8, 32, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 4, 128, 6, 32, 256) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 8, 128, 6, 32, 256) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 16, 256, 5, 32, 256) GGML_CUDA_FATTN_TILE_CONFIG_CASE(256, 256, 32, 256, 3, 64, 128) GGML_CUDA_FATTN_TILE_CONFIG_CASE(576, 512, 16, 256, 4, 64, 64) GGML_CUDA_FATTN_TILE_CONFIG_CASE(576, 512, 32, 256, 2, 128, 64) return 0; } static __host__ uint32_t ggml_cuda_fattn_tile_get_config(const int DKQ, const int DV, const int ncols, const int cc) { if (GGML_CUDA_CC_IS_AMD(cc)) { if (GGML_CUDA_CC_IS_RDNA(cc)) { return ggml_cuda_fattn_tile_get_config_amd_rdna(DKQ, DV, ncols); } return 
ggml_cuda_fattn_tile_get_config_amd(DKQ, DV, ncols); } if (fast_fp16_available(cc)) { return ggml_cuda_fattn_tile_get_config_nvidia_fp16(DKQ, DV, ncols); } return ggml_cuda_fattn_tile_get_config_nvidia_fp32(DKQ, DV, ncols); } static constexpr __device__ uint32_t ggml_cuda_fattn_tile_get_config(const int DKQ, const int DV, const int ncols) { #ifdef GGML_USE_HIP #ifdef RDNA return ggml_cuda_fattn_tile_get_config_amd_rdna(DKQ, DV, ncols); #else return ggml_cuda_fattn_tile_get_config_amd(DKQ, DV, ncols); #endif // RDNA #else #ifdef FAST_FP16_AVAILABLE return ggml_cuda_fattn_tile_get_config_nvidia_fp16(DKQ, DV, ncols); #else return ggml_cuda_fattn_tile_get_config_nvidia_fp32(DKQ, DV, ncols); #endif // FAST_FP16_AVAILABLE #endif // GGML_USE_HIP } static __host__ int ggml_cuda_fattn_tile_get_nthreads(const int DKQ, const int DV, const int ncols, const int cc) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols, cc) >> 0) & ((1 << 10) - 1); } static constexpr __device__ int ggml_cuda_fattn_tile_get_nthreads(const int DKQ, const int DV, const int ncols) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols) >> 0) & ((1 << 10) - 1); } static __host__ int ggml_cuda_fattn_tile_get_occupancy(const int DKQ, const int DV, const int ncols, const int cc) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols, cc) >> 10) & ((1 << 4) - 1); } static constexpr __device__ int ggml_cuda_fattn_tile_get_occupancy(const int DKQ, const int DV, const int ncols) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols) >> 10) & ((1 << 4) - 1); } static __host__ int ggml_cuda_fattn_tile_get_nbatch_fa(const int DKQ, const int DV, const int ncols, const int cc) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols, cc) >> 14) & ((1 << 9) - 1); } static constexpr __device__ int ggml_cuda_fattn_tile_get_nbatch_fa(const int DKQ, const int DV, const int ncols) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols) >> 14) & ((1 << 9) - 1); } static __host__ int ggml_cuda_fattn_tile_get_nbatch_K(const int DKQ, const int DV, const int ncols, const int cc) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols, cc) >> 23) & ((1 << 9) - 1); } static constexpr __device__ int ggml_cuda_fattn_tile_get_nbatch_K(const int DKQ, const int DV, const int ncols) { return (ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols) >> 23) & ((1 << 9) - 1); } // TODO: deduplicate with mma-f16 template static __device__ __forceinline__ void flash_attn_tile_load_tile( const half2 * const __restrict__ KV, half2 * const __restrict__ tile_KV, const int stride_KV, const int i_sup) { constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; auto load = [&] __device__ (const int n) { const int stride_j = warp_size >> n; if (stride_j == 0) { return; } const int j0_start = stride_j == warp_size ? 0 : ((J/2)/cpy_ne) - ((J/2)/cpy_ne) % (2*stride_j); const int j0_stop = ((J/2)/cpy_ne) - ((J/2)/cpy_ne) % (1*stride_j); const int stride_i = warp_size / stride_j; if (j0_start == j0_stop) { return; } #pragma unroll for (int i0 = 0; i0 < I; i0 += nwarps*stride_i) { const int i = i0 + threadIdx.y*stride_i + (stride_j == warp_size ? 0 : threadIdx.x / stride_j); if (i0 + nwarps*stride_i <= I || i < I) { #pragma unroll for (int j0 = j0_start; j0 < j0_stop; j0 += stride_j) { const int j = j0*cpy_ne + (stride_j == warp_size ? threadIdx.x : threadIdx.x % stride_j)*cpy_ne; const half2 zero[cpy_ne] = {{0.0f, 0.0f}}; ggml_cuda_memcpy_1( tile_KV + i*(J/2 + J_padding) + j, !oob_check || i < i_sup ? 
KV + i*stride_KV + j : zero); } } } }; // 1: max 64*16=512 bytes, 512 half // 2: max 32*16=512 bytes, 256 half // 3: max 16*16=256 bytes, 128 half // 4: max 8*16=128 bytes, 64 half // 5: max 4*16= 64 bytes, 32 half // 6: max 2*16= 32 bytes, 16 half // 7: max 1*16= 16 bytes, 8 half static_assert(J % 8 == 0, "bad J"); static_assert((J/2) % cpy_ne == 0, "bad J"); ggml_cuda_unroll<7>{}(load); } template static __device__ __forceinline__ void flash_attn_tile_load_tile( const half2 * const __restrict__ KV, float * const __restrict__ tile_KV, const int stride_KV, const int i_sup) { constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; auto load = [&] __device__ (const int n) { const int stride_j = warp_size >> n; if (stride_j == 0) { return; } const int j0_start = stride_j == warp_size ? 0 : (J/cpy_ne) - (J/cpy_ne) % (2*stride_j); const int j0_stop = (J/cpy_ne) - (J/cpy_ne) % (1*stride_j); const int stride_i = warp_size / stride_j; if (j0_start == j0_stop) { return; } #pragma unroll for (int i0 = 0; i0 < I; i0 += nwarps*stride_i) { const int i = i0 + threadIdx.y*stride_i + (stride_j == warp_size ? 0 : threadIdx.x / stride_j); if (i0 + nwarps*stride_i <= I || i < I) { #pragma unroll for (int j0 = j0_start; j0 < j0_stop; j0 += stride_j) { const int j = j0*(cpy_ne/2) + (stride_j == warp_size ? threadIdx.x : threadIdx.x % stride_j)*(cpy_ne/2); const half2 zero[cpy_ne/2] = {{0.0f, 0.0f}}; half2 tmp_h2[cpy_ne/2]; ggml_cuda_memcpy_1( tmp_h2, !oob_check || i < i_sup ? KV + i*stride_KV + j : zero); float2 tmp_f2[cpy_ne/2]; #pragma unroll for (int l = 0; l < cpy_ne/2; ++l) { tmp_f2[l] = __half22float2(tmp_h2[l]); } ggml_cuda_memcpy_1(tile_KV + i*(J + J_padding) + 2*j, tmp_f2); } } } }; // 1: max 32*16=512 bytes, 128 float // 2: max 16*16=256 bytes, 64 float // 3: max 8*16=128 bytes, 32 float // 4: max 4*16= 64 bytes, 16 float // 5: max 2*16= 32 bytes, 8 float static_assert(J % 8 == 0, "bad J"); static_assert(J % cpy_ne == 0, "bad J"); ggml_cuda_unroll<5>{}(load); } // Function that performs a single iteration in for the KQ matrix multiplication: template static __device__ __forceinline__ void flash_attn_tile_iter_KQ( T_vec_dot * const Q_tmp, const half2 * const __restrict__ K_h2, T_vec_dot * const KV_tmp, const int stride_K2, const int k_VKQ_0, const int k_VKQ_sup, const int k_KQ_0, float * KQ_acc) { constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; constexpr int ncols = ncols1*ncols2; constexpr int cpw = ncols > nwarps ? ncols/nwarps : 1; // Q columns per warp constexpr int np = nwarps > ncols ? 
nwarps/ncols : 1; // number of parallel warps per Q column flash_attn_tile_load_tile (K_h2 + int64_t(k_VKQ_0)*stride_K2 + k_KQ_0/2, KV_tmp, stride_K2, k_VKQ_sup); __syncthreads(); #ifdef FAST_FP16_AVAILABLE static_assert((nbatch_K/2) % cpy_ne == 0, "bad nbatch_K"); #pragma unroll for (int k_KQ_1 = 0; k_KQ_1 < nbatch_K/2; k_KQ_1 += cpy_ne) { half2 K_k[nbatch_fa/(np*warp_size)][cpy_ne]; half2 Q_k[cpw][cpy_ne]; #else static_assert(nbatch_K % cpy_ne == 0, "bad nbatch_K"); #pragma unroll for (int k_KQ_1 = 0; k_KQ_1 < nbatch_K; k_KQ_1 += cpy_ne) { float K_k[nbatch_fa/(np*warp_size)][cpy_ne]; float Q_k[cpw][cpy_ne]; #endif // FAST_FP16_AVAILABLE #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < nbatch_fa; i_KQ_0 += np*warp_size) { const int i_KQ = i_KQ_0 + (threadIdx.y % np)*warp_size + threadIdx.x; #ifdef FAST_FP16_AVAILABLE ggml_cuda_memcpy_1(&K_k[i_KQ_0/(np*warp_size)], &KV_tmp[i_KQ*(nbatch_K/2 + cpy_ne) + k_KQ_1]); #else ggml_cuda_memcpy_1(&K_k[i_KQ_0/(np*warp_size)], &KV_tmp[i_KQ*(nbatch_K + cpy_ne) + k_KQ_1]); #endif // FAST_FP16_AVAILABLE } #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { const int jc = jc0 + (threadIdx.y / np)*cpw; #ifdef FAST_FP16_AVAILABLE ggml_cuda_memcpy_1(&Q_k[jc0], &Q_tmp[jc*(DKQ/2) + k_KQ_0/2 + k_KQ_1]); #else ggml_cuda_memcpy_1(&Q_k[jc0], &Q_tmp[jc* DKQ + k_KQ_0 + k_KQ_1]); #endif // FAST_FP16_AVAILABLE } #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < nbatch_fa; i_KQ_0 += np*warp_size) { #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { #pragma unroll for (int k = 0; k < cpy_ne; ++k) { ggml_cuda_mad(KQ_acc[i_KQ_0/(np*warp_size)*cpw + jc0], K_k[i_KQ_0/(np*warp_size)][k], Q_k[jc0][k]); } } } } if (k_KQ_0 + nbatch_K < DKQ) { __syncthreads(); // Sync not needed on last iteration. } } // Function that performs a single iteration of the main loop over up to nbatch_fa tokens. template static __device__ __forceinline__ void flash_attn_tile_iter( T_vec_dot * const Q_tmp, const half2 * const __restrict__ K_h2, const half2 * const __restrict__ V_h2, const half * const __restrict__ mask, const uint3 ne01, const float logit_softcap, const float slope, T_KQ * const KQ, T_vec_dot * const KV_tmp, const int stride_K2, const int stride_V2, const int stride_mask, float * const KQ_max, float * const KQ_sum, T_acc * const VKQ, const int k_VKQ_0, const int k_VKQ_max, const int col_Q_0) { constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; constexpr int ncols = ncols1*ncols2; constexpr int cpw = ncols > nwarps ? ncols/nwarps : 1; // Q columns per warp constexpr int np = nwarps > ncols ? nwarps/ncols : 1; // number of parallel warps per Q column constexpr int DVp = (DV + 2*warp_size - 1) & ~(2*warp_size - 1); // DV padded to multiple of 2*warp_size. // KQ_cs == KQ chunk size, number of KQ values in j direction to store as one contiguous chunk in memory. // KQ is originally 2D but uses a Z-shaped 3D memory pattern like KQ[ncols/KQ_cs][DVp][KQ_cs]. #ifdef FAST_FP16_AVAILABLE constexpr int KQ_cs = cpw < 2*cpy_ne ? cpw : 2*cpy_ne; #else constexpr int KQ_cs = cpw < 1*cpy_ne ? cpw : 1*cpy_ne; #endif // FAST_FP16_AVAILABLE static_assert(cpw % KQ_cs == 0, "bad KQ_cs"); const int k_VKQ_sup = k_VKQ_max - k_VKQ_0; // k supremum, only smaller k values have valid KV data float KQ_max_new[cpw]; #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { KQ_max_new[jc0] = KQ_max[jc0]; } float KQ_acc[nbatch_fa/(np*warp_size) * cpw] = {0.0f}; // Accumulators for KQ matrix multiplication. 
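    // [Editor's note, illustrative only] The rest of this function is the usual online-softmax update for one
    // batch of up to nbatch_fa tokens: with previous per-column state (m, l, O) and new scores s_i,
    //     m' = max(m, max_i s_i),   p_i = exp(s_i - m'),
    //     l' = exp(m - m') * l + sum_i p_i,   O' = exp(m - m') * O + sum_i p_i * v_i.
    // FATTN_KQ_MAX_OFFSET below only shifts the tracked maximum by a constant safety margin.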
// KQ = K @ Q matrix multiplication: constexpr int nbatch_K_last = DKQ % nbatch_K; #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < DKQ - nbatch_K_last; k_KQ_0 += nbatch_K) { flash_attn_tile_iter_KQ( Q_tmp, K_h2, KV_tmp, stride_K2, k_VKQ_0, k_VKQ_sup, k_KQ_0, KQ_acc); } if (nbatch_K_last > 0) { constexpr int k_KQ_0 = DKQ - nbatch_K_last; flash_attn_tile_iter_KQ( Q_tmp, K_h2, KV_tmp, stride_K2, k_VKQ_0, k_VKQ_sup, k_KQ_0, KQ_acc); } // Apply logit softcap + mask, update KQ_max: #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { const int j = fastmodulo(col_Q_0 + (jc0 + (threadIdx.y / np)*cpw)/ncols2, ne01); #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < nbatch_fa; i_KQ_0 += np*warp_size) { const int i_KQ = i_KQ_0 + (threadIdx.y % np)*warp_size + threadIdx.x; #if defined(FAST_FP16_AVAILABLE) && !defined(V_DOT2_F32_F16_AVAILABLE) // Without the v_dot2_f32_f16 instruction there is a higher risk of numerical overflow in the KQ calculation. // Therefore, scale down Q values and apply the inverse scale the FP32 KQ values afterwards again. KQ_acc[i_KQ_0/(np*warp_size)*cpw + jc0] *= 4.0f; #endif // defined(FAST_FP16_AVAILABLE) && !defined(V_DOT2_F32_F16_AVAILABLE) if (use_logit_softcap) { KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0] = logit_softcap * tanhf(KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0]); } if (!oob_check || i_KQ < k_VKQ_sup) { KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0] += (ncols2 > 1 || mask) ? slope*__half2float(mask[j*stride_mask + k_VKQ_0 + i_KQ]) : 0.0f; KQ_max_new[jc0] = fmaxf(KQ_max_new[jc0], KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0] + FATTN_KQ_MAX_OFFSET); } } KQ_max_new[jc0] = warp_reduce_max(KQ_max_new[jc0]); } if constexpr (np == 1) { __syncthreads(); } else { static_assert(cpw == 1, "bad cpw"); __shared__ float KQ_max_new_shared[nwarps]; if (threadIdx.x == 0) { KQ_max_new_shared[threadIdx.y] = KQ_max_new[0]; } __syncthreads(); KQ_max_new[0] = KQ_max_new_shared[(threadIdx.y & ~(np-1)) + threadIdx.x % np]; KQ_max_new[0] = warp_reduce_max(KQ_max_new[0]); } // Calculate KQ softmax, write to shared KQ buffer, re-scale VKQ accumulators: #pragma unroll for (int jc0 = 0; jc0 < cpw; jc0 += KQ_cs) { #ifdef FAST_FP16_AVAILABLE half tmp[nbatch_fa/(np*warp_size)][KQ_cs]; #else float tmp[nbatch_fa/(np*warp_size)][KQ_cs]; #endif // FAST_FP16_AVAILABLE #pragma unroll for (int jc1 = 0; jc1 < KQ_cs; ++jc1) { const int jc = jc0 + jc1; const float KQ_max_scale = expf(KQ_max[jc] - KQ_max_new[jc]); KQ_max[jc] = KQ_max_new[jc]; float KQ_sum_add = 0.0f; #pragma unroll for (int i0 = 0; i0 < nbatch_fa; i0 += np*warp_size) { const float val = !oob_check || i0 + (threadIdx.y % np)*warp_size + threadIdx.x < static_cast(k_VKQ_sup) ? 
expf(KQ_acc[(i0/(np*warp_size))*cpw + jc] - KQ_max[jc]) : 0.0f; KQ_sum_add += val; tmp[i0/(np*warp_size)][jc1] = val; } KQ_sum[jc] = KQ_sum[jc]*KQ_max_scale + KQ_sum_add; #ifdef FAST_FP16_AVAILABLE const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale, KQ_max_scale); #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size) { VKQ[jc*((DVp/2)/warp_size) + i0/warp_size] *= KQ_max_scale_h2; } #else #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size) { VKQ[jc*((DVp/2)/warp_size) + i0/warp_size].x *= KQ_max_scale; VKQ[jc*((DVp/2)/warp_size) + i0/warp_size].y *= KQ_max_scale; } #endif // FAST_FP16_AVAILABLE } #pragma unroll for (int i0 = 0; i0 < nbatch_fa; i0 += np*warp_size) { const int i = i0 + (threadIdx.y % np)*warp_size + threadIdx.x; ggml_cuda_memcpy_1( KQ + (jc0/KQ_cs + (threadIdx.y / np)*(cpw/KQ_cs))*(nbatch_fa*KQ_cs) + i*KQ_cs, tmp[i0/(np*warp_size)]); } } // VKQ = V @ KQ matrix multiplication: static_assert(DV <= DKQ, "bad DV"); static_assert(DV % nbatch_K == 0 || (nbatch_K % 3 == 0 && DV % (nbatch_K*2/3) == 0), "bad nbatch_K"); constexpr int nbatch_V = (DV % nbatch_K == 0 ? nbatch_K : nbatch_K*2/3) * nbatch_fa / DV; // Number of V columns that fit in SRAM for K. static_assert(nbatch_fa % nbatch_V == 0, "bad nbatch_V"); static_assert(nbatch_V % np == 0, "bad nbatch_V"); #pragma unroll for (int k0 = 0; k0 < nbatch_fa; k0 += nbatch_V) { flash_attn_tile_load_tile (V_h2 + int64_t(k_VKQ_0 + k0)*stride_V2, KV_tmp, stride_V2, k_VKQ_sup - k0); __syncthreads(); #ifdef FAST_FP16_AVAILABLE #pragma unroll for (int k1 = 0; k1 < nbatch_V; k1 += np) { half2 V_k[(DVp/2)/warp_size]; half2 KQ_k[cpw]; constexpr int cpy_ne_D = cpy_ne/2 < (DVp/2)/warp_size ? cpy_ne/2 : (DVp/2)/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size*cpy_ne_D) { ggml_cuda_memcpy_1(&V_k[i0/warp_size], &KV_tmp[(k1 + threadIdx.y % np)*(DV/2) + i0 + threadIdx.x*cpy_ne_D]); } #pragma unroll for (int jc_VKQ_0 = 0; jc_VKQ_0 < cpw; jc_VKQ_0 += KQ_cs) { const int jc_KQ = jc_VKQ_0/KQ_cs + (threadIdx.y / np)*(cpw/KQ_cs); half tmp[KQ_cs]; ggml_cuda_memcpy_1( &tmp, KQ + jc_KQ*(nbatch_fa*KQ_cs) + (k0 + k1 + threadIdx.y % np)*KQ_cs); #pragma unroll for (int jc_VKQ_1 = 0; jc_VKQ_1 < KQ_cs; ++jc_VKQ_1) { KQ_k[jc_VKQ_0+jc_VKQ_1] = __half2half2(tmp[jc_VKQ_1]); } } #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size) { #pragma unroll for (int jc_VKQ_0 = 0; jc_VKQ_0 < cpw; ++jc_VKQ_0) { VKQ[jc_VKQ_0*((DVp/2)/warp_size) + i0/warp_size] += V_k[i0/warp_size]*KQ_k[jc_VKQ_0]; } } } #else #pragma unroll for (int k1 = 0; k1 < nbatch_V; k1 += np) { float2 V_k[(DVp/2)/warp_size]; float KQ_k[cpw]; constexpr int cpy_ne_D = cpy_ne < DVp/warp_size ? 
cpy_ne : DVp/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp; i0 += warp_size*cpy_ne_D) { ggml_cuda_memcpy_1(&V_k[i0/(2*warp_size)], &KV_tmp[(k1 + threadIdx.y % np)*DV + i0 + threadIdx.x*cpy_ne_D]); } #pragma unroll for (int jc_VKQ_0 = 0; jc_VKQ_0 < cpw; jc_VKQ_0 += KQ_cs) { const int jc_KQ = jc_VKQ_0/KQ_cs + (threadIdx.y / np)*(cpw/KQ_cs); ggml_cuda_memcpy_1( &KQ_k[jc_VKQ_0], KQ + jc_KQ*(nbatch_fa*KQ_cs) + (k0 + k1 + threadIdx.y % np)*KQ_cs); } #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size) { #pragma unroll for (int jc_VKQ_0 = 0; jc_VKQ_0 < cpw; ++jc_VKQ_0) { VKQ[jc_VKQ_0*((DVp/2)/warp_size) + i0/warp_size].x += V_k[i0/warp_size].x*KQ_k[jc_VKQ_0]; VKQ[jc_VKQ_0*((DVp/2)/warp_size) + i0/warp_size].y += V_k[i0/warp_size].y*KQ_k[jc_VKQ_0]; } } } #endif // FAST_FP16_AVAILABLE __syncthreads(); } } template // D == head size __launch_bounds__(ggml_cuda_fattn_tile_get_nthreads(DKQ, DV, ncols1*ncols2), ggml_cuda_fattn_tile_get_occupancy(DKQ, DV, ncols1*ncols2)) static __global__ void flash_attn_tile( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, const char * __restrict__ sinks, const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, const float max_bias, const float m0, const float m1, const uint32_t n_head_log2, const float logit_softcap, const int32_t ne00, const uint3 ne01, const int32_t ne02, const int32_t ne03, const int32_t nb01, const int32_t nb02, const int32_t nb03, const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, const int32_t nb11, const int32_t nb12, const int64_t nb13, const int32_t nb21, const int32_t nb22, const int64_t nb23, const int32_t ne31, const int32_t ne32, const int32_t ne33, const int32_t nb31, const int32_t nb32, const int64_t nb33) { #ifdef FLASH_ATTN_AVAILABLE // Skip unused kernel variants for faster compilation: if ( #ifdef GGML_USE_WMMA_FATTN (ncols2 != 1 && DV != 40 && DV != 72 && DV != 512) || #endif // GGML_USE_WMMA_FATTN (use_logit_softcap && !(DV == 128 || DV == 256)) ) { GGML_UNUSED_VARS(Q, K, V, mask, sinks, KV_max, dst, dst_meta, scale, max_bias, m0, m1, n_head_log2, logit_softcap, ne00, ne01, ne02, ne03, nb01, nb02, nb03, ne10, ne11, ne12, ne13, nb11, nb12, nb13, nb21, nb22, nb23, ne31, ne32, ne33, nb31, nb32, nb33); NO_DEVICE_CODE; return; } static_assert(ggml_cuda_fattn_tile_get_config(DKQ, DV, ncols1*ncols2) != 0, "kernel config not defined"); constexpr int ncols = ncols1*ncols2; constexpr int warp_size = 32; constexpr int nwarps = ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, ncols1*ncols2) / warp_size; constexpr int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, ncols1*ncols2); constexpr int nbatch_K = ggml_cuda_fattn_tile_get_nbatch_K (DKQ, DV, ncols1*ncols2); // In this kernel Q, K, V are matrices while i, j, k are matrix indices. const int col_Q_0 = blockIdx.x * ncols1; // Index of the first Q column for this CUDA block to work on. const int sequence = blockIdx.z / (ne02/ncols2); const int head0 = blockIdx.z*ncols2 - sequence*ne02; // == blockIdx.z % (ne02/ncols2) const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. const float * Q_f = (const float *) (Q + nb03*sequence + nb02* head0); const half2 * K_h2 = (const half2 *) (K + nb13*sequence + nb12*(head0 / gqa_ratio)); const half2 * V_h2 = (const half2 *) (V + nb23*sequence + nb22*(head0 / gqa_ratio)); // K and V have same shape const half * maskh = mask ? 
(const half *) (mask + nb33*(sequence % ne33)) : nullptr; const int stride_K2 = nb11 / sizeof(half2); const int stride_V2 = nb21 / sizeof(half2); const int stride_mask = nb31 / sizeof(half); const float slope = ncols2 == 1 ? get_alibi_slope(max_bias, head0, n_head_log2, m0, m1) : 1.0f; constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; constexpr int cpw = ncols > nwarps ? ncols/nwarps : 1; // Q columns per warp. constexpr int np = nwarps > ncols ? nwarps/ncols : 1; // Number of parallel warps per Q column. static_assert(cpw == 1 || np == 1, "bad cpw / np"); static_assert(nbatch_fa % (np*warp_size) == 0, "nbatch_fa % (np*warp_size) != 0"); constexpr int DKQp = (DKQ + 2*warp_size - 1) & ~(2*warp_size - 1); // DKQ padded to multiple of 2*warp_size. constexpr int DVp = (DV + 2*warp_size - 1) & ~(2*warp_size - 1); // DV padded to multiple of 2*warp_size. // Q_tmp == SRAM buffer to hold Q data for the entire lifetime of the kernel. // KV_tmp == SRAM buffer to hold fragments of K/V data while iterating over ne11. // KV_tmp is padded to avoid memory conflicts for K (cpy_ne) and OOB accesses for V (DVp-DV). // KQ == SRAM buffer to hold KQ fragments between KQ and VKQ matrix multiplications. // VKQ == Accumulators in registers for the final VKQ result. #ifdef FAST_FP16_AVAILABLE __shared__ half2 Q_tmp[ncols * DKQ/2]; __shared__ half2 KV_tmp[nbatch_fa * (nbatch_K/2 + cpy_ne) + DVp-DV]; __shared__ half KQ[ncols * nbatch_fa]; half2 VKQ[cpw * ((DVp/2)/warp_size)] = {{0.0f, 0.0f}}; #else __shared__ float Q_tmp[ncols * DKQ]; __shared__ float KV_tmp[nbatch_fa * (nbatch_K + cpy_ne) + DVp-DV]; __shared__ float KQ[ncols * nbatch_fa]; float2 VKQ[cpw * ((DVp/2)/warp_size)] = {{0.0f, 0.0f}}; #endif // FAST_FP16_AVAILABLE float KQ_max[cpw]; #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { KQ_max[j0/nwarps] = -FLT_MAX/2.0f; } float KQ_sum[cpw] = {0.0f}; // Load Q data, convert to FP16 if fast: #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { const int jc = jc0 + (threadIdx.y / np)*cpw; const int j = jc / ncols2; const int c = jc % ncols2; constexpr int cpy_ne_D = cpy_ne < DKQp/warp_size ? cpy_ne : DKQp/warp_size; #pragma unroll for (int i0 = 0; i0 < DKQp; i0 += np*warp_size*cpy_ne_D) { if (i0 + np*warp_size*cpy_ne_D <= DKQ || i0 + (threadIdx.y % np)*(warp_size*cpy_ne_D) + threadIdx.x*cpy_ne_D < DKQ) { float tmp_f[cpy_ne_D] = {0.0f}; ggml_cuda_memcpy_1 (tmp_f, &Q_f[c*(nb02/sizeof(float)) + fastmodulo(col_Q_0 + j, ne01)*(nb01/sizeof(float)) + i0 + (threadIdx.y % np)*(warp_size*cpy_ne_D) + threadIdx.x*cpy_ne_D]); #pragma unroll for (int i1 = 0; i1 < cpy_ne_D; ++i1) { tmp_f[i1] *= scale; } #ifdef FAST_FP16_AVAILABLE half2 tmp_h2[cpy_ne_D/2]; #pragma unroll for (int i1 = 0; i1 < cpy_ne_D; i1 += 2) { tmp_h2[i1/2] = make_half2(tmp_f[i1 + 0], tmp_f[i1 + 1]); #if defined(FAST_FP16_AVAILABLE) && !defined(V_DOT2_F32_F16_AVAILABLE) // Without the v_dot2_f32_f16 instruction there is a higher risk of numerical overflow in the KQ calculation. // Therefore, scale down Q values and apply the inverse scale the FP32 KQ values afterwards again. 
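// Illustrative note (comment only, assumptions hedged): without v_dot2_f32_f16 the KQ dot product is
// evaluated largely in FP16 arithmetic, whose maximum finite value is 65504, so large head sizes
// combined with large Q/K activations can overflow the accumulator. Scaling Q by 0.25f here keeps the
// FP16 intermediates in range, and because 0.25f is a power of two, multiplying the FP32 KQ
// accumulators by 4.0f afterwards restores the value without introducing any rounding error of its own.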
tmp_h2[i1/2] *= make_half2(0.25f, 0.25f); #endif // defined(FAST_FP16_AVAILABLE) && !defined(V_DOT2_F32_F16_AVAILABLE) } ggml_cuda_memcpy_1( &Q_tmp[jc*(DKQ/2) + i0/2 + (threadIdx.y % np)*(warp_size*cpy_ne_D/2) + threadIdx.x*(cpy_ne_D/2)], tmp_h2); #else ggml_cuda_memcpy_1( &Q_tmp[jc* DKQ + i0 + (threadIdx.y % np)*(warp_size*cpy_ne_D) + threadIdx.x* cpy_ne_D], tmp_f); #endif // FAST_FP16_AVAILABLE } } } __syncthreads(); // Main loop over KV cache: const int k_VKQ_max = KV_max ? KV_max[sequence*gridDim.x + blockIdx.x] : ne11; if (ncols2 == 1) { // Branch with out-of-bounds checks. int k_VKQ_0 = blockIdx.y*nbatch_fa; while (k_VKQ_0 < k_VKQ_max - nbatch_fa) { constexpr bool oob_check = false; flash_attn_tile_iter (Q_tmp, K_h2, V_h2, maskh, ne01, logit_softcap, slope, KQ, KV_tmp, stride_K2, stride_V2, stride_mask, KQ_max, KQ_sum, VKQ, k_VKQ_0, k_VKQ_max, col_Q_0); k_VKQ_0 += gridDim.y*nbatch_fa; } if (k_VKQ_0 < k_VKQ_max) { constexpr bool oob_check = true; flash_attn_tile_iter (Q_tmp, K_h2, V_h2, maskh, ne01, logit_softcap, slope, KQ, KV_tmp, stride_K2, stride_V2, stride_mask, KQ_max, KQ_sum, VKQ, k_VKQ_0, k_VKQ_max, col_Q_0); } } else { // Branch without out-of-bounds checks. for (int k_VKQ_0 = blockIdx.y*nbatch_fa; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*nbatch_fa) { constexpr bool oob_check = false; flash_attn_tile_iter (Q_tmp, K_h2, V_h2, maskh, ne01, logit_softcap, slope, KQ, KV_tmp, stride_K2, stride_V2, stride_mask, KQ_max, KQ_sum, VKQ, k_VKQ_0, k_VKQ_max, col_Q_0); } } #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { KQ_sum[jc0] = warp_reduce_sum(KQ_sum[jc0]); } if constexpr (np > 1) { static_assert(cpw == 1, "bad cpw"); static_assert(nbatch_fa*nbatch_K >= nwarps*DVp, "KV_tmp too small"); #ifdef FAST_FP16_AVAILABLE half2 * VKQ_combine = (half2 *) KV_tmp; #else float * VKQ_combine = (float *) KV_tmp; #endif // FAST_FP16_AVAILABLE float * KQ_sum_combine = (float *) Q_tmp; if (threadIdx.y % np != 0) { #ifdef FAST_FP16_AVAILABLE constexpr int cpy_ne_D = cpy_ne < (DVp/2)/warp_size ? cpy_ne : (DVp/2)/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size*cpy_ne_D) { ggml_cuda_memcpy_1(&VKQ_combine[threadIdx.y*(DVp/2) + i0 + threadIdx.x*cpy_ne_D], &VKQ[i0/warp_size]); } #else constexpr int cpy_ne_D = cpy_ne < DVp/warp_size ? cpy_ne : DVp/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp; i0 += warp_size*cpy_ne_D) { ggml_cuda_memcpy_1( &VKQ_combine[threadIdx.y*DVp + i0 + threadIdx.x*cpy_ne_D], ((const float *) VKQ) + i0/warp_size); } #endif // FAST_FP16_AVAILABLE if (threadIdx.x == 0) { KQ_sum_combine[threadIdx.y] = KQ_sum[0]; } return; } __syncthreads(); #pragma unroll for (int ip = 1; ip < np; ++ip) { #ifdef FAST_FP16_AVAILABLE constexpr int cpy_ne_D = cpy_ne < (DVp/2)/warp_size ? cpy_ne : (DVp/2)/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size*cpy_ne_D) { half2 tmp[cpy_ne_D]; ggml_cuda_memcpy_1(tmp, &VKQ_combine[(threadIdx.y + ip)*(DVp/2) + i0 + threadIdx.x*cpy_ne_D]); #pragma unroll for (int i1 = 0; i1 < cpy_ne_D; ++i1) { VKQ[i0/warp_size + i1] += tmp[i1]; } } #else constexpr int cpy_ne_D = cpy_ne < DVp/warp_size ? 
cpy_ne : DVp/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp; i0 += warp_size*cpy_ne_D) { float tmp[cpy_ne_D]; ggml_cuda_memcpy_1(tmp, &VKQ_combine[(threadIdx.y + ip)*DVp + i0 + threadIdx.x*cpy_ne_D]); #pragma unroll for (int i1 = 0; i1 < cpy_ne_D; ++i1) { ((float *)VKQ)[i0/warp_size + i1] += tmp[i1]; } } #endif // FAST_FP16_AVAILABLE KQ_sum[0] += KQ_sum_combine[threadIdx.y + ip]; } } // Attention sink: adjust KQ max and sum only for the first of all parallel blocks: if (sinks && blockIdx.y == 0) { #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { const int jc = jc0 + (threadIdx.y/np)*cpw; const float sink = ((const float *) sinks)[head0 + jc % ncols2]; float KQ_max_new_j = fmaxf(KQ_max[jc0], sink); const float KQ_max_scale = expf(KQ_max[jc0] - KQ_max_new_j); KQ_max[jc0] = KQ_max_new_j; const float val = expf(sink - KQ_max[jc0]); KQ_sum[jc0] = KQ_sum[jc0]*KQ_max_scale + val; #ifdef FAST_FP16_AVAILABLE const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale, KQ_max_scale); #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size) { VKQ[jc0*((DVp/2)/warp_size) + i0/warp_size] *= KQ_max_scale_h2; } #else #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size) { VKQ[jc0*((DVp/2)/warp_size) + i0/warp_size].x *= KQ_max_scale; VKQ[jc0*((DVp/2)/warp_size) + i0/warp_size].y *= KQ_max_scale; } #endif // FAST_FP16_AVAILABLE } } // Write back results: #pragma unroll for (int jc0 = 0; jc0 < cpw; ++jc0) { const int jc = jc0 + (threadIdx.y/np)*cpw; const int j = jc / ncols2; const int c = jc % ncols2; if (ncols1 > 1 && col_Q_0 + j >= int(ne01.z)) { return; } const float scale = gridDim.y == 1 ? 1.0f/KQ_sum[jc0] : 1.0f; const int j_dst_unrolled = ((sequence*int(ne01.z) + col_Q_0 + j)*ne02 + head0 + c)*gridDim.y + blockIdx.y; #ifdef FAST_FP16_AVAILABLE constexpr int cpy_ne_D = cpy_ne/2 < (DVp/2)/warp_size ? cpy_ne/2 : (DVp/2)/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp/2; i0 += warp_size*cpy_ne_D) { float2 tmp[cpy_ne_D]; #pragma unroll for (int i1 = 0; i1 < cpy_ne_D; ++i1) { tmp[i1] = __half22float2(VKQ[jc0*((DVp/2)/warp_size) + i0/warp_size + i1]); tmp[i1].x *= scale; tmp[i1].y *= scale; } if (i0 + warp_size*cpy_ne_D <= DV/2 || i0 + threadIdx.x*cpy_ne_D < DV/2) { ggml_cuda_memcpy_1(&dst[j_dst_unrolled*DV + 2*i0 + threadIdx.x*(2*cpy_ne_D)], tmp); } } #else constexpr int cpy_ne_D = cpy_ne < DVp/warp_size ? 
cpy_ne : DVp/warp_size; #pragma unroll for (int i0 = 0; i0 < DVp; i0 += warp_size*cpy_ne_D) { if (i0 + warp_size*cpy_ne_D <= DV || i0 + threadIdx.x*cpy_ne_D < DV) { #pragma unroll for (int i1 = 0; i1 < cpy_ne_D/2; ++i1) { VKQ[jc0*((DVp/2)/warp_size) + i0/(2*warp_size) + i1].x *= scale; VKQ[jc0*((DVp/2)/warp_size) + i0/(2*warp_size) + i1].y *= scale; } ggml_cuda_memcpy_1( &dst[j_dst_unrolled*DV + i0 + threadIdx.x*cpy_ne_D], &VKQ[jc0*((DVp/2)/warp_size) + i0/(2*warp_size)]); } } #endif // FAST_FP16_AVAILABLE if (gridDim.y != 1 && threadIdx.x == 0) { dst_meta[j_dst_unrolled] = make_float2(KQ_max[jc0], KQ_sum[jc0]); } } #else GGML_UNUSED_VARS(Q, K, V, mask, sinks, KV_max, dst, dst_meta, scale, max_bias, m0, m1, n_head_log2, logit_softcap, ne00, ne01, ne02, ne03, nb01, nb02, nb03, ne10, ne11, ne12, ne13, nb11, nb12, nb13, nb21, nb22, nb23, ne31, ne32, ne33, nb31, nb32, nb33); NO_DEVICE_CODE; #endif // FLASH_ATTN_AVAILABLE } template static void launch_fattn_tile_switch_ncols1(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * Q = dst->src[0]; const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; const int warp_size = 32; constexpr size_t nbytes_shared = 0; #ifdef GGML_USE_HIP if constexpr (DV <= 128) { if (Q->ne[1] > 32/ncols2) { constexpr int cols_per_block = 64; const int nwarps = ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, cols_per_block, cc) / warp_size; const int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, cols_per_block, cc); fattn_kernel_t fattn_kernel = flash_attn_tile; launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared, nbatch_fa, true, true, false, warp_size); return; } } #endif // GGML_USE_HIP #ifndef GGML_USE_HIP if constexpr (DV <= 256) #endif // GGML_USE_HIP { if (Q->ne[1] > 16/ncols2) { constexpr int cols_per_block = 32; const int nwarps = ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, cols_per_block, cc) / warp_size; const int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, cols_per_block, cc); fattn_kernel_t fattn_kernel = flash_attn_tile; launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared, nbatch_fa, true, true, false, warp_size); return; } } if (Q->ne[1] > 8/ncols2) { constexpr int cols_per_block = 16; const int nwarps = ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, cols_per_block, cc) / warp_size; const int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, cols_per_block, cc); fattn_kernel_t fattn_kernel = flash_attn_tile; launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared, nbatch_fa, true, true, false, warp_size); return; } if constexpr (ncols2 <= 8) { if (Q->ne[1] > 4/ncols2) { constexpr int cols_per_block = 8; const int nwarps = ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, cols_per_block, cc) / warp_size; const int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, cols_per_block, cc); fattn_kernel_t fattn_kernel = flash_attn_tile; launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared, nbatch_fa, true, true, false, warp_size); return; } } if constexpr (ncols2 <= 4) { if (Q->ne[1] > 2/ncols2) { constexpr int cols_per_block = 4; const int nwarps = ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, cols_per_block, cc) / warp_size; const int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, cols_per_block, cc); fattn_kernel_t fattn_kernel = flash_attn_tile; launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared, nbatch_fa, true, true, false, warp_size); return; } } if constexpr (ncols2 <= 2) { constexpr int cols_per_block = 2; const int nwarps = 
ggml_cuda_fattn_tile_get_nthreads (DKQ, DV, cols_per_block, cc) / warp_size; const int nbatch_fa = ggml_cuda_fattn_tile_get_nbatch_fa(DKQ, DV, cols_per_block, cc); fattn_kernel_t fattn_kernel = flash_attn_tile; launch_fattn (ctx, dst, fattn_kernel, nwarps, nbytes_shared, nbatch_fa, true, true, false, warp_size); return; } GGML_ABORT("fatal error"); } template static void launch_fattn_tile_switch_ncols2(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; const ggml_tensor * mask = dst->src[3]; float max_bias = 0.0f; memcpy(&max_bias, (const float *) KQV->op_params + 1, sizeof(float)); GGML_ASSERT(Q->ne[2] % K->ne[2] == 0); const int gqa_ratio = Q->ne[2] / K->ne[2]; const bool nvidia = GGML_CUDA_CC_IS_NVIDIA(ggml_cuda_info().devices[ggml_cuda_get_device()].cc); const int gqa_limit = nvidia && gqa_ratio <= 4 ? 16 : INT_MAX; const bool use_gqa_opt = mask && max_bias == 0.0f && Q->ne[1] <= gqa_limit && K->ne[1] % FATTN_KQ_STRIDE == 0; if constexpr (DV == 512) { if (use_gqa_opt && gqa_ratio % 16 == 0) { launch_fattn_tile_switch_ncols1(ctx, dst); return; } } if constexpr (DV <= 256) { if (use_gqa_opt && gqa_ratio % 8 == 0) { launch_fattn_tile_switch_ncols1(ctx, dst); return; } if (use_gqa_opt && gqa_ratio % 4 == 0) { launch_fattn_tile_switch_ncols1(ctx, dst); return; } if (use_gqa_opt && gqa_ratio % 2 == 0) { launch_fattn_tile_switch_ncols1(ctx, dst); return; } launch_fattn_tile_switch_ncols1(ctx, dst); return; } GGML_ABORT("fatal error"); } template void ggml_cuda_flash_attn_ext_tile_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; launch_fattn_tile_switch_ncols2(ctx, dst); } else { constexpr bool use_logit_softcap = true; launch_fattn_tile_switch_ncols2(ctx, dst); } } void ggml_cuda_flash_attn_ext_tile(ggml_backend_cuda_context & ctx, ggml_tensor * dst); #define DECL_FATTN_TILE_CASE(DKQ, DV) \ template void ggml_cuda_flash_attn_ext_tile_case \ (ggml_backend_cuda_context & ctx, ggml_tensor * dst) \ extern DECL_FATTN_TILE_CASE( 40, 40); extern DECL_FATTN_TILE_CASE( 64, 64); extern DECL_FATTN_TILE_CASE( 72, 72); extern DECL_FATTN_TILE_CASE( 80, 80); extern DECL_FATTN_TILE_CASE( 96, 96); extern DECL_FATTN_TILE_CASE(112, 112); extern DECL_FATTN_TILE_CASE(128, 128); extern DECL_FATTN_TILE_CASE(256, 256); extern DECL_FATTN_TILE_CASE(576, 512); ggml-org-ggml-3678254/src/ggml-cuda/fattn-vec.cuh000066400000000000000000000561231512524704700214120ustar00rootroot00000000000000#include "common.cuh" #include "fattn-common.cuh" static int ggml_cuda_fattn_vec_get_nthreads_host(const int cc) { return 128; GGML_UNUSED(cc); } static constexpr __device__ int ggml_cuda_fattn_vec_get_nthreads_device() { return 128; } // Currently llvm with the amdgcn target does not support unrolling loops // that contain a break that cannot be resolved at compile time.
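// Illustrative sketch of the loop shape that triggers -Wpass-failed (comment only): several loops in
// this kernel look like
//
//     #pragma unroll
//     for (int j0 = 0; j0 < ncols; j0 += nwarps) {
//         const int j = j0 + threadIdx.y;
//         if (j0 + nwarps > ncols && j >= ncols) {
//             break; // the break depends on threadIdx.y, so it cannot be resolved at compile time
//         }
//         // ...
//     }
//
// and the clang pragmas below suppress the warning the unrolling pass emits for them on amdgcn.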
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpass-failed" #endif // __clang__ template // D == head size __launch_bounds__(ggml_cuda_fattn_vec_get_nthreads_device(), 1) static __global__ void flash_attn_ext_vec( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, const char * __restrict__ sinks, const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, const float max_bias, const float m0, const float m1, const uint32_t n_head_log2, const float logit_softcap, const int32_t ne00, const uint3 ne01, const int32_t ne02, const int32_t ne03, const int32_t nb01, const int32_t nb02, const int32_t nb03, const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, const int32_t nb11, const int32_t nb12, const int64_t nb13, const int32_t nb21, const int32_t nb22, const int64_t nb23, const int32_t ne31, const int32_t ne32, const int32_t ne33, const int32_t nb31, const int32_t nb32, const int64_t nb33) { #ifdef FLASH_ATTN_AVAILABLE // Skip unused kernel variants for faster compilation: if (use_logit_softcap && !(D == 128 || D == 256)) { GGML_UNUSED_VARS(Q, K, V, mask, sinks, KV_max, dst, dst_meta, scale, max_bias, m0, m1, n_head_log2, logit_softcap, ne00, ne01, ne02, ne03, nb01, nb02, nb03, ne10, ne11, ne12, ne13, nb11, nb12, nb13, nb21, nb22, nb23, ne31, ne32, ne33, nb31, nb32, nb33); NO_DEVICE_CODE; return; } //In this kernel Q, K, V are matrices while i, j, k are matrix indices. constexpr int cpy_nb = ggml_cuda_get_max_cpy_bytes(); constexpr int cpy_ne = cpy_nb / 4; #ifdef GGML_USE_HIP #ifdef RDNA constexpr int nthreads_KQ_q = 2; #else constexpr int nthreads_KQ_q = 4; #endif // RDNA constexpr int nthreads_V_q = (D/4 < 32 ? D/4 : 32); #else constexpr int nthreads_KQ_q = (D/4 < 32 ? D/4 : 32); constexpr int nthreads_V_q = (D/4 < 32 ? D/4 : 32); #endif // GGML_USE_HIP constexpr int nthreads = ggml_cuda_fattn_vec_get_nthreads_device(); constexpr int nthreads_KQ = type_K == GGML_TYPE_F16 ? 128 / cpy_nb : nthreads_KQ_q; constexpr int nthreads_V = type_V == GGML_TYPE_F16 ? 128 / cpy_nb : nthreads_V_q; static_assert(WARP_SIZE % nthreads_KQ == 0, "bad nthreads_K"); static_assert(WARP_SIZE % nthreads_V == 0, "bad nthreads_V"); constexpr int V_rows_per_thread = type_V == GGML_TYPE_F16 ? 2*cpy_ne : 4; constexpr int V_cols_per_iter = WARP_SIZE / nthreads_V; constexpr vec_dot_KQ_t vec_dot_KQ = get_vec_dot_KQ(); constexpr bool Q_q8_1 = type_K != GGML_TYPE_F16; #ifdef V_DOT2_F32_F16_AVAILABLE constexpr dequantize_V_t dequantize_V = get_dequantize_V(); #else constexpr dequantize_V_t dequantize_V = get_dequantize_V(); #endif // V_DOT2_F32_F16_AVAILABLE const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. const int sequence = blockIdx.z / ne02; const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
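// Worked example for the GQA mapping (comment only): with ne02 == 32 Q heads and ne12 == 8 K/V heads,
// gqa_ratio == 4, so Q heads 0..3 share K/V head 0, Q heads 4..7 share K/V head 1, and so on.
// This is why the K and V pointers below are offset by nb12*(head / gqa_ratio) and
// nb22*(head / gqa_ratio) rather than by the Q head index itself.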
Q += nb03*sequence + nb02* head + nb01*ic0; K += nb13*sequence + nb12*(head / gqa_ratio); V += nb23*sequence + nb22*(head / gqa_ratio); const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); const float slope = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); constexpr int nwarps = nthreads / WARP_SIZE; const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; __builtin_assume(tid < nthreads); constexpr int ne_KQ = ncols*D; constexpr int ne_combine = nwarps*V_cols_per_iter*D; #ifdef V_DOT2_F32_F16_AVAILABLE half2 VKQ[ncols][(D/2)/nthreads_V] = {{{0.0f, 0.0f}}}; __shared__ half KQ[ne_KQ > ne_combine ? ne_KQ : ne_combine]; #else float2 VKQ[ncols][(D/2)/nthreads_V] = {{{0.0f, 0.0f}}}; __shared__ float KQ[ne_KQ > ne_combine ? ne_KQ : ne_combine]; #endif // V_DOT2_F32_F16_AVAILABLE float KQ_max[ncols]; float KQ_sum[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { KQ_max[j] = -FLT_MAX/2.0f; KQ_sum[j] = 0.0f; } // Convert Q to float2 (f16 K) or q8_1 (quantized K) and store in registers: #ifdef V_DOT2_F32_F16_AVAILABLE half2 Q_reg[ncols][(D/2)/nthreads_KQ]; // Will be initialized completely. #else float2 Q_reg[ncols][(D/2)/nthreads_KQ] = {{{0.0f, 0.0f}}}; // May be only partially initialized. #endif // V_DOT2_F32_F16_AVAILABLE int Q_i32[ncols][1 > D/(sizeof(int)*nthreads_KQ) ? 1 : D/(sizeof(int)*nthreads_KQ)]; float2 Q_ds[ncols][1 > D/(sizeof(int)*nthreads_KQ) ? 1 : D/(sizeof(int)*nthreads_KQ)]; if constexpr (Q_q8_1) { #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j0 + nwarps > ncols && j >= ncols) { break; } // Reuse KQ as temporary storage for converting Q to q8_1: int * tmp_q_i32 = (int *) &KQ[j*D]; float2 * tmp_q_ds = (float2 *) (tmp_q_i32 + D/sizeof(int)); // Set memory to zero if out of bounds: if (ncols > 1 && ic0 + j >= int(ne01.z)) { #pragma unroll for (int i0 = 0; i0 < int(D/sizeof(int)); i0 += WARP_SIZE) { const int i = i0 + threadIdx.x; if (i0 + WARP_SIZE <= int(D/sizeof(int)) || i < int(D/sizeof(int))) { tmp_q_i32[i] = 0; } } if (threadIdx.x < D/QK8_1) { tmp_q_ds[threadIdx.x] = make_float2(0.0f, 0.0f); } } else { const float * Q_f = (const float *) (Q + j*nb01); constexpr int nthreads_quantize = D/sizeof(int) < WARP_SIZE ? D/sizeof(int) : WARP_SIZE; #pragma unroll for (int i0 = 0; i0 < int(D/sizeof(int)); i0 += nthreads_quantize) { quantize_q8_1_to_shared (Q_f + i0*sizeof(int), scale, tmp_q_i32 + i0, tmp_q_ds + i0/QI8_1); } } } __syncthreads(); #pragma unroll for (int j = 0; j < ncols; ++j) { int * tmp_q_i32 = (int *) &KQ[j*D]; float2 * tmp_q_ds = (float2 *) (tmp_q_i32 + D/sizeof(int)); #pragma unroll for (int i0 = 0; i0 < int(D/sizeof(int)); i0 += nthreads_KQ) { const int i = i0 + (nthreads_KQ == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads_KQ); Q_i32[j][i0/nthreads_KQ] = tmp_q_i32[i]; Q_ds[j][i0/nthreads_KQ] = tmp_q_ds[i/QI8_1]; } } __syncthreads(); } else { #ifdef V_DOT2_F32_F16_AVAILABLE const half2 scale_h2 = make_half2(scale, scale); #pragma unroll for (int j = 0; j < ncols; ++j) { const float2 * Q_j = (const float2 *) (Q + j*nb01); #pragma unroll for (int i0 = 0; i0 < D/2; i0 += nthreads_KQ*cpy_ne) { const int i = i0 + (nthreads_KQ == WARP_SIZE ? 
threadIdx.x : threadIdx.x % nthreads_KQ)*cpy_ne; float2 tmp[cpy_ne] = {{0.0f, 0.0f}}; if (ncols == 1 || ic0 + j < int(ne01.z)) { ggml_cuda_memcpy_1(tmp, &Q_j[i]); ggml_cuda_memcpy_1(tmp + cpy_ne/2, &Q_j[i + cpy_ne/2]); } #pragma unroll for (int i1 = 0; i1 < cpy_ne; ++i1) { Q_reg[j][i0/nthreads_KQ + i1] = make_half2(tmp[i1].x, tmp[i1].y); } } #pragma unroll for (int k = 0; k < (D/2)/nthreads_KQ; ++k) { Q_reg[j][k] *= scale_h2; } } #else #pragma unroll for (int j = 0; j < ncols; ++j) { const float2 * Q_j = (const float2 *) (Q + j*nb01); #pragma unroll for (int i0 = 0; i0 < D/2; i0 += nthreads_KQ*cpy_ne) { const int i = i0 + (nthreads_KQ == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads_KQ)*cpy_ne; if (ncols == 1 || ic0 + j < int(ne01.z)) { ggml_cuda_memcpy_1(&Q_reg[j][i0/nthreads_KQ], &Q_j[i]); ggml_cuda_memcpy_1(&Q_reg[j][i0/nthreads_KQ + cpy_ne/2], &Q_j[i + cpy_ne/2]); } } #pragma unroll for (int k = 0; k < (D/2)/nthreads_KQ; ++k) { Q_reg[j][k].x *= scale; Q_reg[j][k].y *= scale; } } #endif // V_DOT2_F32_F16_AVAILABLE } const int k_VKQ_max = KV_max ? KV_max[sequence*gridDim.x + blockIdx.x] : ne11; K += blockIdx.y*nthreads * nb11; V += blockIdx.y*nthreads * nb21; maskh += blockIdx.y*nthreads; for (int k_VKQ_0 = blockIdx.y*nthreads; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*nthreads, // Increment pointers after each loop: K += gridDim.y*nthreads*nb11, V += gridDim.y*nthreads*nb21, maskh += gridDim.y*nthreads) { // Calculate KQ tile and keep track of new maximum KQ values: float KQ_reg[ncols]; // KQ in registers. float KQ_max_new[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { KQ_max_new[j] = KQ_max[j]; } #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < nthreads_KQ; ++i_KQ_0) { const int i_KQ = threadIdx.y*WARP_SIZE + (nthreads_KQ == WARP_SIZE ? 0 : (threadIdx.x & ~(nthreads_KQ-1))) + i_KQ_0; #pragma unroll for (int j = 0; j < ncols; ++j) { float sum = vec_dot_KQ(K + i_KQ*nb11, Q_reg[j], Q_i32[j], Q_ds[j]); sum = warp_reduce_sum(sum); if (use_logit_softcap) { sum = logit_softcap*tanhf(sum); } if (mask && (ncols == 1 || ic0 + j < int(ne01.z))) { sum += slope*__half2float(maskh[j*ne11 + i_KQ]); } KQ_max_new[j] = fmaxf(KQ_max_new[j], sum + FATTN_KQ_MAX_OFFSET); if ((nthreads_KQ == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads_KQ) == uint32_t(i_KQ_0)) { KQ_reg[j] = sum; } } } #pragma unroll for (int j = 0; j < ncols; ++j) { #pragma unroll for (int offset = nthreads_KQ; offset < WARP_SIZE; offset <<= 1) { KQ_max_new[j] = fmaxf(KQ_max_new[j], __shfl_xor_sync(0xFFFFFFFF, KQ_max_new[j], offset, WARP_SIZE)); } const float KQ_max_scale = expf(KQ_max[j] - KQ_max_new[j]); KQ_max[j] = KQ_max_new[j]; KQ_reg[j] = expf(KQ_reg[j] - KQ_max[j]); KQ_sum[j] = KQ_sum[j]*KQ_max_scale + KQ_reg[j]; KQ[j*nthreads + tid] = KQ_reg[j]; #ifdef V_DOT2_F32_F16_AVAILABLE const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale, KQ_max_scale); #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V) { VKQ[j][i_VKQ_0/nthreads_V] *= KQ_max_scale_h2; } #else #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V) { VKQ[j][i_VKQ_0/nthreads_V].x *= KQ_max_scale; VKQ[j][i_VKQ_0/nthreads_V].y *= KQ_max_scale; } #endif // V_DOT2_F32_F16_AVAILABLE } #ifndef GGML_USE_HIP __syncwarp(); #endif // GGML_USE_HIP #pragma unroll for (int k0 = 0; k0 < WARP_SIZE; k0 += V_cols_per_iter) { const int k = threadIdx.y*WARP_SIZE + k0 + (nthreads_V == WARP_SIZE ? 
0 : threadIdx.x / nthreads_V); #ifdef V_DOT2_F32_F16_AVAILABLE half2 KQ_k[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { KQ_k[j] = __half2half2(KQ[j*nthreads + k]); } #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V*V_rows_per_thread/2) { half2 tmp[V_rows_per_thread/2]; dequantize_V(V + k*nb21, tmp, 2*i_VKQ_0 + (nthreads_V == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads_V)*V_rows_per_thread); #pragma unroll for (int i_VKQ_1 = 0; i_VKQ_1 < V_rows_per_thread/2; ++i_VKQ_1) { #pragma unroll for (int j = 0; j < ncols; ++j) { VKQ[j][i_VKQ_0/nthreads_V + i_VKQ_1] += tmp[i_VKQ_1]*KQ_k[j]; } } } #else float KQ_k[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { KQ_k[j] = KQ[j*nthreads + k]; } #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V*V_rows_per_thread/2) { float2 tmp[V_rows_per_thread/2]; dequantize_V(V + k*nb21, tmp, 2*i_VKQ_0 + (nthreads_V == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads_V)*V_rows_per_thread); #pragma unroll for (int i_VKQ_1 = 0; i_VKQ_1 < V_rows_per_thread/2; ++i_VKQ_1) { #pragma unroll for (int j = 0; j < ncols; ++j) { VKQ[j][i_VKQ_0/nthreads_V + i_VKQ_1].x += tmp[i_VKQ_1].x*KQ_k[j]; VKQ[j][i_VKQ_0/nthreads_V + i_VKQ_1].y += tmp[i_VKQ_1].y*KQ_k[j]; } } } #endif // V_DOT2_F32_F16_AVAILABLE } } if (sinks && blockIdx.y == 0) { const float sink = ((const float *) sinks)[head]; #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j0 + nwarps > ncols && j >= ncols) { break; } const float kqmax_new_j = fmaxf(sink, KQ_max[j]); const float KQ_max_scale = expf(KQ_max[j] - kqmax_new_j); KQ_max[j] = kqmax_new_j; KQ_sum[j] = KQ_sum[j]*KQ_max_scale + (threadIdx.x == 0 ? expf(sink - KQ_max[j]) : 0.0f); #ifdef V_DOT2_F32_F16_AVAILABLE const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale, KQ_max_scale); #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V) { VKQ[j][i_VKQ_0/nthreads_V] *= KQ_max_scale_h2; } #else #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V) { VKQ[j][i_VKQ_0/nthreads_V].x *= KQ_max_scale; VKQ[j][i_VKQ_0/nthreads_V].y *= KQ_max_scale; } #endif // V_DOT2_F32_F16_AVAILABLE } } __shared__ float KQ_max_shared[ncols][WARP_SIZE]; __shared__ float KQ_sum_shared[ncols][WARP_SIZE]; #pragma unroll for (int j = 0; j < ncols; ++j) { if (threadIdx.y == 0) { KQ_max_shared[j][threadIdx.x] = -FLT_MAX/2.0f; KQ_sum_shared[j][threadIdx.x] = 0.0f; } } __syncthreads(); #pragma unroll for (int j = 0; j < ncols; ++j) { if (threadIdx.x == 0) { KQ_max_shared[j][threadIdx.y] = KQ_max[j]; } } __syncthreads(); #pragma unroll for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) { if (ncols > 1 && ic0 + j_VKQ >= int(ne01.z)) { break; } float kqmax_new = KQ_max_shared[j_VKQ][threadIdx.x]; kqmax_new = warp_reduce_max(kqmax_new); const float kqmax_scale = expf(KQ_max[j_VKQ] - kqmax_new); KQ_max[j_VKQ] = kqmax_new; #ifdef V_DOT2_F32_F16_AVAILABLE half2 * VKQ_tmp = (half2 *) KQ + threadIdx.y*(V_cols_per_iter*D/2) + (nthreads_V == WARP_SIZE ? 0 : threadIdx.x / nthreads_V)*(D/2); const half2 kqmax_scale_h2 = make_half2(kqmax_scale, kqmax_scale); #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V) { VKQ[j_VKQ][i_VKQ_0/nthreads_V] *= kqmax_scale_h2; } #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V*V_rows_per_thread/2) { const int i_VKQ = i_VKQ_0 + (nthreads_V == WARP_SIZE ? 
threadIdx.x : threadIdx.x % nthreads_V)*(V_rows_per_thread/2); ggml_cuda_memcpy_1(VKQ_tmp + i_VKQ, &VKQ[j_VKQ][i_VKQ_0/nthreads_V]); } #else float2 * VKQ_tmp = (float2 *) KQ + threadIdx.y*(V_cols_per_iter*D/2) + (nthreads_V == WARP_SIZE ? 0 : threadIdx.x / nthreads_V)*(D/2); #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V) { VKQ[j_VKQ][i_VKQ_0/nthreads_V].x *= kqmax_scale; VKQ[j_VKQ][i_VKQ_0/nthreads_V].y *= kqmax_scale; } #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D/2; i_VKQ_0 += nthreads_V*V_rows_per_thread/2) { const int i_VKQ = i_VKQ_0 + (nthreads_V == WARP_SIZE ? threadIdx.x : threadIdx.x % nthreads_V)*(V_rows_per_thread/2); ggml_cuda_memcpy_1(VKQ_tmp + i_VKQ, &VKQ[j_VKQ][i_VKQ_0/nthreads_V]); ggml_cuda_memcpy_1(VKQ_tmp + i_VKQ + V_rows_per_thread/4, &VKQ[j_VKQ][i_VKQ_0/nthreads_V + V_rows_per_thread/4]); } #endif // V_DOT2_F32_F16_AVAILABLE KQ_sum[j_VKQ] *= kqmax_scale; KQ_sum[j_VKQ] = warp_reduce_sum(KQ_sum[j_VKQ]); if (threadIdx.x == 0) { KQ_sum_shared[j_VKQ][threadIdx.y] = KQ_sum[j_VKQ]; } __syncthreads(); if (nthreads <= D || tid < D) { KQ_sum[j_VKQ] = KQ_sum_shared[j_VKQ][threadIdx.x]; KQ_sum[j_VKQ] = warp_reduce_sum(KQ_sum[j_VKQ]); #pragma unroll for (int i0 = 0; i0 < D; i0 += nthreads) { float dst_val = 0; #pragma unroll for (int w = 0; w < nwarps; ++w) { #pragma unroll for (int v = 0; v < V_cols_per_iter; ++v) { dst_val += float(KQ[w*V_cols_per_iter*D + v*D + i0 + tid]); } } if (gridDim.y == 1) { dst_val /= KQ_sum[j_VKQ]; } dst[(((sequence*int(ne01.z) + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y)*D + i0 + tid] = dst_val; } } if (j_VKQ < ncols-1) { __syncthreads(); } } if (gridDim.y != 1 && tid < ncols && (ncols == 1 || ic0 + tid < int(ne01.z))) { dst_meta[((sequence*int(ne01.z) + ic0 + tid)*ne02 + head)*gridDim.y + blockIdx.y] = make_float2(KQ_max[tid], KQ_sum[tid]); } #else GGML_UNUSED_VARS(Q, K, V, mask, sinks, KV_max, dst, dst_meta, scale, max_bias, m0, m1, n_head_log2, logit_softcap, ne00, ne01, ne02, ne03, nb01, nb02, nb03, ne10, ne11, ne12, ne13, nb11, nb12, nb13, nb21, nb22, nb23, ne31, ne32, ne33, nb31, nb32, nb33); NO_DEVICE_CODE; #endif // FLASH_ATTN_AVAILABLE } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ template void ggml_cuda_flash_attn_ext_vec_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; const int nthreads = ggml_cuda_fattn_vec_get_nthreads_host(cc); const int nwarps = nthreads / WARP_SIZE; fattn_kernel_t fattn_kernel = flash_attn_ext_vec; const bool need_f16_K = type_K == GGML_TYPE_F16; const bool need_f16_V = type_V == GGML_TYPE_F16; constexpr size_t nbytes_shared = 0; launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, D, need_f16_K, need_f16_V, false); } template void ggml_cuda_flash_attn_ext_vec_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); if (Q->ne[1] == 1) { constexpr int cols_per_block = 1; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; ggml_cuda_flash_attn_ext_vec_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; ggml_cuda_flash_attn_ext_vec_case_impl(ctx, dst); } return; } constexpr int cols_per_block = 2; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; ggml_cuda_flash_attn_ext_vec_case_impl(ctx, dst); } else { constexpr bool use_logit_softcap = true; 
ggml_cuda_flash_attn_ext_vec_case_impl(ctx, dst); } } #define DECL_FATTN_VEC_CASE(D, type_K, type_V) \ template void ggml_cuda_flash_attn_ext_vec_case \ (ggml_backend_cuda_context & ctx, ggml_tensor * dst) \ #define EXTERN_DECL_FATTN_VEC_CASES(D, type_K) \ extern DECL_FATTN_VEC_CASE(D, type_K, GGML_TYPE_F16); \ extern DECL_FATTN_VEC_CASE(D, type_K, GGML_TYPE_Q4_0); \ extern DECL_FATTN_VEC_CASE(D, type_K, GGML_TYPE_Q4_1); \ extern DECL_FATTN_VEC_CASE(D, type_K, GGML_TYPE_Q5_0); \ extern DECL_FATTN_VEC_CASE(D, type_K, GGML_TYPE_Q5_1); \ extern DECL_FATTN_VEC_CASE(D, type_K, GGML_TYPE_Q8_0); \ EXTERN_DECL_FATTN_VEC_CASES( 64, GGML_TYPE_F16) EXTERN_DECL_FATTN_VEC_CASES( 64, GGML_TYPE_Q4_0) EXTERN_DECL_FATTN_VEC_CASES( 64, GGML_TYPE_Q4_1) EXTERN_DECL_FATTN_VEC_CASES( 64, GGML_TYPE_Q5_0) EXTERN_DECL_FATTN_VEC_CASES( 64, GGML_TYPE_Q5_1) EXTERN_DECL_FATTN_VEC_CASES( 64, GGML_TYPE_Q8_0) EXTERN_DECL_FATTN_VEC_CASES(128, GGML_TYPE_F16) EXTERN_DECL_FATTN_VEC_CASES(128, GGML_TYPE_Q4_0) EXTERN_DECL_FATTN_VEC_CASES(128, GGML_TYPE_Q4_1) EXTERN_DECL_FATTN_VEC_CASES(128, GGML_TYPE_Q5_0) EXTERN_DECL_FATTN_VEC_CASES(128, GGML_TYPE_Q5_1) EXTERN_DECL_FATTN_VEC_CASES(128, GGML_TYPE_Q8_0) EXTERN_DECL_FATTN_VEC_CASES(256, GGML_TYPE_F16) EXTERN_DECL_FATTN_VEC_CASES(256, GGML_TYPE_Q4_0) EXTERN_DECL_FATTN_VEC_CASES(256, GGML_TYPE_Q4_1) EXTERN_DECL_FATTN_VEC_CASES(256, GGML_TYPE_Q5_0) EXTERN_DECL_FATTN_VEC_CASES(256, GGML_TYPE_Q5_1) EXTERN_DECL_FATTN_VEC_CASES(256, GGML_TYPE_Q8_0) ggml-org-ggml-3678254/src/ggml-cuda/fattn-wmma-f16.cu000066400000000000000000000670561512524704700220270ustar00rootroot00000000000000// Old and deprecated WMMA FlashAttention implementation. // It is still needed for Volta since the memory layout of NVIDIA tensor cores changed with Turing. // Long-term the WMMA code should be replaced with a dedicated Volta implementation. 
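// For readers unfamiliar with the fragment-based WMMA API used throughout this file, the kernels follow
// the standard pattern sketched below (comment only; the 16x16x16 shapes are illustrative, the real code
// derives frag_m/frag_n from the number of columns being processed):
//
//     wmma::fragment<wmma::matrix_a,    16, 16, 16, half, wmma::row_major> a;
//     wmma::fragment<wmma::matrix_b,    16, 16, 16, half, wmma::col_major> b;
//     wmma::fragment<wmma::accumulator, 16, 16, 16, float>                 c;
//     wmma::fill_fragment(c, 0.0f);
//     wmma::load_matrix_sync(a, A_ptr, lda);
//     wmma::load_matrix_sync(b, B_ptr, ldb);
//     wmma::mma_sync(c, a, b, c);                                  // c += a * b
//     wmma::store_matrix_sync(C_ptr, c, ldc, wmma::mem_col_major);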
#include "common.cuh" #include "fattn-common.cuh" #include "fattn-wmma-f16.cuh" #ifdef GGML_USE_WMMA_FATTN #if !defined(GGML_USE_HIP) #include #if defined(GGML_USE_MUSA) namespace wmma = mtmusa::wmma; #else // GGML_USE_MUSA namespace wmma = nvcuda::wmma; #endif // GGML_USE_MUSA #elif defined(GGML_USE_HIP) #include namespace wmma = rocwmma; #endif // !defined(GGML_USE_HIP) #endif // GGML_USE_WMMA_FATTN // D == head size, VKQ_stride == num VKQ rows calculated in parallel: template __launch_bounds__(nwarps*ggml_cuda_get_physical_warp_size(), 1) static __global__ void flash_attn_ext_f16( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, const char * __restrict__ sinks, const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, const float max_bias, const float m0, const float m1, const uint32_t n_head_log2, const float logit_softcap, const int32_t ne00, const uint3 ne01, const int32_t ne02, const int32_t ne03, const int32_t nb01, const int32_t nb02, const int32_t nb03, const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, const int32_t nb11, const int32_t nb12, const int64_t nb13, const int32_t nb21, const int32_t nb22, const int64_t nb23, const int32_t ne31, const int32_t ne32, const int32_t ne33, const int32_t nb31, const int32_t nb32, const int64_t nb33) { #if defined(FLASH_ATTN_AVAILABLE) && (defined(GGML_HIP_ROCWMMA_FATTN) && defined(GGML_USE_WMMA_FATTN)) // Skip unused kernel variants for faster compilation: if (use_logit_softcap && !(D == 128 || D == 256)) { NO_DEVICE_CODE; return; } //In this kernel Q, K, V are matrices while i, j, k are matrix indices. constexpr int warp_size = ggml_cuda_get_physical_warp_size(); const int ic0 = ncols*blockIdx.x; // Index of the first Q/QKV column to work on. static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE."); static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16."); constexpr int frag_m = ncols == 8 ? 32 : 16; constexpr int frag_n = ncols == 8 ? 8 : 16; static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0."); typedef wmma::fragment frag_a_K; typedef wmma::fragment frag_a_V; typedef wmma::fragment frag_b; typedef wmma::fragment frag_c_KQ; typedef wmma::fragment frag_c_VKQ; constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel. constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy. static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps."); // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts: constexpr int D_padded = D + 8; constexpr int kqs_padded = FATTN_KQ_STRIDE + 8; constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half); const int sequence = blockIdx.z / ne02; const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
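// Note on the +8 padding above (comment only): shared memory is split into 32 banks of 4 bytes, i.e.
// 128 bytes per bank cycle. For D == 64 or D == 128 a row of half elements spans an exact multiple of
// 128 bytes, so element i of every row would fall into the same bank and column-wise accesses would
// serialize. Padding each row by 8 halves (16 bytes) shifts consecutive rows by 4 banks, which is what
// "reduce shared memory bank conflicts" above refers to.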
const float * Q_f = (const float *) (Q + nb03* sequence + nb02* head + nb01*ic0); const half * K_h = (const half *) (K + nb13* sequence + nb12*(head / gqa_ratio)); const half * V_h = (const half *) (V + nb13* sequence + nb12*(head / gqa_ratio)); // K and V have same shape const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); const half2 * mask2 = (const half2 *) maskh; const float * sinksf = (const float *) sinks; const int stride_Q = nb01 / sizeof(float); const int stride_KV = nb11 / sizeof(half); const float slopef = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); const half2 slope2 = make_half2(slopef, slopef); const half2 logit_softcap_2 = make_half2(logit_softcap, logit_softcap); frag_b Q_b[D/16][ncols/frag_n]; // A single buffer for temporarily holding tiles of KQ and VKQ parts: constexpr int mem_KQ = ncols*kqs_padded*kqar; constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded; __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts]; float * KQ_f = (float *) KQ; half2 * KQ2 = (half2 *) KQ; float KQ_rowsum_f[ncols/nwarps] = {0.0f}; float KQ_max_f[ncols/nwarps]; float KQ_max_scale_f[ncols/nwarps] = {0.0f}; #pragma unroll for (int j = 0; j < ncols/nwarps; ++j) { KQ_max_f[j] = -FLT_MAX/2.0f; } half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}}; half2 KQ_max_h2[ncols/nwarps]; half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}}; #pragma unroll for (int j = 0; j < ncols/nwarps; ++j) { KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF); } __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice. half2 * VKQ2 = (half2 *) VKQ; #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < D/2; i0 += warp_size) { const int i = i0 + threadIdx.x; if (i0 + warp_size > D/2 && i >= D/2) { break; } VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f); } } // Convert Q to half and apply scale, temporarily store in KQ: #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < D; i0 += warp_size) { const int i = i0 + threadIdx.x; if (i0 + warp_size > D && i >= D) { break; } KQ[j*D_padded + i] = ic0 + j < int(ne01.z) ? Q_f[j*stride_Q + i] * scale : 0.0f; } } __syncthreads(); // Load Q into tensor core fragments/registers since it will be used frequently: #pragma unroll for (int i0 = 0; i0 < D; i0 += 16) { #pragma unroll for (int j0 = 0; j0 < ncols; j0 += frag_n) { wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded); } } __syncthreads(); // Iterate over ne11 == previous tokens: const int k_VKQ_max = KV_max ? 
KV_max[sequence*gridDim.x + blockIdx.x] : ne11; for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE) { // Calculate tile of KQ: #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) { frag_c_KQ KQ_c[ncols/frag_n]; #pragma unroll for (int j = 0; j < ncols/frag_n; ++j) { wmma::fill_fragment(KQ_c[j], static_cast(0.0f)); } #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) { frag_a_K K_a; wmma::load_matrix_sync(K_a, K_h + int64_t(k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV); #pragma unroll for (int j = 0; j < ncols/frag_n; ++j) { wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]); } } #pragma unroll for (int j0 = 0; j0 < ncols; j0 += frag_n) { wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, wmma::mem_col_major); } } __syncthreads(); // Calculate softmax for each KQ column using the current max. value. // The divisor is stored in KQ_rowsum and will be applied at the end. #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j = j0 + threadIdx.y; if (std::is_same::value) { float KQ_f_tmp[FATTN_KQ_STRIDE / warp_size]; #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += warp_size) { const int k = k0 + threadIdx.x; KQ_f_tmp[k0/warp_size] = KQ_f[j*kqs_padded + k]; if (use_logit_softcap) { KQ_f_tmp[k0/warp_size] = logit_softcap*tanhf(KQ_f_tmp[k0/warp_size]); } } float KQ_max_new = KQ_max_f[j0/nwarps]; #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += warp_size) { const int k = k0 + threadIdx.x; KQ_f_tmp[k0/warp_size] += mask && ic0 + j < int(ne01.z) ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f; KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/warp_size] + FATTN_KQ_MAX_OFFSET); } KQ_max_new = warp_reduce_max(KQ_max_new); const float diff = KQ_max_f[j0/nwarps] - KQ_max_new; KQ_max_scale_f[j0/nwarps] = expf(diff); if (diff <= SOFTMAX_FTZ_THRESHOLD) { KQ_max_scale_f[j0/nwarps] = 0.0f; } KQ_max_f[j0/nwarps] = KQ_max_new; float KQ_rowsum_add = 0.0f; #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += warp_size) { const int k = k0 + threadIdx.x; const float diff = KQ_f_tmp[k0/warp_size] - KQ_max_f[j0/nwarps]; KQ_f_tmp[k0/warp_size] = expf(diff); if (diff <= SOFTMAX_FTZ_THRESHOLD) { KQ_f_tmp[k0/warp_size] = 0.0f; } KQ_rowsum_add += KQ_f_tmp[k0/warp_size]; KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/warp_size]; } KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); // Scale previous KQ_rowsum to account for a potential increase in KQ_max: KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add; } else { half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*warp_size)]; #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += warp_size) { const int k = k0 + threadIdx.x; KQ2_tmp[k0/warp_size] = KQ2[j*(kqs_padded/2) + k]; if (use_logit_softcap) { // There is no dedicated tangens hyperbolicus function for half2. KQ2_tmp[k0/warp_size] = h2exp(KQ2_tmp[k0/warp_size]*make_half2(2.0f, 2.0f)); KQ2_tmp[k0/warp_size] = (KQ2_tmp[k0/warp_size] - make_half2(1.0f, 1.0f)) /(KQ2_tmp[k0/warp_size] + make_half2(1.0f, 1.0f)); KQ2_tmp[k0/warp_size] *= logit_softcap_2; } } half2 KQ_max_new = KQ_max_h2[j0/nwarps]; #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += warp_size) { const int k = k0 + threadIdx.x; KQ2_tmp[k0/warp_size] += mask && ic0 + j < int(ne01.z) ? 
slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f); KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/warp_size]); } KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new)))); const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new; KQ_max_scale_h2[j0/nwarps] = h2exp(diff); const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask; KQ_max_h2[j0/nwarps] = KQ_max_new; half2 KQ_rowsum_add = make_half2(0.0f, 0.0f); #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += warp_size) { const int k = k0 + threadIdx.x; const half2 diff = KQ2_tmp[k0/warp_size] - KQ_max_h2[j0/nwarps]; KQ2_tmp[k0/warp_size] = h2exp(diff); const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); *((uint32_t *) &KQ2_tmp[k0/warp_size]) &= ftz_mask; KQ_rowsum_add += KQ2_tmp[k0/warp_size]; KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/warp_size]; } KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); // Scale previous KQ_rowsum to account for a potential increase in KQ_max: KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add; } } __syncthreads(); frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n]; #pragma unroll for (int j0 = 0; j0 < ncols; j0 += frag_n) { #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { const int k = k0 + (threadIdx.y % VKQ_ratio)*16; wmma::load_matrix_sync( KQ_b[k0/(VKQ_ratio*16)][j0/frag_n], KQ + j0*(kqar*kqs_padded) + k, kqar*kqs_padded); } } frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n]; #pragma unroll for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) { #pragma unroll for (int j = 0; j < ncols/frag_n; ++j) { wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], static_cast(0.0f)); } #pragma unroll for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { const int k = k0 + (threadIdx.y % VKQ_ratio)*16; frag_a_V v_a; wmma::load_matrix_sync(v_a, V_h + int64_t(k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV); #pragma unroll for (int j = 0; j < ncols/frag_n; ++j) { wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]); } } } __syncthreads(); const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded); #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) { #pragma unroll for (int j0 = 0; j0 < ncols; j0 += frag_n) { wmma::store_matrix_sync( KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio), VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n], D_padded, wmma::mem_col_major); } } __syncthreads(); #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j = j0 + threadIdx.y; half2 VKQ_scale; if (std::is_same::value) { VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]); } else { VKQ_scale = KQ_max_scale_h2[j0/nwarps]; } #pragma unroll for (int i0 = 0; i0 < D/2; i0 += warp_size) { const int i = i0 + threadIdx.x; if (i0 + warp_size > D/2 && i >= D/2) { break; } half2 VKQ_add = make_half2(0.0f, 0.0f); #pragma unroll for (int l = 0; l < VKQ_ratio; ++l) { VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i]; } VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add; } } __syncthreads(); } // Apply attention sinks if (sinksf && blockIdx.y == 0) { const float sinkf = sinksf[head]; const half sinkh = __float2half(sinkf); #pragma unroll for (int j0 = 0; j0 < ncols; j0 += 
nwarps) { const int j = j0 + threadIdx.y; if (std::is_same::value) { float kqmax_new = fmaxf(KQ_max_f[j0/nwarps], sinkf); const float KQ_max_scale = expf(KQ_max_f[j0/nwarps] - kqmax_new); KQ_max_f[j0/nwarps] = kqmax_new; KQ_rowsum_f[j0/nwarps] = KQ_rowsum_f[j0/nwarps] * KQ_max_scale + expf(sinkf - KQ_max_f[j0/nwarps]); const half2 scale_h2 = make_half2(KQ_max_scale, KQ_max_scale); #pragma unroll for (int i0 = 0; i0 < D/2; i0 += warp_size) { const int i = i0 + threadIdx.x; if (i0 + warp_size > D/2 && i >= D/2) break; VKQ2[j*(D_padded/2) + i] *= scale_h2; } } else { half kqmax_old = __low2half(KQ_max_h2[j0/nwarps]); half kqmax_new = fmaxf(kqmax_old, sinkh); KQ_max_h2[j0/nwarps] = __half2half2(kqmax_new); const half KQ_max_scale_h = hexp(kqmax_old - kqmax_new); const half2 KQ_max_scale = __half2half2(KQ_max_scale_h); KQ_rowsum_h2[j0/nwarps] = KQ_rowsum_h2[j0/nwarps] * KQ_max_scale; const half val = hexp(sinkh - kqmax_new); KQ_rowsum_h2[j0/nwarps].x = __hadd(KQ_rowsum_h2[j0/nwarps].x, val); #pragma unroll for (int i0 = 0; i0 < D/2; i0 += warp_size) { const int i = i0 + threadIdx.x; if (i0 + warp_size > D/2 && i >= D/2) break; VKQ2[j*(D_padded/2) + i] *= KQ_max_scale; } } } __syncthreads(); } #pragma unroll for (int j0 = 0; j0 < ncols; j0 += nwarps) { const int j_VKQ = j0 + threadIdx.y; if (ic0 + j_VKQ >= int(ne01.z)) { return; } float KQ_rowsum_j; if (std::is_same::value) { KQ_rowsum_j = KQ_rowsum_f[j0/nwarps]; } else { KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]); } const int j_dst_unrolled = ((sequence*int(ne01.z) + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y; #pragma unroll for (int i0 = 0; i0 < D; i0 += warp_size) { const int i = i0 + threadIdx.x; if (i0 + warp_size > D && i >= D) { break; } float dst_val = VKQ[j_VKQ*D_padded + i]; if (gridDim.y == 1) { dst_val /= KQ_rowsum_j; } dst[j_dst_unrolled*D + i] = dst_val; } if (gridDim.y == 1 || threadIdx.x != 0) { continue; } float2 dst_meta_val; if (std::is_same::value) { dst_meta_val.x = KQ_max_f[j0/nwarps]; } else { dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]); } dst_meta_val.y = KQ_rowsum_j; dst_meta[j_dst_unrolled] = dst_meta_val; } #else GGML_UNUSED_VARS(Q, K, V, mask, sinks, KV_max, dst, dst_meta, scale, max_bias, m0, m1, n_head_log2, logit_softcap, ne00, ne01, ne02, ne03, nb01, nb02, nb03, ne10, ne11, ne12, ne13, nb11, nb12, nb13, nb21, nb22, nb23, ne31, ne32, ne33, nb31, nb32, nb33); NO_DEVICE_CODE; #endif // defined(FLASH_ATTN_AVAILABLE) && (defined(GGML_HIP_ROCWMMA_FATTN) && defined(GGML_USE_WMMA_FATTN)) } constexpr int get_max_power_of_2(int x) { return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1; } static_assert(get_max_power_of_2(1) == 1, "Test failed."); static_assert(get_max_power_of_2(2) == 2, "Test failed."); static_assert(get_max_power_of_2(4) == 4, "Test failed."); static_assert(get_max_power_of_2(6) == 2, "Test failed."); // Number of VKQ rows calculated in parallel: constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) { return (get_max_power_of_2(D/frag_m) < nwarps ? 
get_max_power_of_2(D/frag_m) : nwarps)*frag_m; } static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed."); static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed."); static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed."); static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed."); static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed."); static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed."); static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed."); static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed."); static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed."); template void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; constexpr int nwarps = 4; constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16; const int warp_size = ggml_cuda_info().devices[ggml_cuda_get_device()].warp_size; float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); fattn_kernel_t fattn_kernel; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; fattn_kernel = flash_attn_ext_f16< D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), KQ_acc_t, use_logit_softcap>; } else { constexpr bool use_logit_softcap = true; fattn_kernel = flash_attn_ext_f16< D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), KQ_acc_t, use_logit_softcap>; } launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, FATTN_KQ_STRIDE, true, true, false, warp_size); } void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; const enum ggml_prec prec = ggml_flash_attn_ext_get_prec(KQV); const int warp_size = ggml_cuda_info().devices[ctx.device].warp_size; if (prec != GGML_PREC_DEFAULT) { if (Q->ne[1] <= 32 || Q->ne[0] > 128) { constexpr int cols_per_block = 16; switch (Q->ne[0]) { case 64: ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst); break; case 80: ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst); break; case 96: ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst); break; case 112: ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst); break; case 128: ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); break; case 256: ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, float>(ctx, dst); break; default: GGML_ABORT("fatal error"); break; } } else { constexpr int cols_per_block = 32; switch (Q->ne[0]) { case 64: ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst); break; case 80: ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst); break; case 96: ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst); break; case 112: ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst); break; case 128: ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); break; // case 256: // ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, float>(ctx, dst); // break; default: GGML_ABORT("fatal error"); break; } } return; } #if !defined(GGML_USE_HIP) if (Q->ne[1] <= 8 && Q->ne[0] % warp_size == 0) { constexpr int cols_per_block = 8; switch (Q->ne[0]) { case 64: ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); break; case 96: ggml_cuda_flash_attn_ext_wmma_f16_case< 
96, cols_per_block, half>(ctx, dst); break; case 128: ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); break; case 256: ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); break; default: GGML_ABORT("fatal error"); break; } return; } #endif // !defined(GGML_USE_HIP) if (Q->ne[1] <= 32) { constexpr int cols_per_block = 16; switch (Q->ne[0]) { case 64: ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); break; case 80: ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst); break; case 96: ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); break; case 112: ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst); break; case 128: ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); break; case 256: ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); break; default: GGML_ABORT("fatal error"); break; } return; } constexpr int cols_per_block = 32; switch (Q->ne[0]) { case 64: ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); break; case 80: ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst); break; case 96: ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); break; case 112: ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst); break; case 128: ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); break; case 256: ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); break; default: GGML_ABORT("fatal error"); break; } } ggml-org-ggml-3678254/src/ggml-cuda/fattn-wmma-f16.cuh000066400000000000000000000040501512524704700221600ustar00rootroot00000000000000#pragma once #include "common.cuh" #if defined(GGML_USE_MUSA) #define GGML_USE_WMMA_FATTN #endif // defined(GGML_USE_MUSA) #if defined(GGML_HIP_ROCWMMA_FATTN) #if defined(CDNA) && (ROCWMMA_VERSION_MAJOR < 2 || ROCWMMA_VERSION_MINOR > 0 || ROCWMMA_VERSION_PATCH > 0) #define GGML_USE_WMMA_FATTN #elif defined(CDNA) #warning "rocwmma fattn on CDNA is broken on rocwmma v2.0.0, expect degraded performance" #endif // defined(CDNA) && (ROCWMMA_VERSION_MAJOR < 2 || ROCWMMA_VERSION_MINOR > 0 || ROCWMMA_VERSION_PATCH > 0) #if defined(RDNA3) #define GGML_USE_WMMA_FATTN #endif // defined(RDNA3) #if defined(RDNA4) && ROCWMMA_VERSION_MAJOR > 1 #define GGML_USE_WMMA_FATTN #elif defined(RDNA4) #warning "rocwmma fattn is not suported on RDNA4 on rocwmma < v2.0.0, expect degraded performance" #endif // defined(RDNA4) && ROCWMMA_VERSION_MAJOR > 1 #endif // defined(GGML_HIP_ROCWMMA_FATTN) // WMMA flash attention requires FP16 matrix instructions to be available for ggml code. 
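// Hypothetical usage sketch for the helper below (comment only; the actual dispatch logic lives in
// fattn.cu and may differ):
//
//     const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
//     if (ggml_cuda_should_use_wmma_fattn(cc)) {
//         ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); // Volta / MUSA / RDNA3 / gated CDNA+RDNA4 path
//     } else {
//         // fall back to the MMA, tile or vec FlashAttention kernels
//     }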
static bool ggml_cuda_should_use_wmma_fattn(const int cc) { #if defined(GGML_USE_HIP) && !defined(GGML_HIP_ROCWMMA_FATTN) return false; #else if ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_MTHREADS(cc)) { return true; } else if (GGML_CUDA_CC_IS_CDNA(cc)){ #if defined(GGML_HIP_ROCWMMA_FATTN) && (ROCWMMA_VERSION_MAJOR < 2 || ROCWMMA_VERSION_MINOR > 0 || ROCWMMA_VERSION_PATCH > 0) return true; #else return false; #endif // defined(GGML_HIP_ROCWMMA_FATTN) (ROCWMMA_VERSION_MAJOR < 2 || ROCWMMA_VERSION_MINOR > 0 || ROCWMMA_VERSION_PATCH > 0) } else if (GGML_CUDA_CC_IS_RDNA4(cc)) { #if defined(GGML_HIP_ROCWMMA_FATTN) && ROCWMMA_VERSION_MAJOR > 1 return true; #else return false; #endif // defined(GGML_HIP_ROCWMMA_FATTN) && ROCWMMA_VERSION_MAJOR > 1 } else { return false; } #endif // defined(GGML_USE_HIP) && !defined(GGML_HIP_ROCWMMA_FATTN) } void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/fattn.cu000066400000000000000000000345371512524704700204740ustar00rootroot00000000000000#include "common.cuh" #include "fattn-common.cuh" #include "fattn-mma-f16.cuh" #include "fattn-tile.cuh" #include "fattn-vec.cuh" #include "fattn-wmma-f16.cuh" #include "fattn.cuh" template static void ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; const ggml_tensor * Q = dst->src[0]; if constexpr (ncols2 <= 8) { if (turing_mma_available(cc) && Q->ne[1] <= 8/ncols2) { ggml_cuda_flash_attn_ext_mma_f16_case(ctx, dst); return; } } if (turing_mma_available(cc) && Q->ne[1] <= 16/ncols2) { ggml_cuda_flash_attn_ext_mma_f16_case(ctx, dst); return; } if (ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_TURING || Q->ne[1] <= 32/ncols2) { ggml_cuda_flash_attn_ext_mma_f16_case(ctx, dst); return; } ggml_cuda_flash_attn_ext_mma_f16_case(ctx, dst); } template static void ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; const ggml_tensor * V = dst->src[2]; const ggml_tensor * mask = dst->src[3]; float max_bias = 0.0f; memcpy(&max_bias, (const float *) KQV->op_params + 1, sizeof(float)); // Edge cases like no mask, ALiBi, unpadded K/V, or misaligned addresses for large data transfers // are put into the template specialization without GQA optimizations. 
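// Illustrative example of the GQA optimization: with 32 Q heads and 8 KV heads the ratio is 4,
// so up to 4 Q heads that attend to the same KV head can be grouped into a single kernel invocation,
// effectively increasing the batch size seen by the kernel (this is what the ncols2 variants below select).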
bool use_gqa_opt = mask && max_bias == 0.0f && K->ne[1] % FATTN_KQ_STRIDE == 0; for (const ggml_tensor * t : {Q, K, V, mask}) { if (t == nullptr) { continue; } for (size_t i = 1; i < GGML_MAX_DIMS; ++i) { if (t->nb[i] % 16 != 0) { use_gqa_opt = false; break; } } } GGML_ASSERT(Q->ne[2] % K->ne[2] == 0); const int gqa_ratio = Q->ne[2] / K->ne[2]; if (use_gqa_opt && gqa_ratio % 8 == 0) { ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ctx, dst); return; } if (use_gqa_opt && gqa_ratio % 4 == 0) { ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ctx, dst); return; } if (use_gqa_opt && gqa_ratio % 2 == 0) { ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ctx, dst); return; } ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ctx, dst); } static void ggml_cuda_flash_attn_ext_mma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; const ggml_tensor * V = dst->src[2]; const ggml_tensor * mask = dst->src[3]; switch (Q->ne[0]) { case 64: GGML_ASSERT(V->ne[0] == 64); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2< 64, 64>(ctx, dst); break; case 80: GGML_ASSERT(V->ne[0] == 80); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2< 80, 80>(ctx, dst); break; case 96: GGML_ASSERT(V->ne[0] == 96); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2< 96, 96>(ctx, dst); break; case 112: GGML_ASSERT(V->ne[0] == 112); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2<112, 112>(ctx, dst); break; case 128: GGML_ASSERT(V->ne[0] == 128); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2<128, 128>(ctx, dst); break; case 256: GGML_ASSERT(V->ne[0] == 256); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols2<256, 256>(ctx, dst); break; case 576: { // For Deepseek, go straight to the ncols1 switch to avoid compiling unnecessary kernels. 
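// (a K head size of 576 together with a V head size of 512 matches DeepSeek-style MLA attention,
//  where K presumably carries an extra 64-element RoPE section on top of the 512-element latent shared with V)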
GGML_ASSERT(V->ne[0] == 512); float max_bias = 0.0f; memcpy(&max_bias, (const float *) KQV->op_params + 1, sizeof(float)); const bool use_gqa_opt = mask && max_bias == 0.0f; GGML_ASSERT(use_gqa_opt); GGML_ASSERT(Q->ne[2] % K->ne[2] == 0); const int gqa_ratio = Q->ne[2] / K->ne[2]; GGML_ASSERT(gqa_ratio % 16 == 0); ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1<576, 512, 16>(ctx, dst); } break; default: GGML_ABORT("fatal error"); break; } } #define FATTN_VEC_CASE(D, type_K, type_V) \ { \ const bool type_K_okay = K->type == (type_K) || (K->type == GGML_TYPE_F32 && (type_K) == GGML_TYPE_F16); \ const bool type_V_okay = V->type == (type_V) || (V->type == GGML_TYPE_F32 && (type_V) == GGML_TYPE_F16); \ if (Q->ne[0] == (D) && type_K_okay && type_V_okay) { \ ggml_cuda_flash_attn_ext_vec_case(ctx, dst); \ return; \ } \ } \ #define FATTN_VEC_CASES_ALL_D(type_K, type_V) \ FATTN_VEC_CASE( 64, type_K, type_V) \ FATTN_VEC_CASE(128, type_K, type_V) \ FATTN_VEC_CASE(256, type_K, type_V) \ static void ggml_cuda_flash_attn_ext_vec(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_tensor * Q = dst->src[0]; ggml_tensor * K = dst->src[1]; ggml_tensor * V = dst->src[2]; #ifdef GGML_CUDA_FA_ALL_QUANTS FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_1, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_0, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_1, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_1, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_0, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_1, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_Q4_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_Q4_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_1, GGML_TYPE_Q4_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_0, GGML_TYPE_Q4_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_1, GGML_TYPE_Q4_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_Q4_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_Q5_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_Q5_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_1, GGML_TYPE_Q5_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_0, GGML_TYPE_Q5_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_1, GGML_TYPE_Q5_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_Q5_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_Q5_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_Q5_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_1, GGML_TYPE_Q5_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_0, GGML_TYPE_Q5_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_1, GGML_TYPE_Q5_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_Q5_1) FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_Q8_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_Q8_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_1, GGML_TYPE_Q8_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_0, GGML_TYPE_Q8_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q5_1, GGML_TYPE_Q8_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_Q8_0) #else FATTN_VEC_CASES_ALL_D(GGML_TYPE_F16, GGML_TYPE_F16) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q4_0, GGML_TYPE_Q4_0) FATTN_VEC_CASES_ALL_D(GGML_TYPE_Q8_0, GGML_TYPE_Q8_0) #endif // GGML_CUDA_FA_ALL_QUANTS GGML_ABORT("fatal error"); } // Best FlashAttention kernel for a specific GPU: enum best_fattn_kernel { BEST_FATTN_KERNEL_NONE = 0, BEST_FATTN_KERNEL_TILE = 200, BEST_FATTN_KERNEL_VEC = 100, 
BEST_FATTN_KERNEL_WMMA_F16 = 300, BEST_FATTN_KERNEL_MMA_F16 = 400, }; static best_fattn_kernel ggml_cuda_get_best_fattn_kernel(const int device, const ggml_tensor * dst) { #ifndef FLASH_ATTN_AVAILABLE GGML_UNUSED(device); GGML_UNUSED(dst); return BEST_FATTN_KERNEL_NONE; #endif// FLASH_ATTN_AVAILABLE const ggml_tensor * KQV = dst; const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; const ggml_tensor * V = dst->src[2]; const ggml_tensor * mask = dst->src[3]; const int gqa_ratio = Q->ne[2] / K->ne[2]; GGML_ASSERT(Q->ne[2] % K->ne[2] == 0); float max_bias = 0.0f; memcpy(&max_bias, (const float *) KQV->op_params + 1, sizeof(float)); // The effective batch size for the kernel can be increased by gqa_ratio. // The kernel versions without this optimization are also used for ALiBi, if there is no mask, or if the KV cache is not padded, const bool gqa_opt_applies = gqa_ratio % 2 == 0 && mask && max_bias == 0.0f && K->ne[1] % FATTN_KQ_STRIDE == 0; const int cc = ggml_cuda_info().devices[device].cc; switch (K->ne[0]) { case 40: case 64: case 72: case 80: case 96: case 128: case 112: case 256: if (V->ne[0] != K->ne[0]) { return BEST_FATTN_KERNEL_NONE; } break; case 576: if (V->ne[0] != 512) { return BEST_FATTN_KERNEL_NONE; } if (!gqa_opt_applies || gqa_ratio % 16 != 0) { return BEST_FATTN_KERNEL_NONE; } break; default: return BEST_FATTN_KERNEL_NONE; } #ifndef GGML_CUDA_FA_ALL_QUANTS if (K->type != V->type) { return BEST_FATTN_KERNEL_NONE; } #endif // GGML_CUDA_FA_ALL_QUANTS switch (K->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: break; case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: #ifndef GGML_CUDA_FA_ALL_QUANTS return BEST_FATTN_KERNEL_NONE; #endif // GGML_CUDA_FA_ALL_QUANTS case GGML_TYPE_Q4_0: case GGML_TYPE_Q8_0: break; default: return BEST_FATTN_KERNEL_NONE; } if (mask && mask->ne[2] != 1) { return BEST_FATTN_KERNEL_NONE; } // For small batch sizes the vector kernel may be preferable over the kernels optimized for large batch sizes: const bool can_use_vector_kernel = Q->ne[0] <= 256 && Q->ne[0] % 64 == 0 && K->ne[1] % FATTN_KQ_STRIDE == 0; // If Turing tensor cores are available, use them: if (turing_mma_available(cc) && Q->ne[0] != 40 && Q->ne[0] != 72) { if (can_use_vector_kernel) { if (!ggml_is_quantized(K->type) && !ggml_is_quantized(V->type)) { if (cc >= GGML_CUDA_CC_ADA_LOVELACE && Q->ne[1] == 1 && Q->ne[3] == 1 && !(gqa_ratio > 4 && K->ne[1] >= 8192)) { return BEST_FATTN_KERNEL_VEC; } } else { if (cc >= GGML_CUDA_CC_ADA_LOVELACE) { if (Q->ne[1] <= 2) { return BEST_FATTN_KERNEL_VEC; } } else { if (Q->ne[1] == 1) { return BEST_FATTN_KERNEL_VEC; } } } if (!gqa_opt_applies && Q->ne[1] == 1) { return BEST_FATTN_KERNEL_VEC; } } return BEST_FATTN_KERNEL_MMA_F16; } if (volta_mma_available(cc) && Q->ne[0] != 40 && Q->ne[0] != 72) { int gqa_ratio_eff = 1; const int ncols2_max = Q->ne[0] == 576 ? 16 : 8; while (gqa_ratio % (2*gqa_ratio_eff) == 0 && gqa_ratio_eff < ncols2_max) { gqa_ratio_eff *= 2; } if (can_use_vector_kernel && Q->ne[1] * gqa_ratio_eff <= 2) { return BEST_FATTN_KERNEL_VEC; } if (Q->ne[1] * gqa_ratio_eff <= 16) { return BEST_FATTN_KERNEL_TILE; // On Volta tensor cores are only faster for sufficiently large matrices. 
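// ("sufficiently large" here means an effective batch of more than 16 columns, i.e. Q->ne[1]*gqa_ratio_eff > 16)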
} return BEST_FATTN_KERNEL_MMA_F16; } // Use the WMMA kernel if possible: if (ggml_cuda_should_use_wmma_fattn(cc) && K->ne[1] % FATTN_KQ_STRIDE == 0 && Q->ne[0] != 40 && Q->ne[0] != 72 && Q->ne[0] != 576) { if (can_use_vector_kernel && Q->ne[1] <= 2) { return BEST_FATTN_KERNEL_VEC; } return BEST_FATTN_KERNEL_WMMA_F16; } // If there are no tensor cores available, use the generic tile kernel: if (can_use_vector_kernel) { if (!ggml_is_quantized(K->type) && !ggml_is_quantized(V->type)) { if (Q->ne[1] == 1) { if (!gqa_opt_applies) { return BEST_FATTN_KERNEL_VEC; } } } else { if (Q->ne[1] <= 2) { return BEST_FATTN_KERNEL_VEC; } } } return BEST_FATTN_KERNEL_TILE; } void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_set_device(ctx.device); switch (ggml_cuda_get_best_fattn_kernel(ggml_cuda_get_device(), dst)) { case BEST_FATTN_KERNEL_NONE: GGML_ABORT("fatal error"); case BEST_FATTN_KERNEL_TILE: ggml_cuda_flash_attn_ext_tile(ctx, dst); break; case BEST_FATTN_KERNEL_VEC: ggml_cuda_flash_attn_ext_vec(ctx, dst); break; case BEST_FATTN_KERNEL_WMMA_F16: ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); break; case BEST_FATTN_KERNEL_MMA_F16: ggml_cuda_flash_attn_ext_mma_f16(ctx, dst); break; } } bool ggml_cuda_flash_attn_ext_supported(int device, const ggml_tensor * dst) { return ggml_cuda_get_best_fattn_kernel(device, dst) != BEST_FATTN_KERNEL_NONE; } ggml-org-ggml-3678254/src/ggml-cuda/fattn.cuh000066400000000000000000000002711512524704700206300ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst); bool ggml_cuda_flash_attn_ext_supported(int device, const ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/fill.cu000066400000000000000000000021261512524704700202730ustar00rootroot00000000000000#include "fill.cuh" #include "convert.cuh" #define CUDA_FILL_BLOCK_SIZE 256 template static __global__ void fill_kernel(T * dst, const int64_t k, const T value) { const int64_t i = (int64_t)blockDim.x * blockIdx.x + threadIdx.x; if (i >= k) { return; } dst[i] = value; } void ggml_cuda_op_fill(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { void * dst_d = dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(dst)); float value; memcpy(&value, dst->op_params, sizeof(float)); const int64_t k = ggml_nelements(dst); const int64_t num_blocks = (k + CUDA_FILL_BLOCK_SIZE - 1) / CUDA_FILL_BLOCK_SIZE; switch (dst->type) { case GGML_TYPE_F32: fill_kernel<<>>((float *)dst_d, k, value); break; case GGML_TYPE_F16: fill_kernel<<>>((half *)dst_d, k, ggml_cuda_cast(value)); break; default: GGML_ABORT("unsupported type"); } } ggml-org-ggml-3678254/src/ggml-cuda/fill.cuh000066400000000000000000000001431512524704700204400ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_fill(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/getrows.cu000066400000000000000000000307431512524704700210450ustar00rootroot00000000000000#include "getrows.cuh" #include "dequantize.cuh" #include "convert.cuh" template static __global__ void k_get_rows( const void * __restrict__ src0, const int32_t * __restrict__ src1, dst_t * __restrict__ dst, const int64_t ne00, /*const int64_t ne01, const int64_t ne02, const int64_t ne03,*/ /*const int64_t ne10,*/ const int64_t ne11, const int64_t ne12, /*const int64_t ne13,*/ /*const size_t s0,*/ const size_t s1, const size_t s2, const size_t s3, /*const size_t nb00,*/ const size_t nb01, const size_t nb02, const size_t 
nb03, const size_t s10, const size_t s11, const size_t s12/*, const size_t s13*/) { for (int64_t z = blockIdx.z; z < ne11*ne12; z += gridDim.z) { for (int64_t i00 = 2*(blockIdx.y*blockDim.x + threadIdx.x); i00 < ne00; i00 += gridDim.y*blockDim.x) { // The x and y dimensions of the grid are swapped because the maximum allowed grid size for x is higher. const int i10 = blockIdx.x; const int i11 = z / ne12; // TODO fastdiv const int i12 = z % ne12; const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; const void * src0_row = (const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03; const int ib = i00/qk; // block index const int iqs = (i00%qk)/qr; // quant index const int iybs = i00 - i00%qk; // dst block start index const int y_offset = qr == 1 ? 1 : qk/2; // dequantize float2 v; dequantize_kernel(src0_row, ib, iqs, v); dst_row[iybs + iqs + 0] = ggml_cuda_cast(v.x); dst_row[iybs + iqs + y_offset] = ggml_cuda_cast(v.y); } } } template static __global__ void k_get_rows_float( const src0_t * __restrict__ src0, const int32_t * __restrict__ src1, dst_t * __restrict__ dst, const int64_t ne00, /*const int64_t ne01, const int64_t ne02, const int64_t ne03,*/ /*const int64_t ne10,*/ const int64_t ne11, const int64_t ne12, /*const int64_t ne13,*/ /*const size_t s0,*/ const size_t s1, const size_t s2, const size_t s3, /*const size_t nb00,*/ const size_t nb01, const size_t nb02, const size_t nb03, const size_t s10, const size_t s11, const size_t s12/*, const size_t s13*/) { for (int64_t z = blockIdx.z; z < ne11*ne12; z += gridDim.z) { for (int64_t i00 = blockIdx.y*blockDim.x + threadIdx.x; i00 < ne00; i00 += gridDim.y*blockDim.x) { // The x and y dimensions of the grid are swapped because the maximum allowed grid size for x is higher. 
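// (gridDim.x may be as large as 2^31 - 1 while gridDim.y and gridDim.z are limited to 65535,
//  which is also why the host code clamps the y and z grid sizes with MIN(..., UINT16_MAX))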
const int i10 = blockIdx.x; const int i11 = z / ne12; // TODO fastdiv const int i12 = z % ne12; if (i00 >= ne00) { return; } const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; const src0_t * src0_row = (const src0_t *)((const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03); dst_row[i00] = ggml_cuda_cast(src0_row[i00]); } } } template static __global__ void k_get_rows_back_float( const grad_t * __restrict__ grad, const int32_t * __restrict__ rows, dst_t * __restrict__ dst, const int64_t ncols, const int64_t nrows_grad) { const int col = blockIdx.x*blockDim.x + threadIdx.x; if (col >= ncols) { return; } const int dst_row = blockIdx.y*blockDim.y + threadIdx.y; float sum = 0.0f; for (int64_t i = 0; i < nrows_grad; ++i) { if (rows[i] != dst_row) { continue; } sum += grad[i*ncols + col]; } dst[dst_row*ncols + col] = sum; } template static void get_rows_cuda_q( const void * src0_d, const int32_t * src1_d, dst_t * dst_d, const int64_t ne00, const size_t nb01, const size_t nb02, const size_t nb03, const int64_t ne10, const int64_t ne11, const int64_t ne12, const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb1, const size_t nb2, const size_t nb3, cudaStream_t stream) { const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); const int block_num_y = (ne00 + 2*CUDA_GET_ROWS_BLOCK_SIZE - 1) / (2*CUDA_GET_ROWS_BLOCK_SIZE); const dim3 block_nums(ne10, MIN(block_num_y, UINT16_MAX), MIN(ne11*ne12, UINT16_MAX)); // strides in elements // const size_t s0 = nb0 / sizeof(dst_t); const size_t s1 = nb1 / sizeof(dst_t); const size_t s2 = nb2 / sizeof(dst_t); const size_t s3 = nb3 / sizeof(dst_t); const size_t s10 = nb10 / sizeof(int32_t); const size_t s11 = nb11 / sizeof(int32_t); const size_t s12 = nb12 / sizeof(int32_t); // const size_t s13 = nb13 / sizeof(int32_t); GGML_ASSERT(ne00 % 2 == 0); k_get_rows<<>>( src0_d, src1_d, dst_d, ne00, /*ne01, ne02, ne03,*/ /*ne10,*/ ne11, ne12, /*ne13,*/ /* s0,*/ s1, s2, s3, /* nb00,*/ nb01, nb02, nb03, s10, s11, s12/*, s13*/); } template static void get_rows_cuda_float( const src0_t * src0_d, const int32_t * src1_d, dst_t * dst_d, const int64_t ne00, const size_t nb01, const size_t nb02, const size_t nb03, const int64_t ne10, const int64_t ne11, const int64_t ne12, const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb1, const size_t nb2, const size_t nb3, cudaStream_t stream) { const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); const int block_num_y = (ne00 + CUDA_GET_ROWS_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BLOCK_SIZE; const dim3 block_nums(ne10, MIN(block_num_y, UINT16_MAX), MIN(ne11*ne12, UINT16_MAX)); // strides in elements // const size_t s0 = nb0 / sizeof(dst_t); const size_t s1 = nb1 / sizeof(dst_t); const size_t s2 = nb2 / sizeof(dst_t); const size_t s3 = nb3 / sizeof(dst_t); const size_t s10 = nb10 / sizeof(int32_t); const size_t s11 = nb11 / sizeof(int32_t); const size_t s12 = nb12 / sizeof(int32_t); // const size_t s13 = nb13 / sizeof(int32_t); k_get_rows_float<<>>( src0_d, src1_d, dst_d, ne00, /*ne01, ne02, ne03,*/ /*ne10,*/ ne11, ne12, /*ne13,*/ /* s0,*/ s1, s2, s3, /* nb00,*/ nb01, nb02, nb03, s10, s11, s12/*, s13*/); } template static void ggml_cuda_get_rows_switch_src0_type( const void * src0_d, const ggml_type src0_type, const int32_t * src1_d, dst_t * dst_d, const int64_t ne00, const size_t nb01, const size_t nb02, const size_t nb03, const int64_t ne10, const int64_t ne11, const int64_t ne12, const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb1, const 
size_t nb2, const size_t nb3, cudaStream_t stream) { switch (src0_type) { case GGML_TYPE_F16: get_rows_cuda_float((const half *) src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_F32: get_rows_cuda_float((const float *) src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_I32: get_rows_cuda_float((const int32_t *) src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_BF16: get_rows_cuda_float((const nv_bfloat16 *) src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_Q4_0: get_rows_cuda_q(src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_Q4_1: get_rows_cuda_q(src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_Q5_0: get_rows_cuda_q(src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_Q5_1: get_rows_cuda_q(src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_Q8_0: get_rows_cuda_q(src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; default: // TODO: k-quants GGML_ABORT("%s: unsupported src0 type: %s\n", __func__, ggml_type_name(src0_type)); break; } } void get_rows_cuda( const void * src0_d, ggml_type src0_type, const int32_t * src1_d, void * dst_d, ggml_type dst_type, int64_t ne00, size_t nb01, size_t nb02, size_t nb03, int64_t ne10, int64_t ne11, int64_t ne12, size_t nb10, size_t nb11, size_t nb12, size_t nb1, size_t nb2, size_t nb3, cudaStream_t stream) { switch (dst_type) { case GGML_TYPE_F32: ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (float *) dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_I32: ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (int32_t *) dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_F16: ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (half *) dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; case GGML_TYPE_BF16: ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (nv_bfloat16 *) dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; default: GGML_ABORT("%s: unsupported dst type: %s\n", __func__, ggml_type_name(dst_type)); break; } } void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; cudaStream_t stream = ctx.stream(); GGML_TENSOR_BINARY_OP_LOCALS GGML_ASSERT(src1->type == GGML_TYPE_I32); GGML_ASSERT(ne13 == 1); GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type)); get_rows_cuda(src0->data, src0->type, (const int32_t *) src1->data, dst->data, dst->type, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); } void ggml_cuda_op_get_rows_back(ggml_backend_cuda_context & ctx, 
ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output const ggml_tensor * src1 = dst->src[1]; // src1 in forward pass GGML_TENSOR_BINARY_OP_LOCALS const float * src0_d = (const float *) src0->data; const int32_t * src1_d = (const int32_t *) src1->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_I32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ne02*ne03 == 1); GGML_ASSERT(ne12*ne13 == 1); GGML_ASSERT(ne2*ne3 == 1); const dim3 block_dims(CUDA_GET_ROWS_BACK_BLOCK_SIZE, 1, 1); const int block_num_x = (ne00 + CUDA_GET_ROWS_BACK_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BACK_BLOCK_SIZE; const dim3 block_nums(block_num_x, ne1, 1); k_get_rows_back_float<<>>(src0_d, src1_d, dst_d, ne00, ne10); } ggml-org-ggml-3678254/src/ggml-cuda/getrows.cuh000066400000000000000000000011561512524704700212110ustar00rootroot00000000000000#include "common.cuh" #define CUDA_GET_ROWS_BLOCK_SIZE 256 #define CUDA_GET_ROWS_BACK_BLOCK_SIZE 256 void get_rows_cuda( const void * src0_d, ggml_type src0_type, const int32_t * src1_d, void * dst_d, ggml_type dst_type, int64_t ne00, size_t nb01, size_t nb02, size_t nb03, int64_t ne10, int64_t ne11, int64_t ne12, size_t nb10, size_t nb11, size_t nb12, size_t nb1, size_t nb2, size_t nb3, cudaStream_t stream); void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_get_rows_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/ggml-cuda.cu000066400000000000000000006151201512524704700212110ustar00rootroot00000000000000#include "ggml-cuda.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-cuda/common.cuh" #include "ggml-cuda/acc.cuh" #include "ggml-cuda/add-id.cuh" #include "ggml-cuda/arange.cuh" #include "ggml-cuda/argmax.cuh" #include "ggml-cuda/argsort.cuh" #include "ggml-cuda/binbcast.cuh" #include "ggml-cuda/clamp.cuh" #include "ggml-cuda/concat.cuh" #include "ggml-cuda/conv-transpose-1d.cuh" #include "ggml-cuda/conv2d.cuh" #include "ggml-cuda/conv2d-dw.cuh" #include "ggml-cuda/conv2d-transpose.cuh" #include "ggml-cuda/convert.cuh" #include "ggml-cuda/count-equal.cuh" #include "ggml-cuda/cpy.cuh" #include "ggml-cuda/cross-entropy-loss.cuh" #include "ggml-cuda/diagmask.cuh" #include "ggml-cuda/diag.cuh" #include "ggml-cuda/fattn.cuh" #include "ggml-cuda/getrows.cuh" #include "ggml-cuda/im2col.cuh" #include "ggml-cuda/mmf.cuh" #include "ggml-cuda/mmq.cuh" #include "ggml-cuda/mmvf.cuh" #include "ggml-cuda/mmvq.cuh" #include "ggml-cuda/norm.cuh" #include "ggml-cuda/opt-step-adamw.cuh" #include "ggml-cuda/opt-step-sgd.cuh" #include "ggml-cuda/out-prod.cuh" #include "ggml-cuda/pad.cuh" #include "ggml-cuda/pool2d.cuh" #include "ggml-cuda/quantize.cuh" #include "ggml-cuda/rope.cuh" #include "ggml-cuda/roll.cuh" #include "ggml-cuda/scale.cuh" #include "ggml-cuda/softcap.cuh" #include "ggml-cuda/softmax.cuh" #include "ggml-cuda/ssm-conv.cuh" #include "ggml-cuda/ssm-scan.cuh" #include "ggml-cuda/sum.cuh" #include "ggml-cuda/sumrows.cuh" #include "ggml-cuda/mean.cuh" #include "ggml-cuda/tsembd.cuh" #include "ggml-cuda/topk-moe.cuh" #include "ggml-cuda/unary.cuh" #include "ggml-cuda/upscale.cuh" #include "ggml-cuda/wkv.cuh" #include "ggml-cuda/gla.cuh" #include "ggml-cuda/set.cuh" #include "ggml-cuda/set-rows.cuh" #include 
"ggml-cuda/pad_reflect_1d.cuh" #include "ggml-cuda/solve_tri.cuh" #include "ggml-cuda/tri.cuh" #include "ggml-cuda/cumsum.cuh" #include "ggml-cuda/fill.cuh" #include "ggml.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); [[noreturn]] void ggml_cuda_error(const char * stmt, const char * func, const char * file, int line, const char * msg) { int id = -1; // in case cudaGetDevice fails (void)cudaGetDevice(&id); GGML_LOG_ERROR(GGML_CUDA_NAME " error: %s\n", msg); GGML_LOG_ERROR(" current device: %d, in function %s at %s:%d\n", id, func, file, line); GGML_LOG_ERROR(" %s\n", stmt); // abort with GGML_ABORT to get a stack trace GGML_ABORT(GGML_CUDA_NAME " error"); } // this is faster on Windows // probably because the Windows CUDA libraries forget to make this check before invoking the drivers void ggml_cuda_set_device(int device) { int current_device; CUDA_CHECK(cudaGetDevice(¤t_device)); if (device == current_device) { return; } CUDA_CHECK(cudaSetDevice(device)); } int ggml_cuda_get_device() { int id; CUDA_CHECK(cudaGetDevice(&id)); return id; } static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) { ggml_cuda_set_device(device); cudaError_t err; if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) { err = cudaMallocManaged(ptr, size); #if defined(GGML_USE_HIP) if (err == hipSuccess) { CUDA_CHECK(cudaMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device)); } // fall back to cudaMalloc if not supported (e.g. on Windows) if (err == hipErrorNotSupported) { static bool warned_unsupported = false; if (!warned_unsupported) { GGML_LOG_WARN("hipMallocManaged unsupported, falling back to hipMalloc.\n"); warned_unsupported = true; } err = cudaMalloc(ptr, size); } #endif // defined(GGML_USE_HIP) } else { err = cudaMalloc(ptr, size); } return err; } #if defined(GGML_USE_HIP) static int ggml_cuda_parse_id(char devName[]) { // A list of possible Target IDs can be found under the rocclr/clr repo in device.cpp // these values are not stable so this is susceptible to breakage // https://github.com/ROCm/clr/blob/amd-staging/rocclr/device/device.cpp int archMajor = 0x0; int archMinor = 0x0; int archNum = GGML_CUDA_CC_OFFSET_AMD; int archLen = strlen(devName); char archName[archLen + 1]; // strip leading 'gfx' while copying into our buffer if (archLen > 3) { strcpy(archName, &devName[3]); archLen -= 3; } // trim trailing :xnack- or :sramecc- statuses archLen = strcspn(archName, ":"); archName[archLen] = '\0'; // tease out the version information if (archLen > 8) { // versions labeled generic use '-' as delimiter // strip the trailing "-generic" then iterate through what remains if ((strstr(archName, "-generic"))) { archName[archLen - 8] = '\0'; char * pch; if ((pch = strtok(archName, "-"))) { archMajor = (int)strtoul(pch, 0, 16); if ((pch = strtok(NULL, "-"))) { archMinor = 0x10 * (int)strtoul(pch, 0, 16); } } } } else if (archLen >= 3) { // last two digits should be the minor * 0x10 + stepping archMinor = (int)strtoul(&archName[archLen - 2], 0, 16); archName[archLen - 2] = '\0'; // only the major version remains archMajor = (int)strtoul(archName, 0, 16); } archNum += archMajor * 0x100; archNum += archMinor; return archNum; } #endif // defined(GGML_USE_HIP) static ggml_cuda_device_info ggml_cuda_init() { ggml_cuda_device_info info = {}; cudaError_t err = 
cudaGetDeviceCount(&info.device_count); if (err != cudaSuccess) { GGML_LOG_ERROR("%s: failed to initialize " GGML_CUDA_NAME ": %s\n", __func__, cudaGetErrorString(err)); return info; } GGML_ASSERT(info.device_count <= GGML_CUDA_MAX_DEVICES); int64_t total_vram = 0; #ifdef GGML_CUDA_FORCE_MMQ GGML_LOG_INFO("%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__); #else GGML_LOG_INFO("%s: GGML_CUDA_FORCE_MMQ: no\n", __func__); #endif // GGML_CUDA_FORCE_MMQ #ifdef GGML_CUDA_FORCE_CUBLAS GGML_LOG_INFO("%s: GGML_CUDA_FORCE_CUBLAS: yes\n", __func__); #else GGML_LOG_INFO("%s: GGML_CUDA_FORCE_CUBLAS: no\n", __func__); #endif // GGML_CUDA_FORCE_CUBLAS GGML_LOG_INFO("%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, info.device_count); std::vector> turing_devices_without_mma; for (int id = 0; id < info.device_count; ++id) { int device_vmm = 0; #if defined(GGML_USE_VMM) CUdevice device; CU_CHECK(cuDeviceGet(&device, id)); CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); if (device_vmm) { CUmemAllocationProp alloc_prop = {}; alloc_prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; alloc_prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; alloc_prop.location.id = id; CU_CHECK(cuMemGetAllocationGranularity(&info.devices[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)); } #endif // defined(GGML_USE_VMM) info.devices[id].vmm = !!device_vmm; cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); info.default_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; info.devices[id].integrated = false; // Temporarily disabled due to issues with corrupted output (e.g. #15034) info.devices[id].nsm = prop.multiProcessorCount; info.devices[id].smpb = prop.sharedMemPerBlock; info.devices[id].warp_size = prop.warpSize; #if defined(GGML_USE_HIP) info.devices[id].smpbo = prop.sharedMemPerBlock; info.devices[id].cc = ggml_cuda_parse_id(prop.gcnArchName); if ((info.devices[id].cc & 0xff00) == 0x0) { GGML_LOG_WARN("invalid architecture ID received for device %d %s: %s cc %d.%d\n", id, prop.name, prop.gcnArchName, prop.major, prop.minor); // Fallback to prop.major and prop.minor if (prop.major > 0) { info.devices[id].cc = GGML_CUDA_CC_OFFSET_AMD + prop.major * 0x100; info.devices[id].cc += prop.minor * 0x10; } } GGML_LOG_INFO(" Device %d: %s, %s (0x%x), VMM: %s, Wave Size: %d\n", id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff, device_vmm ? "yes" : "no", prop.warpSize); #elif defined(GGML_USE_MUSA) // FIXME: Ensure compatibility with varying warp sizes across different MUSA archs. info.devices[id].warp_size = 32; info.devices[id].smpbo = prop.sharedMemPerBlockOptin; info.devices[id].cc = GGML_CUDA_CC_OFFSET_MTHREADS + prop.major * 0x100; info.devices[id].cc += prop.minor * 0x10; GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); #else info.devices[id].smpbo = prop.sharedMemPerBlockOptin; info.devices[id].cc = 100*prop.major + 10*prop.minor; GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? 
"yes" : "no"); std::string device_name(prop.name); if (device_name == "NVIDIA GeForce MX450") { turing_devices_without_mma.push_back({ id, device_name }); } else if (device_name == "NVIDIA GeForce MX550") { turing_devices_without_mma.push_back({ id, device_name }); } else if (device_name.substr(0, 21) == "NVIDIA GeForce GTX 16") { turing_devices_without_mma.push_back({ id, device_name }); } // Temporary performance fix: // Setting device scheduling strategy for iGPUs with cc121 to "spinning" to avoid delays in cuda synchronize calls. // TODO: Check for future drivers the default scheduling strategy and // remove this call again when cudaDeviceScheduleSpin is default. if (prop.major == 12 && prop.minor == 1) { CUDA_CHECK(cudaSetDeviceFlags(cudaDeviceScheduleSpin)); } #endif // defined(GGML_USE_HIP) } if (ggml_cuda_highest_compiled_arch(GGML_CUDA_CC_TURING) >= GGML_CUDA_CC_TURING && !turing_devices_without_mma.empty()) { GGML_LOG_INFO("The following devices will have suboptimal performance due to a lack of tensor cores:\n"); for (size_t device_pos = 0; device_pos < turing_devices_without_mma.size(); device_pos++) { GGML_LOG_INFO( " Device %d: %s\n", turing_devices_without_mma[device_pos].first, turing_devices_without_mma[device_pos].second.c_str()); } GGML_LOG_INFO( "Consider compiling with CMAKE_CUDA_ARCHITECTURES=61-virtual;80-virtual and DGGML_CUDA_FORCE_MMQ to force the use of the Pascal code for Turing.\n"); } for (int id = 0; id < info.device_count; ++id) { info.default_tensor_split[id] /= total_vram; } // configure logging to stdout // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr)); return info; } const ggml_cuda_device_info & ggml_cuda_info() { static ggml_cuda_device_info info = ggml_cuda_init(); return info; } // #define DEBUG_CUDA_MALLOC // buffer pool for cuda (legacy) struct ggml_cuda_pool_leg : public ggml_cuda_pool { static const int MAX_BUFFERS = 256; int device; struct ggml_cuda_buffer { void * ptr = nullptr; size_t size = 0; }; ggml_cuda_buffer buffer_pool[MAX_BUFFERS] = {}; size_t pool_size = 0; explicit ggml_cuda_pool_leg(int device) : device(device) { } ~ggml_cuda_pool_leg() { ggml_cuda_set_device(device); for (int i = 0; i < MAX_BUFFERS; ++i) { ggml_cuda_buffer & b = buffer_pool[i]; if (b.ptr != nullptr) { CUDA_CHECK(cudaFree(b.ptr)); pool_size -= b.size; } } GGML_ASSERT(pool_size == 0); } void * alloc(size_t size, size_t * actual_size) override { #ifdef DEBUG_CUDA_MALLOC int nnz = 0; size_t max_size = 0; #endif size_t best_diff = 1ull << 36; int ibest = -1; for (int i = 0; i < MAX_BUFFERS; ++i) { ggml_cuda_buffer& b = buffer_pool[i]; if (b.ptr != nullptr) { #ifdef DEBUG_CUDA_MALLOC ++nnz; if (b.size > max_size) max_size = b.size; #endif if (b.size >= size) { size_t diff = b.size - size; if (diff < best_diff) { best_diff = diff; ibest = i; if (!best_diff) { void * ptr = b.ptr; *actual_size = b.size; b.ptr = nullptr; b.size = 0; return ptr; } } } } } if (ibest >= 0) { ggml_cuda_buffer& b = buffer_pool[ibest]; void * ptr = b.ptr; *actual_size = b.size; b.ptr = nullptr; b.size = 0; return ptr; } void * ptr; size_t look_ahead_size = (size_t) (1.05 * size); look_ahead_size = 256 * ((look_ahead_size + 255)/256); ggml_cuda_set_device(device); CUDA_CHECK(ggml_cuda_device_malloc(&ptr, look_ahead_size, device)); *actual_size = look_ahead_size; pool_size += look_ahead_size; #ifdef DEBUG_CUDA_MALLOC GGML_LOG_INFO("%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, device, nnz, (uint32_t)(max_size / 1024 / 1024), (uint32_t)(pool_size / 1024 
/ 1024), (uint32_t)(size / 1024 / 1024)); #endif return ptr; } void free(void * ptr, size_t size) override { for (int i = 0; i < MAX_BUFFERS; ++i) { ggml_cuda_buffer& b = buffer_pool[i]; if (b.ptr == nullptr) { b.ptr = ptr; b.size = size; return; } } GGML_LOG_DEBUG(GGML_CUDA_NAME " buffer pool full, increase MAX_CUDA_BUFFERS\n"); ggml_cuda_set_device(device); CUDA_CHECK(cudaFree(ptr)); pool_size -= size; } }; // pool with virtual memory #if defined(GGML_USE_VMM) struct ggml_cuda_pool_vmm : public ggml_cuda_pool { static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB int device; CUdeviceptr pool_addr = 0; size_t pool_used = 0; size_t pool_size = 0; size_t granularity; #if defined(GGML_USE_HIP) std::vector> mappings; #endif explicit ggml_cuda_pool_vmm(int device) : device(device), granularity(ggml_cuda_info().devices[device].vmm_granularity) { } ~ggml_cuda_pool_vmm() { if (pool_addr != 0) { #if defined(GGML_USE_HIP) // Workaround for https://github.com/ROCm/ROCR-Runtime/issues/285 for (std::pair & mapping : mappings) { CU_CHECK(cuMemUnmap(mapping.first, mapping.second)); } #else CU_CHECK(cuMemUnmap(pool_addr, pool_size)); #endif CU_CHECK(cuMemAddressFree(pool_addr, CUDA_POOL_VMM_MAX_SIZE)); } } void * alloc(size_t size, size_t * actual_size) override { // round up the allocation size to the alignment to ensure that all allocations are aligned for all data types const size_t alignment = 128; size = alignment * ((size + alignment - 1) / alignment); size_t avail = pool_size - pool_used; if (size > avail) { // round up to the next multiple of the granularity size_t reserve_size = size - avail; reserve_size = granularity * ((reserve_size + granularity - 1) / granularity); GGML_ASSERT(pool_size + reserve_size <= CUDA_POOL_VMM_MAX_SIZE); // allocate more physical memory CUmemAllocationProp prop = {}; prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; prop.location.id = device; CUmemGenericAllocationHandle handle; CU_CHECK(cuMemCreate(&handle, reserve_size, &prop, 0)); // reserve virtual address space (if not already reserved) if (pool_addr == 0) { CU_CHECK(cuMemAddressReserve(&pool_addr, CUDA_POOL_VMM_MAX_SIZE, 0, 0, 0)); } // map at the end of the pool CUdeviceptr start_ptr = (CUdeviceptr)((char *)(pool_addr) + pool_size); CU_CHECK(cuMemMap(start_ptr, reserve_size, 0, handle, 0)); #if defined(GGML_USE_HIP) mappings.push_back({start_ptr, reserve_size}); #endif // the memory allocation handle is no longer needed after mapping CU_CHECK(cuMemRelease(handle)); // set access CUmemAccessDesc access = {}; access.location.type = CU_MEM_LOCATION_TYPE_DEVICE; access.location.id = device; access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; CU_CHECK(cuMemSetAccess((CUdeviceptr)((char *)(pool_addr) + pool_size), reserve_size, &access, 1)); // add to the pool pool_size += reserve_size; //printf("cuda pool[%d]: size increased to %llu MB (reserved %llu MB)\n", // device, (unsigned long long) (pool_size/1024/1024), // (unsigned long long) (reserve_size/1024/1024)); } GGML_ASSERT(pool_addr != 0); void * ptr = (void *) ((CUdeviceptr)((char *)(pool_addr) + pool_used)); *actual_size = size; pool_used += size; #ifdef DEBUG_CUDA_MALLOC printf("cuda pool[%d]: allocated %llu bytes at %llx\n", device, (unsigned long long) size, ptr); #endif return ptr; } void free(void * ptr, size_t size) override { #ifdef DEBUG_CUDA_MALLOC printf("cuda pool[%d]: freed %llu bytes at %llx\n", device, (unsigned long long) size, ptr); #endif pool_used -= size; // all deallocations must be in 
reverse order of the allocations GGML_ASSERT(ptr == (void *) ((char *)(pool_addr) + pool_used)); } }; #endif // defined(GGML_USE_VMM) std::unique_ptr ggml_backend_cuda_context::new_pool_for_device(int device, [[maybe_unused]] int stream_no) { #if defined(GGML_USE_VMM) if (ggml_cuda_info().devices[device].vmm) { return std::unique_ptr(new ggml_cuda_pool_vmm(device)); } #endif // defined(GGML_USE_VMM) return std::unique_ptr(new ggml_cuda_pool_leg(device)); } // destroying a cuBLAS handle while a graph is being captured in a different thread can result in a CUDA error // this lock is used to ensure that no cuBLAS handle is destroyed while a graph is being captured static std::mutex ggml_cuda_lock; static std::condition_variable ggml_cuda_lock_cv; static std::atomic ggml_cuda_lock_counter; ggml_backend_cuda_context::~ggml_backend_cuda_context() { std::unique_lock lock(ggml_cuda_lock); ggml_cuda_lock_cv.wait(lock, []{ return ggml_cuda_lock_counter.load(std::memory_order_relaxed) == 0; }); if (copy_event != nullptr) { CUDA_CHECK(cudaEventDestroy(copy_event)); } for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) { for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) { if (streams[i][j] != nullptr) { CUDA_CHECK(cudaStreamDestroy(streams[i][j])); } } if (cublas_handles[i] != nullptr) { CUBLAS_CHECK(cublasDestroy(cublas_handles[i])); } } } // cuda buffer struct ggml_backend_cuda_buffer_context { int device; void * dev_ptr = nullptr; std::string name; ggml_backend_cuda_buffer_context(int device, void * dev_ptr) : device(device), dev_ptr(dev_ptr), name(GGML_CUDA_NAME + std::to_string(device)) { } ~ggml_backend_cuda_buffer_context() { CUDA_CHECK(cudaFree(dev_ptr)); } }; static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; delete ctx; } static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) { return buffer->iface.free_buffer == ggml_backend_cuda_buffer_free_buffer; } static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; return ctx->dev_ptr; } static enum ggml_status ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; if (tensor->view_src != NULL) { assert(tensor->view_src->buffer->buft == buffer->buft); return GGML_STATUS_SUCCESS; } if (ggml_is_quantized(tensor->type) && tensor->view_src == nullptr && ggml_backend_buffer_get_usage(buffer) != GGML_BACKEND_BUFFER_USAGE_COMPUTE) { // initialize padding to 0 to avoid possible NaN values const size_t original_size = ggml_nbytes(tensor); const size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor); if (padded_size > original_size) { ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaMemset((char *)tensor->data + original_size, 0, padded_size - original_size)); } } return GGML_STATUS_SUCCESS; } static void ggml_backend_cuda_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaMemsetAsync((char *)tensor->data + offset, value, size, cudaStreamPerThread)); CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } static void 
ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaMemcpyAsync((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice, cudaStreamPerThread)); CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, cudaStreamPerThread)); CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } static bool ggml_backend_cuda_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { if (ggml_backend_buffer_is_cuda(src->buffer)) { ggml_backend_cuda_buffer_context * src_ctx = (ggml_backend_cuda_buffer_context *)src->buffer->context; ggml_backend_cuda_buffer_context * dst_ctx = (ggml_backend_cuda_buffer_context *)dst->buffer->context; if (src_ctx->device == dst_ctx->device) { CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(src), cudaMemcpyDeviceToDevice, cudaStreamPerThread)); } else { #ifdef GGML_CUDA_NO_PEER_COPY return false; #else CUDA_CHECK(cudaMemcpyPeerAsync(dst->data, dst_ctx->device, src->data, src_ctx->device, ggml_nbytes(src), cudaStreamPerThread)); #endif } CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); return true; } return false; GGML_UNUSED(buffer); } static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaMemsetAsync(ctx->dev_ptr, value, buffer->size, cudaStreamPerThread)); CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } static const ggml_backend_buffer_i ggml_backend_cuda_buffer_interface = { /* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_buffer_get_base, /* .init_tensor = */ ggml_backend_cuda_buffer_init_tensor, /* .memset_tensor = */ ggml_backend_cuda_buffer_memset_tensor, /* .set_tensor = */ ggml_backend_cuda_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cuda_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_cuda_buffer_cpy_tensor, /* .clear = */ ggml_backend_cuda_buffer_clear, /* .reset = */ NULL, }; // cuda buffer type struct ggml_backend_cuda_buffer_type_context { int device; std::string name; }; static const char * ggml_backend_cuda_buffer_type_get_name(ggml_backend_buffer_type_t buft) { ggml_backend_cuda_buffer_type_context * ctx = (ggml_backend_cuda_buffer_type_context *)buft->context; return ctx->name.c_str(); } static bool ggml_backend_buft_is_cuda(ggml_backend_buffer_type_t buft) { return buft->iface.get_name == ggml_backend_cuda_buffer_type_get_name; } static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context; ggml_cuda_set_device(buft_ctx->device); void * dev_ptr; cudaError_t err = ggml_cuda_device_malloc(&dev_ptr, size, buft_ctx->device); if (err != cudaSuccess) { // clear the error (void)cudaGetLastError(); 
GGML_LOG_ERROR("%s: allocating %.2f MiB on device %d: cudaMalloc failed: %s\n", __func__, size / 1024.0 / 1024.0, buft_ctx->device, cudaGetErrorString(err)); return nullptr; } ggml_backend_cuda_buffer_context * ctx = new ggml_backend_cuda_buffer_context(buft_ctx->device, dev_ptr); return ggml_backend_buffer_init(buft, ggml_backend_cuda_buffer_interface, ctx, size); } static size_t ggml_backend_cuda_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 128; GGML_UNUSED(buft); } static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { size_t size = ggml_nbytes(tensor); int64_t ne0 = tensor->ne[0]; if (ggml_is_quantized(tensor->type)) { if (ne0 % MATRIX_ROW_PADDING != 0) { GGML_ASSERT(tensor->nb[0] == ggml_element_size(tensor)); size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } } return size; GGML_UNUSED(buft); } static const ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { /* .get_name = */ ggml_backend_cuda_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size, /* .is_host = */ NULL, }; ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { static std::mutex mutex; std::lock_guard lock(mutex); if (device >= ggml_backend_cuda_get_device_count()) { return nullptr; } static ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; static bool ggml_backend_cuda_buffer_type_initialized = false; if (!ggml_backend_cuda_buffer_type_initialized) { for (int i = 0; i < ggml_backend_cuda_get_device_count(); i++) { ggml_backend_cuda_buffer_types[i] = { /* .iface = */ ggml_backend_cuda_buffer_type_interface, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), i), /* .context = */ new ggml_backend_cuda_buffer_type_context{i, GGML_CUDA_NAME + std::to_string(i)}, }; } ggml_backend_cuda_buffer_type_initialized = true; } return &ggml_backend_cuda_buffer_types[device]; } // cuda split buffer static int64_t get_row_rounding(const std::array & tensor_split) { int64_t row_rounding = 0; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { if (tensor_split[id] >= (id + 1 < ggml_backend_cuda_get_device_count() ? tensor_split[id + 1] : 1.0f)) { continue; } const int cc = ggml_cuda_info().devices[id].cc; row_rounding = std::max(row_rounding, (int64_t)get_mmq_y_host(cc)); } return row_rounding; } static void get_row_split(int64_t * row_low, int64_t * row_high, const ggml_tensor * tensor, const std::array & tensor_split, int id) { const int64_t nrows = ggml_nrows(tensor); const int64_t rounding = get_row_rounding(tensor_split); *row_low = id == 0 ? 
0 : nrows*tensor_split[id]; *row_low -= *row_low % rounding; if (id == ggml_backend_cuda_get_device_count() - 1) { *row_high = nrows; } else { *row_high = nrows*tensor_split[id + 1]; *row_high -= *row_high % rounding; } } static size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]); } struct ggml_backend_cuda_split_buffer_type_context { int main_device; std::array tensor_split; std::string name; }; struct ggml_backend_cuda_split_buffer_context { ~ggml_backend_cuda_split_buffer_context() { for (ggml_tensor_extra_gpu * extra : tensor_extras) { for (int id = 0; id < GGML_CUDA_MAX_DEVICES; ++id) { for (int64_t is = 0; is < GGML_CUDA_MAX_STREAMS; ++is) { if (extra->events[id][is] != nullptr) { CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); } } if (extra->data_device[id] != nullptr) { CUDA_CHECK(cudaFree(extra->data_device[id])); } } delete extra; } } std::vector tensor_extras; }; static void ggml_backend_cuda_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; delete ctx; } static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buffer) { // the pointers are stored in the tensor extras, this is just a dummy address and never dereferenced return (void *)0x1000; GGML_UNUSED(buffer); } static enum ggml_status ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors"); ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context; const int64_t ne0 = tensor->ne[0]; ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; ctx->tensor_extras.push_back(extra); for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } size_t size = ggml_nbytes_split(tensor, nrows_split); const size_t original_size = size; // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } // FIXME: do not crash if cudaMalloc fails // currently, init_tensor cannot fail, it needs to be fixed in ggml-backend first ggml_cuda_set_device(id); char * buf; CUDA_CHECK(ggml_cuda_device_malloc((void**)&buf, size, id)); // set padding to 0 to avoid possible NaN values if (size > original_size) { CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size)); } extra->data_device[id] = buf; for (int64_t is = 0; is < GGML_CUDA_MAX_STREAMS; ++is) { CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming)); } } tensor->extra = extra; return GGML_STATUS_SUCCESS; } static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { // split tensors must always be set in their entirety at once 
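// (the rows of a split tensor are distributed across devices according to get_row_split,
//  one contiguous slice per device, so only whole-tensor writes starting at offset 0 are supported)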
GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors"); ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context; const int64_t ne0 = tensor->ne[0]; const size_t nb1 = tensor->nb[1]; ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } const size_t offset_split = row_low*nb1; size_t size = ggml_nbytes_split(tensor, nrows_split); const size_t original_size = size; // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } const char * buf_host = (const char *)data + offset_split; CUDA_CHECK(cudaMemcpyAsync(extra->data_device[id], buf_host, original_size, cudaMemcpyHostToDevice, cudaStreamPerThread)); } for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } } static void ggml_backend_cuda_split_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { // split tensors must always be set in their entirety at once GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors"); ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context; const int64_t ne0 = tensor->ne[0]; const size_t nb1 = tensor->nb[1]; ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } const size_t offset_split = row_low*nb1; size_t size = ggml_nbytes_split(tensor, nrows_split); const size_t original_size = size; // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } char * buf_host = (char *)data + offset_split; CUDA_CHECK(cudaMemcpyAsync(buf_host, extra->data_device[id], original_size, cudaMemcpyDeviceToHost, cudaStreamPerThread)); } for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } } static void ggml_backend_cuda_split_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { GGML_UNUSED(buffer); GGML_UNUSED(value); } static const ggml_backend_buffer_i ggml_backend_cuda_split_buffer_interface = { /* .free_buffer = */ ggml_backend_cuda_split_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_split_buffer_get_base, /* .init_tensor = */ ggml_backend_cuda_split_buffer_init_tensor, /* .memset_tensor = */ NULL, /* .set_tensor = */ ggml_backend_cuda_split_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cuda_split_buffer_get_tensor, /* .cpy_tensor = */ NULL, /* .clear = */ ggml_backend_cuda_split_buffer_clear, /* .reset = */ NULL, }; // cuda split buffer type 
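// A split buffer type distributes the rows of each contiguous tensor across all available devices
// according to the tensor_split fractions, with the per-device row counts rounded to the MMQ tile size
// (see get_row_rounding above). For example, tensor_split = {0.5f, 0.5f} places roughly half of the
// rows of each matrix on each of two GPUs.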
static const char * ggml_backend_cuda_split_buffer_type_get_name(ggml_backend_buffer_type_t buft) { ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context; return ctx->name.c_str(); } static bool ggml_backend_buft_is_cuda_split(ggml_backend_buffer_type_t buft) { return buft->iface.get_name == ggml_backend_cuda_split_buffer_type_get_name; } static ggml_backend_buffer_t ggml_backend_cuda_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { // since we don't know the exact split after rounding, we cannot allocate the device buffers at this point // instead, we allocate them for each tensor separately in init_tensor // however, the size still represents the maximum cumulative size of all the device buffers after the tensors are allocated, // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct. ggml_backend_cuda_split_buffer_context * ctx = new ggml_backend_cuda_split_buffer_context(); return ggml_backend_buffer_init(buft, ggml_backend_cuda_split_buffer_interface, ctx, size); } static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 128; GGML_UNUSED(buft); } static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context; GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors"); size_t total_size = 0; const int64_t ne0 = tensor->ne[0]; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, ctx->tensor_split, id); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } total_size += ggml_nbytes_split(tensor, nrows_split); // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { total_size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } } return total_size; } static bool ggml_backend_cuda_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } static const ggml_backend_buffer_type_i ggml_backend_cuda_split_buffer_type_interface = { /* .get_name = */ ggml_backend_cuda_split_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cuda_split_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cuda_split_buffer_type_get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cuda_split_buffer_type_get_alloc_size, /* .is_host = */ ggml_backend_cuda_split_buffer_type_is_host, }; ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split) { static std::mutex mutex; std::lock_guard lock(mutex); static std::map>, struct ggml_backend_buffer_type> buft_map; std::array tensor_split_arr = {}; bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_CUDA_MAX_DEVICES, [](float x) { return x == 0.0f; }); if (all_zero) { tensor_split_arr = ggml_cuda_info().default_tensor_split; } else { float split_sum = 0.0f; for (int i = 0; i < ggml_backend_cuda_get_device_count(); ++i) { tensor_split_arr[i] = split_sum; split_sum += tensor_split[i]; } for (int i = 0; i < ggml_backend_cuda_get_device_count(); ++i) { tensor_split_arr[i] /= split_sum; 
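            // Note: after this second loop tensor_split_arr[i] holds the *cumulative*
            // fraction, normalized to [0, 1), at which device i's rows start. The first
            // loop above is an exclusive prefix sum of the user-provided per-device
            // proportions; this one rescales it by their total.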
} } auto it = buft_map.find({main_device, tensor_split_arr}); if (it != buft_map.end()) { return &it->second; } auto * ctx = new ggml_backend_cuda_split_buffer_type_context{ main_device, tensor_split_arr, GGML_CUDA_NAME + std::to_string(main_device) + "_Split", }; struct ggml_backend_buffer_type buft { /* .iface = */ ggml_backend_cuda_split_buffer_type_interface, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), main_device), /* .context = */ ctx, }; auto result = buft_map.emplace(std::make_pair(main_device, tensor_split_arr), buft); return &result.first->second; } // host buffer type static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_type_t buft) { return GGML_CUDA_NAME "_Host"; GGML_UNUSED(buft); } static bool ggml_backend_buft_is_cuda_host(ggml_backend_buffer_type_t buft) { return buft->iface.get_name == ggml_backend_cuda_host_buffer_type_name; } static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { CUDA_CHECK(cudaFreeHost(buffer->context)); } static void * ggml_cuda_host_malloc(size_t size) { if (getenv("GGML_CUDA_NO_PINNED") != nullptr) { return nullptr; } void * ptr = nullptr; cudaError_t err = cudaMallocHost((void **) &ptr, size); if (err != cudaSuccess) { // clear the error (void)cudaGetLastError(); GGML_LOG_DEBUG("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, cudaGetErrorString(err)); return nullptr; } return ptr; } static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * ptr = ggml_cuda_host_malloc(size); if (ptr == nullptr) { // fallback to cpu buffer return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); } ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; buffer->iface.free_buffer = ggml_backend_cuda_host_buffer_free_buffer; return buffer; } ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_type_host = { /* .iface = */ { /* .get_name = */ ggml_backend_cuda_host_buffer_type_name, /* .alloc_buffer = */ ggml_backend_cuda_host_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), 0), /* .context = */ nullptr, }; return &ggml_backend_cuda_buffer_type_host; } //static bool ggml_backend_buffer_is_cuda_host(ggml_backend_buffer_t buffer) { // return buffer->buft->iface.get_name == ggml_backend_cuda_host_buffer_type_name; //} /// kernels typedef void (*ggml_cuda_op_mul_mat_t)( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream); #ifndef GGML_CUDA_PEER_MAX_BATCH_SIZE #define GGML_CUDA_PEER_MAX_BATCH_SIZE 128 #endif // GGML_CUDA_PEER_MAX_BATCH_SIZE #define MUL_MAT_SRC1_COL_STRIDE 128 static cudaError_t ggml_cuda_cpy_tensor_2d( void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, cudaStream_t stream) { const char * src_ptr = 
(const char *) src->data; char * dst_ptr = (char *) dst; const int64_t ne0 = src->ne[0]; const int64_t nb0 = src->nb[0]; const int64_t nb1 = src->nb[1]; const int64_t nb2 = src->nb[2]; const int64_t nb3 = src->nb[3]; const enum ggml_type type = src->type; const int64_t ts = ggml_type_size(type); const int64_t bs = ggml_blck_size(type); const int64_t i1_diff = i1_high - i1_low; const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3; if (nb0 == ts && nb1 == ts*ne0/bs) { return cudaMemcpyAsync(dst_ptr, x, i1_diff*nb1, cudaMemcpyDeviceToDevice, stream); } else if (nb0 == ts) { return cudaMemcpy2DAsync(dst_ptr, ts*ne0/bs, x, nb1, ts*ne0/bs, i1_diff, cudaMemcpyDeviceToDevice, stream); } else { for (int64_t i1 = 0; i1 < i1_diff; i1++) { const void * rx = (const void *) ((const char *) x + i1*nb1); void * rd = (void *) (dst_ptr + i1*ts*ne0/bs); // pretend the row is a matrix with cols=1 cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, cudaMemcpyDeviceToDevice, stream); if (r != cudaSuccess) { return r; } } return cudaSuccess; } } static void ggml_cuda_op_mul_mat_cublas( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream) { GGML_ASSERT(src0_dd_i != nullptr); GGML_ASSERT(src1_ddf_i != nullptr); GGML_ASSERT(dst_dd_i != nullptr); const int64_t ne00 = src0->ne[0]; const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; const int64_t row_diff = row_high - row_low; int id = ggml_cuda_get_device(); // the main device has a larger memory buffer to hold the results from all GPUs // ldc == nrows of the matrix that cuBLAS writes into int64_t ldc = id == ctx.device ? ne0 : row_diff; const int cc = ggml_cuda_info().devices[id].cc; const bool supports_bf16 = GGML_CUDA_CC_IS_NVIDIA(cc) || GGML_CUDA_CC_IS_AMD(cc) || (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; if (supports_bf16 && src0->type == GGML_TYPE_BF16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { ggml_cuda_pool_alloc src1_as_bf16(ctx.pool(id)); if (src1->type != GGML_TYPE_BF16) { const to_bf16_cuda_t to_bf16_cuda = ggml_get_to_bf16_cuda(src1->type); GGML_ASSERT(to_bf16_cuda != nullptr); size_t ne = src1_ncols*ne10; src1_as_bf16.alloc(ne); to_bf16_cuda(src1_ddf_i, src1_as_bf16.get(), ne, stream); } const nv_bfloat16 * src1_ptr = src1->type == GGML_TYPE_BF16 ? 
(const nv_bfloat16 *) src1_ddf_i : src1_as_bf16.get(); const nv_bfloat16 * src0_ptr = (const nv_bfloat16 *)src0_dd_i; ggml_cuda_pool_alloc dst_bf16(ctx.pool(id), row_diff*src1_ncols); const float alpha_f32 = 1.0f; const float beta_f32 = 0.0f; CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); CUBLAS_CHECK( cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, &alpha_f32, src0_ptr, CUDA_R_16BF, ne00, src1_ptr, CUDA_R_16BF, ne10, &beta_f32, dst_bf16.get(), CUDA_R_16BF, ldc, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_BF16); to_fp32_cuda(dst_bf16.get(), dst_dd_i, row_diff*src1_ncols, stream); } else if (fast_fp16_hardware_available(cc) && use_fp16) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = row_diff*ne00; src0_as_f16.alloc(ne); to_fp16_cuda(src0_dd_i, src0_as_f16.get(), ne, stream); } const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16.get(); ggml_cuda_pool_alloc src1_as_f16(ctx.pool(id)); if (src1->type != GGML_TYPE_F16) { const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = src1_ncols*ne10; src1_as_f16.alloc(ne); to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream); } const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16.get(); CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK( cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, &alpha, src0_ptr, CUDA_R_16F, ne00, src1_ptr, CUDA_R_16F, ne10, &beta, dst_dd_i, CUDA_R_32F, ldc, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { ggml_cuda_pool_alloc dst_f16(ctx.pool(id), row_diff*src1_ncols); const half alpha_f16 = 1.0f; const half beta_f16 = 0.0f; CUBLAS_CHECK( cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, &alpha_f16, src0_ptr, CUDA_R_16F, ne00, src1_ptr, CUDA_R_16F, ne10, &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); } } else { ggml_cuda_pool_alloc src0_ddq_as_f32(ctx.pool(id)); ggml_cuda_pool_alloc src1_ddq_as_f32(ctx.pool(id)); if (src0->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); GGML_ASSERT(to_fp32_cuda != nullptr); src0_ddq_as_f32.alloc(row_diff*ne00); to_fp32_cuda(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream); } if (src1->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src1->type); GGML_ASSERT(to_fp32_cuda != nullptr); src1_ddq_as_f32.alloc(src1_ncols*ne10); to_fp32_cuda(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream); } const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get(); const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? 
(const float *) src1_ddf_i : src1_ddq_as_f32.get(); const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); CUBLAS_CHECK( cublasSgemm(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, &alpha, src0_ddf_i, ne00, src1_ddf1_i, ne10, &beta, dst_dd_i, ldc)); } GGML_UNUSED_VARS(dst, src1_ddq_i, src1_padded_row_size); } static void ggml_cuda_set_peer_access(const int n_tokens, int main_device) { static bool peer_access_enabled = false; const bool enable_peer_access = n_tokens <= GGML_CUDA_PEER_MAX_BATCH_SIZE; if (peer_access_enabled == enable_peer_access) { return; } #ifdef NDEBUG for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { ggml_cuda_set_device(id); CUDA_CHECK(cudaDeviceSynchronize()); } for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { ggml_cuda_set_device(id); for (int id_other = 0; id_other < ggml_backend_cuda_get_device_count(); ++id_other) { if (id == id_other) { continue; } if (id != main_device && id_other != main_device) { continue; } int can_access_peer; CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access_peer, id, id_other)); if (can_access_peer) { if (enable_peer_access) { cudaError_t err = cudaDeviceEnablePeerAccess(id_other, 0); if (err != cudaErrorPeerAccessAlreadyEnabled) { CUDA_CHECK(err); } else { // reset the error (void)cudaGetLastError(); } } else { cudaError_t err = cudaDeviceDisablePeerAccess(id_other); if (err != cudaErrorPeerAccessNotEnabled) { CUDA_CHECK(err); } else { // reset the error (void)cudaGetLastError(); } } } } } ggml_cuda_set_device(main_device); #endif // NDEBUG peer_access_enabled = enable_peer_access; GGML_UNUSED(main_device); } static cudaError_t ggml_cuda_Memcpy2DPeerAsync( void * dst, int dstDevice, size_t dpitch, void * src, int srcDevice, size_t spitch, size_t width, size_t height, cudaStream_t stream) { #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) // cudaMemcpy2DAsync may fail with copies between vmm pools of different devices cudaMemcpy3DPeerParms p = {}; p.dstDevice = dstDevice; p.dstPtr = make_cudaPitchedPtr(dst, dpitch, dpitch, height); p.srcDevice = srcDevice; p.srcPtr = make_cudaPitchedPtr(src, spitch, spitch, height); p.extent = make_cudaExtent(width, height, 1); return cudaMemcpy3DPeerAsync(&p, stream); #else // HIP does not support cudaMemcpy3DPeerAsync or vmm pools GGML_UNUSED(dstDevice); GGML_UNUSED(srcDevice); return cudaMemcpy2DAsync(dst, dpitch, src, spitch, width, height, cudaMemcpyDeviceToDevice, stream); #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) } static void ggml_cuda_op_mul_mat( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_cuda_op_mul_mat_t op, quantize_cuda_t quantize_src1) { const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; const int64_t ne03 = src0->ne[3]; const int64_t ne10 = src1->ne[0]; const int64_t ne11 = src1->ne[1]; const int64_t ne12 = src1->ne[2]; const int64_t ne13 = src1->ne[3]; const int64_t nrows1 = ggml_nrows(src1); const int64_t ne0 = dst->ne[0]; const int64_t ne1 = dst->ne[1]; // const int64_t nb10 = src1->nb[0]; const int64_t nb11 = src1->nb[1]; const int64_t nb12 = src1->nb[2]; const int64_t nb13 = src1->nb[3]; const int64_t nb2 = dst->nb[2]; const int64_t nb3 = dst->nb[3]; ggml_backend_cuda_buffer_context * src1_ctx = (ggml_backend_cuda_buffer_context *) src1->buffer->context; ggml_backend_cuda_buffer_context * dst_ctx = (ggml_backend_cuda_buffer_context 
*) dst->buffer->context; GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1)); GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % ne03 == 0); const int64_t i02_divisor = ne12 / ne02; const int64_t i03_divisor = ne13 / ne03; const size_t src0_ts = ggml_type_size(src0->type); const size_t src0_bs = ggml_blck_size(src0->type); const size_t q8_1_ts = sizeof(block_q8_1); const size_t q8_1_bs = QK8_1; const bool src0_is_contiguous = ggml_is_contiguous(src0); const bool src1_is_contiguous = ggml_is_contiguous(src1); const int64_t src1_padded_col_size = GGML_PAD(ne10, MATRIX_ROW_PADDING); const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft); GGML_ASSERT(!(split && ne02 > 1)); GGML_ASSERT(!(split && ne03 > 1)); GGML_ASSERT(!(split && ne02 < ne12)); GGML_ASSERT(!(split && ne03 < ne13)); ggml_tensor_extra_gpu * src0_extra = split ? (ggml_tensor_extra_gpu *) src0->extra : nullptr; std::array tensor_split; if (split) { ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; tensor_split = buft_ctx->tensor_split; } struct dev_data { int cc; ggml_cuda_pool_alloc src0_dd_alloc; ggml_cuda_pool_alloc src1_ddf_alloc; ggml_cuda_pool_alloc src1_ddq_alloc; ggml_cuda_pool_alloc dst_dd_alloc; char * src0_dd = nullptr; float * src1_ddf = nullptr; // float char * src1_ddq = nullptr; // q8_1 float * dst_dd = nullptr; int64_t row_low; int64_t row_high; }; dev_data dev[GGML_CUDA_MAX_DEVICES]; int used_devices = 0; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { dev[id].cc = ggml_cuda_info().devices[id].cc; // by default, use all rows dev[id].row_low = 0; dev[id].row_high = ne01; // for multi GPU, get the row boundaries from tensor split // and round to mul_mat_q tile sizes if (split) { const int64_t rounding = get_row_rounding(tensor_split); if (id != 0) { dev[id].row_low = ne01*tensor_split[id]; if (dev[id].row_low < ne01) { dev[id].row_low -= dev[id].row_low % rounding; } } if (id != ggml_backend_cuda_get_device_count() - 1) { dev[id].row_high = ne01*tensor_split[id + 1]; if (dev[id].row_high < ne01) { dev[id].row_high -= dev[id].row_high % rounding; } } } } for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { if ((!split && id != ctx.device) || dev[id].row_low == dev[id].row_high) { continue; } used_devices++; const bool src1_on_device = id == src1_ctx->device; const bool dst_on_device = id == dst_ctx->device; ggml_cuda_set_device(id); cudaStream_t stream = ctx.stream(id, 0); if (src0_is_contiguous) { dev[id].src0_dd = split ? (char *) src0_extra->data_device[id] : (char *) src0->data; } else { // If src0 is not contiguous it will be copied to a temporary buffer. // This buffer needs to be cleared entirely because multiple regions will function as padding. 
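        // Put differently: the quantized mat-mul kernels always read whole rows padded up
        // to MATRIX_ROW_PADDING (512) elements, so every byte of this temporary buffer that
        // does not receive real tensor data effectively acts as padding and must be zeroed,
        // otherwise the kernels would read uninitialized memory.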
const size_t nbytes_data = ggml_nbytes(src0); const size_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING); dev[id].src0_dd = dev[id].src0_dd_alloc.alloc(ctx.pool(id), nbytes_data + nbytes_padding); CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd, 0, nbytes_data + nbytes_padding, stream)); } // If src0 is on a temporary compute buffer (partial offloading) there may be some padding that needs to be cleared: if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_is_quantized(src0->type) && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) { GGML_ASSERT(ggml_is_contiguously_allocated(src0)); GGML_ASSERT(!src0->view_src); const size_t nbytes_data = ggml_row_size(src0->type, (dev[id].row_high - dev[id].row_low)*ne00); const size_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING); CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd + nbytes_data, 0, nbytes_padding, stream)); } if (src1_on_device && src1_is_contiguous) { dev[id].src1_ddf = (float *) src1->data; } else { dev[id].src1_ddf = dev[id].src1_ddf_alloc.alloc(ctx.pool(id), ggml_nelements(src1)); } if (quantize_src1) { size_t src_1_ddq_size = nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs; if (quantize_src1 == quantize_mmq_q8_1_cuda) { src_1_ddq_size += get_mmq_x_max_host(dev[id].cc)*sizeof(block_q8_1_mmq); } dev[id].src1_ddq = dev[id].src1_ddq_alloc.alloc(ctx.pool(id), src_1_ddq_size); if (src1_on_device && src1_is_contiguous) { quantize_src1( dev[id].src1_ddf, nullptr, dev[id].src1_ddq, src0->type, ne10, nb11/sizeof(float), nb12/sizeof(float), nb13/sizeof(float), src1_padded_col_size, ne11, ne12, ne13, stream); CUDA_CHECK(cudaGetLastError()); } } if (dst_on_device) { dev[id].dst_dd = (float *) dst->data; } else { const size_t size_dst_ddf = split ? (dev[id].row_high - dev[id].row_low)*ne1 : ggml_nelements(dst); dev[id].dst_dd = dev[id].dst_dd_alloc.alloc(ctx.pool(id), size_dst_ddf); } } // if multiple devices are used they need to wait for the main device // here an event is recorded that signals that the main device has finished calculating the input data if (split && used_devices > 1) { ggml_cuda_set_device(ctx.device); CUDA_CHECK(cudaEventRecord(src0_extra->events[ctx.device][0], ctx.stream())); } const int64_t src1_col_stride = split && used_devices > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11; for (int64_t src1_col_0 = 0; src1_col_0 < ne11; src1_col_0 += src1_col_stride) { const int64_t is = split ? (src1_col_0/src1_col_stride) % GGML_CUDA_MAX_STREAMS : 0; const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? 
ne11 - src1_col_0 : src1_col_stride; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { if ((!split && id != ctx.device) || dev[id].row_low == dev[id].row_high) { continue; } const bool src1_on_device = id == src1_ctx->device; const bool dst_on_device = id == dst_ctx->device; const int64_t row_diff = dev[id].row_high - dev[id].row_low; ggml_cuda_set_device(id); cudaStream_t stream = ctx.stream(id, is); // wait for main GPU data if necessary if (split && (id != ctx.device || is != 0)) { CUDA_CHECK(cudaStreamWaitEvent(stream, src0_extra->events[ctx.device][0], 0)); } for (int64_t i0 = 0; i0 < ne13*ne12; ++i0) { const int64_t i03 = i0 / ne12; const int64_t i02 = i0 % ne12; size_t src1_ddq_i_offset = i0*ne11 * src1_padded_col_size*q8_1_ts/q8_1_bs; if (quantize_src1 == quantize_mmq_q8_1_cuda) { src1_ddq_i_offset += src1_col_0 * sizeof(block_q8_1_mmq); } else { src1_ddq_i_offset += src1_col_0 * src1_padded_col_size*q8_1_ts/q8_1_bs; } // for split tensors the data begins at i0 == i0_offset_low const size_t nbytes_src0_matrix = ne01*ne00*src0_ts / src0_bs; char * src0_dd_i = dev[id].src0_dd + ((i03/i03_divisor)*ne02 + (i02/i02_divisor)) * nbytes_src0_matrix; float * src1_ddf_i = dev[id].src1_ddf + (i0*ne11 + src1_col_0) * ne10; char * src1_ddq_i = dev[id].src1_ddq + src1_ddq_i_offset; float * dst_dd_i = dev[id].dst_dd + (i0*ne1 + src1_col_0) * (dst_on_device ? ne0 : row_diff); // the main device memory buffer can be on VRAM scratch, with space for all partial results // in that case an offset on dst_ddf_i is needed if (id == ctx.device) { dst_dd_i += dev[id].row_low; // offset is 0 if no tensor split } // copy src0, src1 to device if necessary if (src1_is_contiguous) { if (id != ctx.device) { if (quantize_src1) { char * src1_ddq_i_source = dev[ctx.device].src1_ddq + src1_ddq_i_offset; if (quantize_src1 == quantize_mmq_q8_1_cuda) { const size_t pitch = ne11*sizeof(block_q8_1_mmq); const size_t width = src1_ncols*sizeof(block_q8_1_mmq); const size_t height = src1_padded_col_size/(4*QK8_1); CUDA_CHECK(ggml_cuda_Memcpy2DPeerAsync(src1_ddq_i, id, pitch, src1_ddq_i_source, ctx.device, pitch, width, height, stream)); } else { CUDA_CHECK(cudaMemcpyPeerAsync( src1_ddq_i, id, src1_ddq_i_source, ctx.device, src1_ncols*src1_padded_col_size*q8_1_ts/q8_1_bs, stream)); } } else { float * src1_ddf_i_source = (float *) src1->data; src1_ddf_i_source += (i0*ne11 + src1_col_0) * ne10; CUDA_CHECK(cudaMemcpyPeerAsync(src1_ddf_i, id, src1_ddf_i_source, ctx.device, src1_ncols*ne10*sizeof(float), stream)); } } } else if (src1_on_device && !src1_is_contiguous) { CUDA_CHECK(ggml_cuda_cpy_tensor_2d( src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream)); } else { GGML_ABORT("fatal error"); } if (quantize_src1 && !src1_is_contiguous) { quantize_src1( src1_ddf_i, nullptr, src1_ddq_i, src0->type, ne10, ne10, ne11*ne10, ne12*ne11*ne10, src1_padded_col_size, src1_ncols, 1, 1, stream); CUDA_CHECK(cudaGetLastError()); } if (src1_col_0 == 0 && !src0_is_contiguous && i03 % i03_divisor == 0 && i02 % i02_divisor == 0) { CUDA_CHECK(ggml_cuda_cpy_tensor_2d( src0_dd_i, src0, i03/i03_divisor, i02/i02_divisor, dev[id].row_low, dev[id].row_high, stream)); } // do the computation op(ctx, src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i, dev[id].row_low, dev[id].row_high, src1_ncols, src1_padded_col_size, stream); CUDA_CHECK(cudaGetLastError()); // copy dst to host or other device if necessary if (!dst_on_device) { void * dst_off_device = dst->data; if (split) { // src0 = weight matrix is saved as 
a transposed matrix for better memory layout. // dst is NOT transposed. // The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU. // Instead they need to be copied to the correct slice in ne0 = dst row index. // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results. float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3); GGML_ASSERT(dst->nb[1] == ne0*sizeof(float)); dhf_dst_i += src1_col_0*ne0 + dev[id].row_low; CUDA_CHECK(ggml_cuda_Memcpy2DPeerAsync( dhf_dst_i, ctx.device, ne0*sizeof(float), dst_dd_i, id, row_diff*sizeof(float), row_diff*sizeof(float), src1_ncols, stream)); } else { float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3); GGML_ASSERT(dst->nb[1] == ne0*sizeof(float)); dhf_dst_i += src1_col_0*ne0; CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_dd_i, src1_ncols*ne0*sizeof(float), cudaMemcpyDeviceToDevice, stream)); } } // add event for the main device to wait on until other device is done if (split && (id != ctx.device || is != 0)) { CUDA_CHECK(cudaEventRecord(src0_extra->events[id][is], stream)); } } } } // main device waits for all other devices to be finished if (split && ggml_backend_cuda_get_device_count() > 1) { int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE; is_max = is_max <= GGML_CUDA_MAX_STREAMS ? is_max : GGML_CUDA_MAX_STREAMS; ggml_cuda_set_device(ctx.device); for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { if (dev[id].row_low == dev[id].row_high) { continue; } for (int64_t is = 0; is < is_max; ++is) { CUDA_CHECK(cudaStreamWaitEvent(ctx.stream(), src0_extra->events[id][is], 0)); } } } } static __global__ void k_compute_batched_ptrs( const void * src0_as_f16, const void * src1_as_f16, char * dst, const void ** ptrs_src, void ** ptrs_dst, int64_t ne12, int64_t ne13, int64_t ne23, size_t nb02, size_t nb03, size_t nb12, size_t nb13, size_t nbd2, size_t nbd3, int64_t r2, int64_t r3) { const int64_t i13 = blockIdx.x * blockDim.x + threadIdx.x; const int64_t i12 = blockIdx.y * blockDim.y + threadIdx.y; if (i13 >= ne13 || i12 >= ne12) { return; } const int64_t i03 = i13 / r3; const int64_t i02 = i12 / r2; ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03; ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12 + i13*nb13; ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; } // Type traits for mapping ggml types to CUDA/cuBLAS types template struct batched_mul_mat_traits; template<> struct batched_mul_mat_traits { using cuda_type = float; static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F; static inline const cudaDataType_t data_type = CUDA_R_32F; static inline const ggml_type ggml_type_val = GGML_TYPE_F32; static inline const float alpha = 1.0f; static inline const float beta = 0.0f; static inline const void* get_alpha() { static const float val = alpha; return &val; } static inline const void* get_beta() { static const float val = beta; return &val; } static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_fp32_nc_cuda(src_type); } }; template<> struct batched_mul_mat_traits { using cuda_type = nv_bfloat16; static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F; static inline const cudaDataType_t data_type = CUDA_R_16BF; static inline const ggml_type ggml_type_val = GGML_TYPE_BF16; static inline const float alpha = 1.0f; static inline const float beta = 
0.0f; static inline const void* get_alpha() { static const float val = alpha; return &val; } static inline const void* get_beta() { static const float val = beta; return &val; } static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_bf16_nc_cuda(src_type); } }; template<> struct batched_mul_mat_traits { using cuda_type = half; static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_16F; static inline const cudaDataType_t data_type = CUDA_R_16F; static inline const ggml_type ggml_type_val = GGML_TYPE_F16; static inline const half alpha = 1.0; static inline const half beta = 0.0; static inline const void* get_alpha() { static const half val = alpha; return &val; } static inline const void* get_beta() { static const half val = beta; return &val; } static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_fp16_nc_cuda(src_type); } }; template static void ggml_cuda_mul_mat_batched_cublas_impl(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { using traits = batched_mul_mat_traits; using cuda_t = typename traits::cuda_type; GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); GGML_ASSERT(!ggml_backend_buft_is_cuda_split(src0->buffer->buft)); GGML_ASSERT(src0->type == src0_type); GGML_ASSERT(ggml_is_contiguous(dst)); // Byte offsets and tensor dimensions are currently used in an inconsistent way for dst. // As long as dst is contiguous this does not matter though. GGML_TENSOR_BINARY_OP_LOCALS const int64_t ne_dst = ggml_nelements(dst); cudaStream_t main_stream = ctx.stream(); CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(), main_stream)); float * dst_ddf = (float *) dst->data; const size_t ts_src1 = ggml_type_size(src1->type); GGML_ASSERT(nb10 == ts_src1); int64_t s11 = nb11 / ts_src1; int64_t s12 = nb12 / ts_src1; int64_t s13 = nb13 / ts_src1; const cuda_t * src0_ptr = nullptr; const cuda_t * src1_ptr = nullptr; ggml_cuda_pool_alloc src0_alloc(ctx.pool()); ggml_cuda_pool_alloc src1_alloc(ctx.pool()); bool is_src0_cont_2 = ggml_is_contiguous_2(src0); bool is_src1_cont_2 = ggml_is_contiguous_2(src1); // Handle src0 src0_ptr = (const cuda_t *) src0->data; // Handle src1 - convert if necessary if (src1->type == src0_type) { src1_ptr = (const cuda_t *) src1->data; } else { // Convert src1 to target type using traits conversion functions const int64_t ne_src1 = ggml_nelements(src1); src1_alloc.alloc(ne_src1); const auto convert_func = traits::get_nc_converter(src1->type); GGML_ASSERT(convert_func != nullptr); convert_func(src1->data, src1_alloc.get(), ne10, ne11, ne12, ne13, s11, s12, s13, main_stream); src1_ptr = src1_alloc.get(); s11 = ne10; s12 = ne11*s11; s13 = ne12*s12; is_src1_cont_2 = true; } // Setup destination buffer ggml_cuda_pool_alloc dst_temp(ctx.pool()); char * dst_t; size_t nbd2 = dst->nb[2]; size_t nbd3 = dst->nb[3]; cublasComputeType_t cu_compute_type = traits::compute_type; cudaDataType_t cu_data_type = traits::data_type; cudaDataType_t cu_data_type_a = traits::data_type; cudaDataType_t cu_data_type_b = traits::data_type; const void * alpha = traits::get_alpha(); const void * beta = traits::get_beta(); const float alpha_f32 = 1.0f; const float beta_f32 = 0.0f; if (dst->op_params[0] == GGML_PREC_DEFAULT) { if constexpr (src0_type == GGML_TYPE_F32) { dst_t = (char *) dst_ddf; // Direct F32 output } else { dst_t = (char *) dst_temp.alloc(ne_dst); nbd2 /= sizeof(float) / sizeof(cuda_t); nbd3 /= sizeof(float) / sizeof(cuda_t); } } else { dst_t = (char 
*) dst_ddf; cu_compute_type = CUBLAS_COMPUTE_32F; cu_data_type = CUDA_R_32F; alpha = &alpha_f32; beta = &beta_f32; } int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { cu_compute_type = CUBLAS_COMPUTE_32F; alpha = &alpha_f32; beta = &beta_f32; } GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % ne03 == 0); // broadcast factors const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; if (r2 == 1 && r3 == 1 && is_src0_cont_2 && is_src1_cont_2) { // with a [0, 2, 1, 3] perm. and ne02==1 the matrix strides need to be determined from dim 3: const int64_t sma = ne02 == 1 ? nb03/nb00 : nb02/nb00; const int64_t smb = ne12 == 1 ? s13 : s12; // there is no broadcast and src0, src1 are contiguous across dims 2, 3 // use cublasGemmStridedBatchedEx CUBLAS_CHECK( cublasGemmStridedBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, alpha, src0_ptr, cu_data_type_a, nb01/nb00, sma, // strideA src1_ptr, cu_data_type_b, s11, smb, // strideB beta, dst_t, cu_data_type, ne0, ne1*ne0, // strideC ne12*ne13, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { // use cublasGemmBatchedEx const int64_t ne23 = ne12*ne13; ggml_cuda_pool_alloc ptrs_src(ctx.pool(), 2*ne23); ggml_cuda_pool_alloc< void *> ptrs_dst(ctx.pool(), 1*ne23); size_t src1_stride_size = sizeof(cuda_t); const int threads_x = 16; const int threads_y = 16; dim3 block_dims(threads_x, threads_y); dim3 grid_dims( (ne13 + threads_x - 1) / threads_x, (ne12 + threads_y - 1) / threads_y ); k_compute_batched_ptrs<<>>( src0_ptr, src1_ptr, dst_t, ptrs_src.get(), ptrs_dst.get(), ne12, ne13, ne23, nb02, nb03, (src1->type == src0_type) ? nb12 : s12*src1_stride_size, (src1->type == src0_type) ? nb13 : s13*src1_stride_size, nbd2, nbd3, r2, r3); CUDA_CHECK(cudaGetLastError()); CUBLAS_CHECK( cublasGemmBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, alpha, (const void **) (ptrs_src.get() + 0*ne23), cu_data_type_a, nb01/nb00, (const void **) (ptrs_src.get() + 1*ne23), cu_data_type_b, s11, beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne0, ne23, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } // Convert output back to F32 if needed if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type != CUDA_R_32F) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(traits::ggml_type_val); to_fp32_cuda(dst_temp.get(), dst_ddf, ne_dst, main_stream); } } static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || src0->type == GGML_TYPE_F32); switch (src0->type) { case GGML_TYPE_F32: ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); break; case GGML_TYPE_BF16: ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); break; case GGML_TYPE_F16: ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); break; default: GGML_ABORT("Unsupported type"); } } static bool ggml_cuda_should_fuse_mul_mat(const ggml_tensor * ffn_up, const ggml_tensor * ffn_gate, const ggml_tensor * glu, const ggml_tensor * ffn_up_bias = nullptr, const ggml_tensor * ffn_gate_bias = nullptr) { const bool has_bias = ffn_up_bias != nullptr || ffn_gate_bias != nullptr; if (has_bias && (!ffn_up_bias || !ffn_gate_bias)) { return false; } const bool is_mul_mat = ffn_up->op == GGML_OP_MUL_MAT && ffn_gate->op == GGML_OP_MUL_MAT && glu->op == GGML_OP_GLU; const bool 
is_mul_mat_id = ffn_up->op == GGML_OP_MUL_MAT_ID && ffn_gate->op == GGML_OP_MUL_MAT_ID && glu->op == GGML_OP_GLU; GGML_ASSERT(ffn_up && ffn_gate && glu); if (!is_mul_mat && !is_mul_mat_id) { return false; } const ggml_op expected_bias_op = is_mul_mat ? GGML_OP_ADD : GGML_OP_ADD_ID; if (has_bias) { if (ffn_up_bias->op != expected_bias_op || ffn_gate_bias->op != expected_bias_op) { return false; } if (glu->src[0] != ffn_gate_bias || glu->src[1] != ffn_up_bias) { return false; } if (expected_bias_op == GGML_OP_ADD) { const bool up_has_mul = ffn_up_bias->src[0] == ffn_up || ffn_up_bias->src[1] == ffn_up; const bool gate_has_mul = ffn_gate_bias->src[0] == ffn_gate || ffn_gate_bias->src[1] == ffn_gate; if (!up_has_mul || !gate_has_mul) { return false; } } else { // GGML_OP_ADD_ID if (ffn_up_bias->src[0] != ffn_up || ffn_gate_bias->src[0] != ffn_gate) { return false; } if (ffn_up_bias->src[2] != ffn_up->src[2] || ffn_gate_bias->src[2] != ffn_gate->src[2]) { return false; } } } else { if (glu->src[0] != ffn_gate && glu->src[1] != ffn_up) { return false; } } if (ffn_up->src[0]->type != ffn_gate->src[0]->type || !ggml_are_same_shape(ffn_up->src[0], ffn_gate->src[0]) || !ggml_are_same_stride(ffn_up->src[0], ffn_gate->src[0])) { return false; } if (ffn_up->src[1] != ffn_gate->src[1]) { return false; } if (ffn_up->src[2] && (ffn_up->src[2] != ffn_gate->src[2])) { return false; } static constexpr std::array valid_glu_ops = { GGML_GLU_OP_SWIGLU, GGML_GLU_OP_GEGLU, GGML_GLU_OP_SWIGLU_OAI }; if (std::find(valid_glu_ops.begin(), valid_glu_ops.end(), ggml_get_glu_op(glu)) == valid_glu_ops.end()) { return false; } if (const bool swapped = ggml_get_op_params_i32(glu, 1); swapped) { return false; } const bool split = ggml_backend_buft_is_cuda_split(ffn_up->src[0]->buffer->buft) || ggml_backend_buft_is_cuda_split(ffn_gate->src[0]->buffer->buft); //TODO: add support for fusion for split buffers if (split) { return false; } return true; } static bool ggml_cuda_should_fuse_mul_mat_vec_f(const ggml_tensor * tensor) { ggml_tensor * src0 = tensor->src[0]; ggml_tensor * src1 = tensor->src[1]; const ggml_tensor * dst = tensor; const bool is_mul_mat_id = tensor->op == GGML_OP_MUL_MAT_ID; bool use_mul_mat_vec_f = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src0->nb, is_mul_mat_id ? 
src1->ne[2] : src1->ne[1]); const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft) || ggml_backend_buft_is_cuda_split(src1->buffer->buft); //TODO: add support for fusion for split buffers if (split) { return false; } //we only support fusion for ncols_dst = 1 if (tensor->op == GGML_OP_MUL_MAT && dst->ne[1] != 1) { return false; } if (tensor->op == GGML_OP_MUL_MAT_ID && dst->ne[2] != 1) { return false; } return use_mul_mat_vec_f; } static bool ggml_cuda_should_fuse_mul_mat_vec_q(const ggml_tensor * tensor) { ggml_tensor * src0 = tensor->src[0]; ggml_tensor * src1 = tensor->src[1]; const ggml_tensor * dst = tensor; const bool bad_padding_clear = ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0) && src0->view_src; bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE; // fusion is not universally faster on Pascal const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; if (cc <= GGML_CUDA_CC_PASCAL) { return false; } //we only support fusion for ncols_dst = 1 if (tensor->op == GGML_OP_MUL_MAT && dst->ne[1] != 1) { return false; } if (tensor->op == GGML_OP_MUL_MAT_ID && dst->ne[2] != 1) { return false; } const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft) || ggml_backend_buft_is_cuda_split(src1->buffer->buft); //TODO: add support for fusion for split buffers if (split) { return false; } return use_mul_mat_vec_q; } static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft); // If src0 is a temporary compute buffer it may have some padding that needs to be cleared for mul_mat_vec_q or mul_mat_q. // But if src0 is also a view of another tensor then this cannot be done safely because it may overwrite valid tensor data. // Therefore, in such cases use cuBLAS. const bool bad_padding_clear = ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0) && src0->view_src; bool use_mul_mat_vec_f = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; bool use_mul_mat_f = !ggml_is_quantized(src0->type) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE; bool use_mul_mat_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; bool any_gpus_with_slow_fp16 = false; if (split) { ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; auto & tensor_split = buft_ctx->tensor_split; for (int id = 0; id < ggml_backend_cuda_get_device_count(); ++id) { // skip devices that are not going to do any work: if (tensor_split[id] >= (id + 1 < ggml_backend_cuda_get_device_count() ? 
tensor_split[id + 1] : 1.0f)) { continue; } const int cc = ggml_cuda_info().devices[id].cc; const int warp_size = ggml_cuda_info().devices[id].warp_size; use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1], /*n_experts=*/0); use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src0->nb, src1->ne[1], /*mul_mat_id=*/false); use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src0->nb, src1->ne[1]); any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); } } else { const int cc = ggml_cuda_info().devices[ctx.device].cc; const int warp_size = ggml_cuda_info().devices[ctx.device].warp_size; use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1], /*n_experts=*/0); use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src0->nb, src1->ne[1], /*mul_mat_id=*/false); use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src0->nb, src1->ne[1]); any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); } // debug helpers //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]); //printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]); //printf("src1: %8d %8d %8d %8d\n", src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]); //printf(" %8d %8d %8d %8d\n", src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3]); //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); //TODO update for generic tensor parallelism const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; bool use_batched_cublas_f16 = src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16); bool use_batched_cublas_bf16 = src0->type == GGML_TYPE_BF16 && bf16_mma_hardware_available(cc); bool use_batched_cublas_f32 = src0->type == GGML_TYPE_F32; if (!split && use_mul_mat_vec_f) { // the custom F16 vector kernel can be used over batched cuBLAS GEMM // but this is only faster for GPUs without tensor cores or with a thin src0 matrix (particularly KQV in attention) ggml_cuda_mul_mat_vec_f(ctx, src0, src1, nullptr, dst); } else if (!split && use_mul_mat_f) { ggml_cuda_mul_mat_f(ctx, src0, src1, nullptr, dst); } else if (!split && use_mul_mat_vec_q) { ggml_cuda_mul_mat_vec_q(ctx, src0, src1, nullptr, dst); } else if (!split && use_mul_mat_q) { ggml_cuda_mul_mat_q(ctx, src0, src1, nullptr, dst); } else if (!split && (use_batched_cublas_f16 || use_batched_cublas_bf16 || use_batched_cublas_f32) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { // general KQ + KQV multi-batch without FlashAttention ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst); } else if (use_mul_mat_vec_f) { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_f, nullptr); } else if (use_mul_mat_vec_q) { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, quantize_row_q8_1_cuda); } else if (use_mul_mat_q) { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_q, quantize_mmq_q8_1_cuda); } else { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, 
ggml_cuda_op_mul_mat_cublas, nullptr); } } static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * ids = dst->src[2]; GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(!ggml_backend_buft_is_cuda_split(src0->buffer->buft) && "mul_mat_id does not support split buffers"); GGML_TENSOR_BINARY_OP_LOCALS const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { if (ne2 == 1) { if (ggml_is_quantized(src0->type)) { ggml_cuda_mul_mat_vec_q(ctx, src0, src1, ids, dst); } else { ggml_cuda_mul_mat_vec_f(ctx, src0, src1, ids, dst); } return; } if (ggml_cuda_should_use_mmq(src0->type, cc, ne12, /*n_experts=*/ne02)) { ggml_cuda_mul_mat_q(ctx, src0, src1, ids, dst); return; } if (ggml_cuda_should_use_mmf(src0->type, cc, WARP_SIZE, src0->ne, src0->nb, src1->ne[2], /*mul_mat_id=*/true)) { ggml_cuda_mul_mat_f(ctx, src0, src1, ids, dst); return; } } cudaStream_t stream = ctx.stream(); GGML_ASSERT(nb12 % nb11 == 0); GGML_ASSERT(nb2 % nb1 == 0); const ggml_type type_src1_sorted = (src0->type == GGML_TYPE_F16 && !fast_fp16_hardware_available(cc)) || ggml_is_quantized(src0->type) ? GGML_TYPE_F32 : src0->type; const ggml_type type_dst_sorted = GGML_TYPE_F32; const size_t ts_src1_sorted = ggml_type_size(type_src1_sorted); const size_t ts_dst_sorted = ggml_type_size(type_dst_sorted); const int64_t n_expert_used = ids->ne[0]; const int64_t ne_get_rows = ne12 * n_expert_used; std::vector ids_to_sorted_host; ids_to_sorted_host.reserve(2*ne_get_rows); std::vector ids_from_sorted_host(ne_get_rows); ggml_cuda_pool_alloc ids_buf_dev(ctx.pool(), 2*ne_get_rows); std::vector tokens_per_expert(ne02); ggml_cuda_pool_alloc src1_sorted(ctx.pool(), ne12*n_expert_used*ne10*ts_src1_sorted); ggml_cuda_pool_alloc dst_sorted(ctx.pool(), ne2 *n_expert_used* ne0*ts_dst_sorted); std::vector ids_host(ggml_nbytes(ids)); CUDA_CHECK(cudaMemcpyAsync(ids_host.data(), ids->data, ggml_nbytes(ids), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int64_t i02 = 0; i02 < ne02; ++i02) { // expert matrices for (int64_t i12 = 0; i12 < ne12; ++i12) { // tokens for (int64_t iex = 0; iex < n_expert_used; ++iex) { const int32_t expert_to_use = *(const int32_t *)(ids_host.data() + i12*ids->nb[1] + iex*ids->nb[0]); assert(expert_to_use >= 0 && expert_to_use < ne02); if (expert_to_use == i02) { ids_from_sorted_host[i12*n_expert_used + iex] = ids_to_sorted_host.size(); ids_to_sorted_host.push_back(i12*ne11 + iex % ne11); tokens_per_expert[i02]++; break; } } } } GGML_ASSERT(ids_to_sorted_host.size() == size_t(ne_get_rows)); ids_to_sorted_host.insert(ids_to_sorted_host.end(), ids_from_sorted_host.begin(), ids_from_sorted_host.end()); CUDA_CHECK(cudaMemcpyAsync(ids_buf_dev.ptr, ids_to_sorted_host.data(), 2*ne_get_rows*sizeof(int32_t), cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); const int32_t * ids_to_sorted = ids_buf_dev.ptr + 0*ne_get_rows; const int32_t * ids_from_sorted = ids_buf_dev.ptr + 1*ne_get_rows; get_rows_cuda(src1->data, src1->type, ids_to_sorted, src1_sorted.ptr, type_src1_sorted, ne10, nb11, nb12, nb13, ne_get_rows, 1, 1, sizeof(int32_t), ne_get_rows*sizeof(int32_t), ne_get_rows*sizeof(int32_t), ne10*ts_src1_sorted, ne_get_rows*ne10*ts_src1_sorted, ne_get_rows*ne10*ts_src1_sorted, stream); CUDA_CHECK(cudaGetLastError()); char * 
src1_data_cur = (char *) src1_sorted.ptr; char * dst_data_cur = (char *) dst_sorted.ptr; for (int64_t i02 = 0; i02 < ne02; ++i02) { if (tokens_per_expert[i02] == 0) { continue; } ggml_tensor src0_slice = *src0; src0_slice.ne[2] = 1; src0_slice.nb[3] = src0_slice.nb[2]; src0_slice.op = GGML_OP_VIEW; src0_slice.view_src = dst->src[0]; // non-const pointer to src0 src0_slice.data = (char *) src0->data + i02*nb02; ggml_tensor src1_slice; memset(&src1_slice, 0, sizeof(src1_slice)); src1_slice.buffer = src1->buffer; src1_slice.type = type_src1_sorted; src1_slice.ne[0] = ne10; src1_slice.ne[1] = tokens_per_expert[i02]; src1_slice.ne[2] = 1; src1_slice.ne[3] = 1; src1_slice.nb[0] = ts_src1_sorted; src1_slice.nb[1] = src1_slice.ne[0] * src1_slice.nb[0]; src1_slice.nb[2] = src1_slice.ne[1] * src1_slice.nb[1]; src1_slice.nb[3] = src1_slice.ne[2] * src1_slice.nb[2]; src1_slice.data = src1_data_cur; ggml_tensor dst_slice; memset(&dst_slice, 0, sizeof(dst_slice)); dst_slice.buffer = dst->buffer; dst_slice.type = type_dst_sorted; dst_slice.ne[0] = ne0; dst_slice.ne[1] = tokens_per_expert[i02]; dst_slice.ne[2] = 1; dst_slice.ne[3] = 1; dst_slice.nb[0] = ts_dst_sorted; dst_slice.nb[1] = dst_slice.ne[0] * dst_slice.nb[0]; dst_slice.nb[2] = dst_slice.ne[1] * dst_slice.nb[1]; dst_slice.nb[3] = dst_slice.ne[2] * dst_slice.nb[2]; dst_slice.data = dst_data_cur; ggml_cuda_mul_mat(ctx, &src0_slice, &src1_slice, &dst_slice); CUDA_CHECK(cudaGetLastError()); src1_data_cur += src1_slice.nb[2]; dst_data_cur += dst_slice.nb[2]; } get_rows_cuda(dst_sorted.ptr, type_dst_sorted, ids_from_sorted, dst->data, dst->type, ne0, ne0*ts_dst_sorted, ne_get_rows*ne0*ts_dst_sorted, ne_get_rows*ne0*ts_dst_sorted, ne_get_rows, 1, 1, sizeof(int32_t), ne_get_rows*sizeof(int32_t), ne_get_rows*sizeof(int32_t), nb1, nb2, nb3, stream); } static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct ggml_tensor * dst) { // why is this here instead of mul_mat? 
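    // ggml_cuda_set_peer_access() below lazily (re)configures peer-to-peer access between
    // the main device and the other devices: it is enabled only while the batch size
    // (src1->ne[1]) is at most GGML_CUDA_PEER_MAX_BATCH_SIZE and is a no-op when the
    // desired state is already in effect.
    //
    // Minimal sketch of the underlying CUDA pattern (assuming two visible devices); the
    // real logic, including error handling and the main-device restriction, lives in
    // ggml_cuda_set_peer_access() above:
#if 0
    int can_access = 0;
    CUDA_CHECK(cudaSetDevice(0));
    CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access, /*device=*/0, /*peerDevice=*/1));
    if (can_access) {
        CUDA_CHECK(cudaDeviceEnablePeerAccess(/*peerDevice=*/1, /*flags=*/0));
    }
#endif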
if (dst->src[0] != nullptr && ggml_backend_buft_is_cuda_split(dst->src[0]->buffer->buft)) { ggml_cuda_set_peer_access(dst->src[1]->ne[1], ctx.device); } switch (dst->op) { case GGML_OP_ARGMAX: ggml_cuda_argmax(ctx, dst); break; case GGML_OP_COUNT_EQUAL: ggml_cuda_count_equal(ctx, dst); break; case GGML_OP_REPEAT: ggml_cuda_op_repeat(ctx, dst); break; case GGML_OP_REPEAT_BACK: ggml_cuda_op_repeat_back(ctx, dst); break; case GGML_OP_GET_ROWS: ggml_cuda_op_get_rows(ctx, dst); break; case GGML_OP_GET_ROWS_BACK: ggml_cuda_op_get_rows_back(ctx, dst); break; case GGML_OP_SET_ROWS: ggml_cuda_op_set_rows(ctx, dst); break; case GGML_OP_SET: ggml_cuda_op_set(ctx, dst); break; case GGML_OP_DUP: ggml_cuda_dup(ctx, dst); break; case GGML_OP_CPY: ggml_cuda_cpy(ctx, dst->src[0], dst->src[1]); break; case GGML_OP_CONT: ggml_cuda_dup(ctx, dst); break; case GGML_OP_ADD: case GGML_OP_ADD1: // TODO: more efficient implementation ggml_cuda_op_add(ctx, dst); break; case GGML_OP_ADD_ID: ggml_cuda_op_add_id(ctx, dst); break; case GGML_OP_SUB: ggml_cuda_op_sub(ctx, dst); break; case GGML_OP_ACC: ggml_cuda_op_acc(ctx, dst); break; case GGML_OP_MUL: ggml_cuda_op_mul(ctx, dst); break; case GGML_OP_DIV: ggml_cuda_op_div(ctx, dst); break; case GGML_OP_UNARY: switch (ggml_get_unary_op(dst)) { case GGML_UNARY_OP_ABS: ggml_cuda_op_abs(ctx, dst); break; case GGML_UNARY_OP_SGN: ggml_cuda_op_sgn(ctx, dst); break; case GGML_UNARY_OP_NEG: ggml_cuda_op_neg(ctx, dst); break; case GGML_UNARY_OP_STEP: ggml_cuda_op_step(ctx, dst); break; case GGML_UNARY_OP_GELU: ggml_cuda_op_gelu(ctx, dst); break; case GGML_UNARY_OP_SILU: ggml_cuda_op_silu(ctx, dst); break; case GGML_UNARY_OP_GELU_ERF: ggml_cuda_op_gelu_erf(ctx, dst); break; case GGML_UNARY_OP_GELU_QUICK: ggml_cuda_op_gelu_quick(ctx, dst); break; case GGML_UNARY_OP_TANH: ggml_cuda_op_tanh(ctx, dst); break; case GGML_UNARY_OP_RELU: ggml_cuda_op_relu(ctx, dst); break; case GGML_UNARY_OP_SIGMOID: ggml_cuda_op_sigmoid(ctx, dst); break; case GGML_UNARY_OP_HARDSIGMOID: ggml_cuda_op_hardsigmoid(ctx, dst); break; case GGML_UNARY_OP_HARDSWISH: ggml_cuda_op_hardswish(ctx, dst); break; case GGML_UNARY_OP_EXP: ggml_cuda_op_exp(ctx, dst); break; case GGML_UNARY_OP_ELU: ggml_cuda_op_elu(ctx, dst); break; case GGML_UNARY_OP_XIELU: ggml_cuda_op_xielu(ctx, dst); break; case GGML_UNARY_OP_FLOOR: ggml_cuda_op_floor(ctx, dst); break; case GGML_UNARY_OP_CEIL: ggml_cuda_op_ceil(ctx, dst); break; case GGML_UNARY_OP_ROUND: ggml_cuda_op_round(ctx, dst); break; case GGML_UNARY_OP_TRUNC: ggml_cuda_op_trunc(ctx, dst); break; case GGML_UNARY_OP_EXPM1: ggml_cuda_op_expm1(ctx, dst); break; case GGML_UNARY_OP_SOFTPLUS: ggml_cuda_op_softplus(ctx, dst); break; default: return false; } break; case GGML_OP_GLU: switch (ggml_get_glu_op(dst)) { case GGML_GLU_OP_REGLU: ggml_cuda_op_reglu(ctx, dst); break; case GGML_GLU_OP_GEGLU: ggml_cuda_op_geglu(ctx, dst); break; case GGML_GLU_OP_SWIGLU: ggml_cuda_op_swiglu(ctx, dst); break; case GGML_GLU_OP_SWIGLU_OAI: ggml_cuda_op_swiglu_oai(ctx, dst); break; case GGML_GLU_OP_GEGLU_ERF: ggml_cuda_op_geglu_erf(ctx, dst); break; case GGML_GLU_OP_GEGLU_QUICK: ggml_cuda_op_geglu_quick(ctx, dst); break; default: return false; } break; case GGML_OP_NORM: ggml_cuda_op_norm(ctx, dst); break; case GGML_OP_GROUP_NORM: ggml_cuda_op_group_norm(ctx, dst); break; case GGML_OP_L2_NORM: ggml_cuda_op_l2_norm(ctx, dst); break; case GGML_OP_CONCAT: ggml_cuda_op_concat(ctx, dst); break; case GGML_OP_UPSCALE: ggml_cuda_op_upscale(ctx, dst); break; case GGML_OP_PAD: ggml_cuda_op_pad(ctx, dst); break; case 
GGML_OP_PAD_REFLECT_1D: ggml_cuda_op_pad_reflect_1d(ctx, dst); break; case GGML_OP_ARANGE: ggml_cuda_op_arange(ctx, dst); break; case GGML_OP_TIMESTEP_EMBEDDING: ggml_cuda_op_timestep_embedding(ctx, dst); break; case GGML_OP_LEAKY_RELU: ggml_cuda_op_leaky_relu(ctx, dst); break; case GGML_OP_SILU_BACK: ggml_cuda_op_silu_back(ctx, dst); break; case GGML_OP_RMS_NORM: ggml_cuda_op_rms_norm(ctx, dst); break; case GGML_OP_RMS_NORM_BACK: ggml_cuda_op_rms_norm_back(ctx, dst); break; case GGML_OP_MUL_MAT: ggml_cuda_mul_mat(ctx, dst->src[0], dst->src[1], dst); break; case GGML_OP_MUL_MAT_ID: ggml_cuda_mul_mat_id(ctx, dst); break; case GGML_OP_OUT_PROD: ggml_cuda_out_prod(ctx, dst); break; case GGML_OP_SCALE: ggml_cuda_op_scale(ctx, dst); break; case GGML_OP_SQR: ggml_cuda_op_sqr(ctx, dst); break; case GGML_OP_SQRT: ggml_cuda_op_sqrt(ctx, dst); break; case GGML_OP_SIN: ggml_cuda_op_sin(ctx, dst); break; case GGML_OP_COS: ggml_cuda_op_cos(ctx, dst); break; case GGML_OP_CLAMP: ggml_cuda_op_clamp(ctx, dst); break; case GGML_OP_LOG: ggml_cuda_op_log(ctx, dst); break; case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: break; case GGML_OP_DIAG: ggml_cuda_op_diag(ctx, dst); break; case GGML_OP_DIAG_MASK_INF: ggml_cuda_op_diag_mask_inf(ctx, dst); break; case GGML_OP_SOFT_MAX: ggml_cuda_op_soft_max(ctx, dst); break; case GGML_OP_SOFT_MAX_BACK: ggml_cuda_op_soft_max_back(ctx, dst); break; case GGML_OP_ROPE: ggml_cuda_op_rope(ctx, dst); break; case GGML_OP_ROPE_BACK: ggml_cuda_op_rope_back(ctx, dst); break; case GGML_OP_ROLL: ggml_cuda_op_roll(ctx, dst); break; case GGML_OP_IM2COL: ggml_cuda_op_im2col(ctx, dst); break; case GGML_OP_IM2COL_3D: ggml_cuda_op_im2col_3d(ctx, dst); break; case GGML_OP_CONV_2D: ggml_cuda_op_conv2d(ctx, dst); break; case GGML_OP_CONV_2D_DW: ggml_cuda_op_conv2d_dw(ctx, dst); break; case GGML_OP_CONV_TRANSPOSE_2D: ggml_cuda_conv_2d_transpose_p0(ctx, dst); break; case GGML_OP_CONV_TRANSPOSE_1D: ggml_cuda_op_conv_transpose_1d(ctx,dst); break; case GGML_OP_POOL_2D: ggml_cuda_op_pool2d(ctx, dst); break; case GGML_OP_SUM: ggml_cuda_op_sum(ctx, dst); break; case GGML_OP_SUM_ROWS: ggml_cuda_op_sum_rows(ctx, dst); break; case GGML_OP_MEAN: ggml_cuda_op_mean(ctx, dst); break; case GGML_OP_SSM_CONV: ggml_cuda_op_ssm_conv(ctx, dst); break; case GGML_OP_SSM_SCAN: ggml_cuda_op_ssm_scan(ctx, dst); break; case GGML_OP_ARGSORT: ggml_cuda_op_argsort(ctx, dst); break; case GGML_OP_FLASH_ATTN_EXT: ggml_cuda_flash_attn_ext(ctx, dst); break; case GGML_OP_CROSS_ENTROPY_LOSS: ggml_cuda_cross_entropy_loss(ctx, dst); break; case GGML_OP_CUMSUM: ggml_cuda_op_cumsum(ctx, dst); break; case GGML_OP_TRI: ggml_cuda_op_tri(ctx, dst); break; case GGML_OP_RWKV_WKV6: ggml_cuda_op_rwkv_wkv6(ctx, dst); break; case GGML_OP_GATED_LINEAR_ATTN: ggml_cuda_op_gated_linear_attn(ctx, dst); break; case GGML_OP_RWKV_WKV7: ggml_cuda_op_rwkv_wkv7(ctx, dst); break; case GGML_OP_CROSS_ENTROPY_LOSS_BACK: ggml_cuda_cross_entropy_loss_back(ctx, dst); break; case GGML_OP_OPT_STEP_ADAMW: ggml_cuda_opt_step_adamw(ctx, dst); break; case GGML_OP_OPT_STEP_SGD: ggml_cuda_opt_step_sgd(ctx, dst); break; case GGML_OP_SOLVE_TRI: ggml_cuda_op_solve_tri(ctx, dst); break; case GGML_OP_FILL: ggml_cuda_op_fill(ctx, dst); break; default: return false; } cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { GGML_LOG_ERROR("%s: %s failed\n", __func__, ggml_op_desc(dst)); CUDA_CHECK(err); } return true; } //////////////////////////////////////////////////////////////////////////////// // backend 
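// Usage sketch (not compiled): the async set/get/synchronize entry points defined below
// are normally reached through the generic ggml-backend API, roughly as follows. This
// assumes a single CUDA device and a small F32 tensor, and omits error handling; the
// exact setup (context sizing, allocation helpers) is simplified for illustration.
#if 0
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-cuda.h"
#include <vector>

int main() {
    ggml_backend_t backend = ggml_backend_cuda_init(0);

    ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead(),
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    ggml_context * ctx = ggml_init(params);
    ggml_tensor  * t   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    std::vector<float> host(1024, 1.0f);
    ggml_backend_tensor_set_async(backend, t, host.data(), 0, ggml_nbytes(t)); // -> ggml_backend_cuda_set_tensor_async
    ggml_backend_synchronize(backend);                                         // -> ggml_backend_cuda_synchronize

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
    ggml_backend_free(backend);
    return 0;
}
#endif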
static const char * ggml_backend_cuda_get_name(ggml_backend_t backend) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; return cuda_ctx->name.c_str(); } static void ggml_backend_cuda_free(ggml_backend_t backend) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; delete cuda_ctx; delete backend; } static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; GGML_ASSERT(buf->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); CUDA_CHECK(cudaMemcpyAsync((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice, cuda_ctx->stream())); } static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; GGML_ASSERT(buf->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, cuda_ctx->stream())); } static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) { ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer; ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer; if (!ggml_backend_is_cuda(backend_src) || !ggml_backend_is_cuda(backend_dst)) { return false; } if (!ggml_backend_buffer_is_cuda(src->buffer) || !ggml_backend_buffer_is_cuda(dst->buffer)) { return false; } // device -> device copy ggml_backend_cuda_context * cuda_ctx_src = (ggml_backend_cuda_context *)backend_src->context; ggml_backend_cuda_context * cuda_ctx_dst = (ggml_backend_cuda_context *)backend_dst->context; ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context; ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context; if (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: backend and buffer devices do not match\n", __func__); #endif return false; } if (backend_src != backend_dst) { // copy on src stream if (cuda_ctx_src->device == cuda_ctx_dst->device) { CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream())); } else { #ifdef GGML_CUDA_NO_PEER_COPY return false; #else CUDA_CHECK(cudaMemcpyPeerAsync(dst->data, cuda_ctx_dst->device, src->data, cuda_ctx_src->device, ggml_nbytes(dst), cuda_ctx_src->stream())); #endif } // record event on src stream after the copy if (!cuda_ctx_src->copy_event) { ggml_cuda_set_device(cuda_ctx_src->device); CUDA_CHECK(cudaEventCreateWithFlags(&cuda_ctx_src->copy_event, cudaEventDisableTiming)); } CUDA_CHECK(cudaEventRecord(cuda_ctx_src->copy_event, cuda_ctx_src->stream())); // wait on dst stream for the copy to complete CUDA_CHECK(cudaStreamWaitEvent(cuda_ctx_dst->stream(), cuda_ctx_src->copy_event, 0)); } else { // src and dst are on the same backend 
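// same backend => same CUDA stream: stream ordering already serializes this copy
// with the surrounding ops, so no extra event synchronization is needed here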
CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream())); } return true; } static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; CUDA_CHECK(cudaStreamSynchronize(cuda_ctx->stream())); GGML_UNUSED(backend); } #ifdef USE_CUDA_GRAPH static bool check_node_graph_compatibility(ggml_cgraph * cgraph, bool use_cuda_graph) { // Loop over nodes in GGML graph to obtain info needed for CUDA graph const std::string gemma3n_per_layer_proj_src0_name = "inp_per_layer_selected"; const std::string gemma3n_per_layer_proj_src1_name = "per_layer_proj"; const std::string ffn_moe_gate_bias_prefix = "ffn_moe_gate_biased"; const std::string ffn_moe_up_bias_prefix = "ffn_moe_up_biased"; const std::string ffn_moe_down_bias_prefix = "ffn_moe_down_biased"; const std::string nemotron_h_block_out_prefix = "nemotron_h_block_out"; const std::string mamba2_y_add_d_prefix = "mamba2_y_add_d"; for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { continue; } if (node->src[0] && node->src[0]->buffer && ggml_backend_buft_is_cuda_split(node->src[0]->buffer->buft)) { use_cuda_graph = false; // Split buffers are not supported by CUDA graph capture #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to split buffer\n", __func__); #endif } if (node->op == GGML_OP_MUL_MAT_ID && node->ne[2] != 1) { use_cuda_graph = false; // This node type is not supported by CUDA graph capture #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to unsupported node type\n", __func__); #endif } if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1 && (node->src[0] ? node->src[0]->name != gemma3n_per_layer_proj_src0_name : true) && (node->src[1] ? node->src[1]->name != gemma3n_per_layer_proj_src1_name : true) && strncmp(node->name, ffn_moe_gate_bias_prefix.c_str(), ffn_moe_gate_bias_prefix.size()) != 0 && strncmp(node->name, ffn_moe_up_bias_prefix.c_str(), ffn_moe_up_bias_prefix.size()) != 0 && strncmp(node->name, ffn_moe_down_bias_prefix.c_str(), ffn_moe_down_bias_prefix.size()) != 0 && strncmp(node->name, nemotron_h_block_out_prefix.c_str(), nemotron_h_block_out_prefix.size()) != 0 && strncmp(node->name, mamba2_y_add_d_prefix.c_str(), mamba2_y_add_d_prefix.size()) != 0) { // disable CUDA graphs for batch size > 1 for now while excluding the matrix-matrix addition as part of Gemma3n's `project_per_layer_input` operation // by means of matching node names. See // https://github.com/ggml-org/llama.cpp/blob/f9a31eea06a859e34cecb88b4d020c7f03d86cc4/src/llama-model.cpp#L10199-L10241 and // https://github.com/huggingface/transformers/blob/bda75b4011239d065de84aa3e744b67ebfa7b245/src/transformers/models/gemma3n/modeling_gemma3n.py#L1773, // Generally, changes in batch size or context size can cause changes to the grid size of some kernels. 
use_cuda_graph = false; #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); #endif } if (!use_cuda_graph) { break; } } return use_cuda_graph; } static void set_ggml_graph_node_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) { graph_node_properties->node_address = node->data; graph_node_properties->node_op = node->op; for (int i = 0; i < GGML_MAX_DIMS; i++) { graph_node_properties->ne[i] = node->ne[i]; graph_node_properties->nb[i] = node->nb[i]; } for (int i = 0; i < GGML_MAX_SRC; i++) { graph_node_properties->src_address[i] = node->src[i] ? node->src[i]->data : nullptr; } memcpy(graph_node_properties->op_params, node->op_params, GGML_MAX_OP_PARAMS); } static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) { if (node->data != graph_node_properties->node_address && node->op != GGML_OP_VIEW) { return false; } if (node->op != graph_node_properties->node_op) { return false; } for (int i = 0; i < GGML_MAX_DIMS; i++) { if (node->ne[i] != graph_node_properties->ne[i]) { return false; } if (node->nb[i] != graph_node_properties->nb[i]) { return false; } } for (int i = 0; i < GGML_MAX_SRC; i++) { if (node->src[i] && node->src[i]->data != graph_node_properties->src_address[i] && node->op != GGML_OP_VIEW ) { return false; } } if ((node->op == GGML_OP_SCALE || node->op == GGML_OP_GLU) && memcmp(graph_node_properties->op_params, node->op_params, GGML_MAX_OP_PARAMS) != 0) { return false; } return true; } static bool is_cuda_graph_update_required(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph) { bool cuda_graph_update_required = false; if (cuda_ctx->cuda_graph->instance == nullptr) { cuda_graph_update_required = true; } // Check if the graph size has changed if (cuda_ctx->cuda_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes) { cuda_graph_update_required = true; cuda_ctx->cuda_graph->ggml_graph_properties.resize(cgraph->n_nodes); } // Loop over nodes in GGML graph to determine if CUDA graph update is required // and store properties to allow this comparison for the next token for (int i = 0; i < cgraph->n_nodes; i++) { bool has_matching_properties = true; if (!cuda_graph_update_required) { has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]); } if (!has_matching_properties) { cuda_graph_update_required = true; } set_ggml_graph_node_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]); } return cuda_graph_update_required; } static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) { #if CUDART_VERSION >= 12000 cudaGraphExecUpdateResultInfo result_info; cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info); #else cudaGraphNode_t errorNode; cudaGraphExecUpdateResult result_info; cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &errorNode, &result_info); #endif // CUDART_VERSION >= 12000 if (stat == cudaErrorGraphExecUpdateFailure) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: CUDA graph update failed\n", __func__); #endif // The pre-existing graph exec cannot be updated due to violated constraints // so instead clear error and re-instantiate (void)cudaGetLastError(); CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance)); 
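// fall back to a full re-instantiation below: slower than patching the existing
// executable in place, but it handles the constraint violations (e.g. a changed
// graph topology) that cudaGraphExecUpdate rejects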
cuda_ctx->cuda_graph->instance = nullptr; CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); } else { GGML_ASSERT(stat == cudaSuccess); } } #endif static bool ggml_cuda_should_fuse_rope_set_rows(const ggml_tensor * rope, const ggml_tensor * view, const ggml_tensor * set_rows) { if (rope->op != GGML_OP_ROPE || view->op != GGML_OP_VIEW || set_rows->op != GGML_OP_SET_ROWS) { return false; } // ne3 not tested if (rope->src[0]->ne[3] != 1) { return false; } if (set_rows->type != GGML_TYPE_F32 && set_rows->type != GGML_TYPE_F16) { return false; } if (set_rows->src[1]->type != GGML_TYPE_I64) { return false; } // The view should flatten two dims of rope into one dim if (!ggml_is_contiguous(view) || view->ne[0] != rope->ne[0] * rope->ne[1]) { return false; } // Only norm/neox shaders have the fusion code const int mode = ((const int32_t *) rope->op_params)[2]; if (mode != GGML_ROPE_TYPE_NORMAL && mode != GGML_ROPE_TYPE_NEOX) { return false; } return true; } static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list ops, std::initializer_list unary_ops) { #ifndef NDEBUG const size_t num_unary = std::count(ops.begin(), ops.end(), GGML_OP_UNARY); GGML_ASSERT(unary_ops.size() == num_unary); #endif //TODO: remove special case once ggml_can_fuse can handle empty nodes std::initializer_list topk_moe_ops = ggml_cuda_topk_moe_ops(/*with_norm*/ false, /*delayed_softmax=*/false); std::initializer_list topk_moe_ops_with_norm = ggml_cuda_topk_moe_ops(/*with_norm=*/true, /*delayed_softmax=*/false); std::initializer_list topk_moe_ops_delayed_softmax = ggml_cuda_topk_moe_ops(/*with_norm=*/false, /*delayed_softmax=*/true); const auto is_equal = [](const std::initializer_list & list1, const std::initializer_list & list2) { return std::equal(list1.begin(), list1.end(), list2.begin(), list2.end()); }; if (is_equal(topk_moe_ops_with_norm, ops) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 3, node_idx + 9 })) { ggml_tensor * softmax = cgraph->nodes[node_idx]; ggml_tensor * weights = cgraph->nodes[node_idx + 9]; ggml_tensor * get_rows = cgraph->nodes[node_idx + 4]; ggml_tensor * argsort = cgraph->nodes[node_idx + 2]; int n_expert = cgraph->nodes[node_idx]->src[0]->ne[0]; if (ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert)) { return true; } } if (is_equal(topk_moe_ops, ops) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 3, node_idx + 4 })) { ggml_tensor * softmax = cgraph->nodes[node_idx]; ggml_tensor * weights = cgraph->nodes[node_idx + 4]; ggml_tensor * get_rows = cgraph->nodes[node_idx + 4]; ggml_tensor * argsort = cgraph->nodes[node_idx + 2]; int n_expert = cgraph->nodes[node_idx]->src[0]->ne[0]; if (ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert)) { return true; } } if (is_equal(topk_moe_ops_delayed_softmax, ops) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 1, node_idx + 5 })) { ggml_tensor * softmax = cgraph->nodes[node_idx + 4]; ggml_tensor * weights = cgraph->nodes[node_idx + 5]; ggml_tensor * get_rows = cgraph->nodes[node_idx + 2]; ggml_tensor * argsort = cgraph->nodes[node_idx + 0]; int n_expert = cgraph->nodes[node_idx]->src[0]->ne[0]; if (ggml_cuda_should_use_topk_moe(softmax, weights, get_rows, argsort, nullptr, n_expert)) { return true; } } std::initializer_list mul_mat_bias_glu_ops = { GGML_OP_MUL_MAT, GGML_OP_ADD, GGML_OP_MUL_MAT, GGML_OP_ADD, GGML_OP_GLU }; std::initializer_list 
mul_mat_id_bias_glu_ops = { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID, GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID, GGML_OP_GLU }; std::initializer_list mul_mat_id_glu_ops = { GGML_OP_MUL_MAT_ID, GGML_OP_MUL_MAT_ID, GGML_OP_GLU }; std::initializer_list mul_mat_glu_ops = { GGML_OP_MUL_MAT, GGML_OP_MUL_MAT, GGML_OP_GLU }; if ((is_equal(mul_mat_bias_glu_ops, ops) || is_equal(mul_mat_id_bias_glu_ops, ops)) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 4 })) { const ggml_tensor * ffn_gate = cgraph->nodes[node_idx]; const ggml_tensor * ffn_gate_bias = cgraph->nodes[node_idx + 1]; const ggml_tensor * ffn_up = cgraph->nodes[node_idx + 2]; const ggml_tensor * ffn_up_bias = cgraph->nodes[node_idx + 3]; const ggml_tensor * glu = cgraph->nodes[node_idx + 4]; if (ggml_cuda_should_fuse_mul_mat(ffn_up, ffn_gate, glu, ffn_up_bias, ffn_gate_bias)) { return true; } } if ((is_equal(mul_mat_id_glu_ops, ops) || is_equal(mul_mat_glu_ops, ops)) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 2 })) { const ggml_tensor * ffn_gate = cgraph->nodes[node_idx]; const ggml_tensor * ffn_up = cgraph->nodes[node_idx + 1]; const ggml_tensor * glu = cgraph->nodes[node_idx + 2]; if (ggml_cuda_should_fuse_mul_mat(ffn_up, ffn_gate, glu)) { return true; } } std::initializer_list rope_set_rows_ops = { GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }; if (is_equal(rope_set_rows_ops, ops) && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 2 })) { const ggml_tensor * rope = cgraph->nodes[node_idx]; const ggml_tensor * view = cgraph->nodes[node_idx + 1]; const ggml_tensor * set_rows = cgraph->nodes[node_idx + 2]; if (ggml_cuda_should_fuse_rope_set_rows(rope, view, set_rows)) { return true; } } if (!ggml_can_fuse(cgraph, node_idx, ops)) { return false; } if ((ops.size() == 2 || ops.size() == 3) && ops.begin()[0] == GGML_OP_RMS_NORM && ops.begin()[1] == GGML_OP_MUL) { const ggml_tensor *rms_norm = cgraph->nodes[node_idx]; const ggml_tensor *mul = cgraph->nodes[node_idx+1]; const ggml_tensor *add = nullptr; if (ops.size() == 3 && ops.begin()[2] == GGML_OP_ADD) { add = cgraph->nodes[node_idx+2]; } GGML_ASSERT(rms_norm->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(rms_norm->type == GGML_TYPE_F32); //rms norm only supports F32 if (mul->src[0]->type != GGML_TYPE_F32 || mul->src[1]->type != GGML_TYPE_F32 || mul->type != GGML_TYPE_F32) { return false; } if (add && (add->src[0]->type != GGML_TYPE_F32 || add->src[1]->type != GGML_TYPE_F32 || add->type != GGML_TYPE_F32) ) { return false; } //if rms norm is the B operand, then we don't handle broadcast if (rms_norm == mul->src[1] && !ggml_are_same_shape(mul->src[0], rms_norm)) { return false; } //rms_norm kernel assumes contigous rows if (!ggml_is_contiguous_rows(mul->src[0]) || !ggml_is_contiguous_rows(mul->src[1])) { return false; } if (add && (!ggml_is_contiguous(add->src[0]) || !ggml_is_contiguous_rows(add->src[1]))) { return false; } return true; } if (ops.size() == 3 && ops.begin()[0] == GGML_OP_SCALE && ops.begin()[1] == GGML_OP_UNARY && ops.begin()[2] == GGML_OP_SCALE && unary_ops.size() == 1 && unary_ops.begin()[0] == GGML_UNARY_OP_TANH) { const ggml_tensor *scale = cgraph->nodes[node_idx]; const ggml_tensor *tanh = cgraph->nodes[node_idx+1]; const ggml_tensor *scale2 = cgraph->nodes[node_idx+2]; GGML_ASSERT(scale->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(scale->type == GGML_TYPE_F32); if (ggml_get_unary_op(tanh) != GGML_UNARY_OP_TANH) { return false; } // Check for bias if (ggml_get_op_params_f32(scale, 1) != 0.0f || ggml_get_op_params_f32(scale2, 1) != 0.0f) { 
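// a non-zero bias on either GGML_OP_SCALE is not covered by the fused softcap
// path, so bail out and let the ops run unfused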
return false; } return true; } return false; } static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph, bool & graph_evaluated_or_captured, bool & use_cuda_graph, bool & cuda_graph_update_required) { // flag used to determine whether it is an integrated_gpu const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated; ggml_cuda_stream_context & stream_ctx = cuda_ctx->stream_context(); bool is_concurrent_event_active = false; ggml_cuda_concurrent_event * concurrent_event = nullptr; bool should_launch_concurrent_events = false; const auto try_launch_concurrent_event = [&](const ggml_tensor * node) { if (stream_ctx.concurrent_events.find(node) != stream_ctx.concurrent_events.end()) { concurrent_event = &stream_ctx.concurrent_events[node]; is_concurrent_event_active = true; GGML_LOG_DEBUG("Launching %d streams at %s\n", concurrent_event->n_streams, node->name); cudaStream_t main_stream = cuda_ctx->stream(); // this should be stream 0 GGML_ASSERT(cuda_ctx->curr_stream_no == 0); CUDA_CHECK(cudaEventRecord(concurrent_event->fork_event, main_stream)); for (int i = 1; i <= concurrent_event->n_streams; ++i) { cudaStream_t stream = cuda_ctx->stream(cuda_ctx->device, i); CUDA_CHECK(cudaStreamWaitEvent(stream, concurrent_event->fork_event)); } } }; while (!graph_evaluated_or_captured) { // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph. // With the use of CUDA graphs, the execution will be performed by the graph launch. if (!use_cuda_graph || cuda_graph_update_required) { [[maybe_unused]] int prev_i = 0; if (stream_ctx.concurrent_events.size() > 0) { should_launch_concurrent_events = true; for (const auto & [tensor, event] : stream_ctx.concurrent_events) { should_launch_concurrent_events = should_launch_concurrent_events && event.is_valid(); } } if (should_launch_concurrent_events) { // Restore original node order within each concurrent region to enable fusion within streams std::unordered_map node_to_idx; node_to_idx.reserve(cgraph->n_nodes); for (int i = 0; i < cgraph->n_nodes; ++i) { node_to_idx[cgraph->nodes[i]] = i; } for (auto & [fork_node, event] : stream_ctx.concurrent_events) { // Find positions of all nodes from this event in the current graph std::vector positions; positions.reserve(event.original_order.size()); bool all_found = true; for (const ggml_tensor * orig_node : event.original_order) { auto it = node_to_idx.find(orig_node); if (it != node_to_idx.end()) { positions.push_back(it->second); } else { all_found = false; break; } } if (!all_found || positions.size() != event.original_order.size()) { continue; } // Sort positions to get contiguous range std::vector sorted_positions = positions; std::sort(sorted_positions.begin(), sorted_positions.end()); bool is_contiguous = true; for (size_t i = 1; i < sorted_positions.size(); ++i) { if (sorted_positions[i] != sorted_positions[i-1] + 1) { is_contiguous = false; break; } } if (!is_contiguous) { continue; } // Restore original order at the sorted positions int start_pos = sorted_positions[0]; for (size_t i = 0; i < event.original_order.size(); ++i) { cgraph->nodes[start_pos + i] = const_cast(event.original_order[i]); } } } for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; if (is_concurrent_event_active) { GGML_ASSERT(concurrent_event); if (node == concurrent_event->join_node) { cuda_ctx->curr_stream_no = 0; for (int i = 1; i <= concurrent_event->n_streams; ++i) { // Wait on join events of forked 
streams in the main stream CUDA_CHECK(cudaEventRecord(concurrent_event->join_events[i - 1], cuda_ctx->stream(cuda_ctx->device, i))); CUDA_CHECK(cudaStreamWaitEvent(cuda_ctx->stream(), concurrent_event->join_events[i - 1])); } is_concurrent_event_active = false; concurrent_event = nullptr; } else { GGML_ASSERT (concurrent_event->stream_mapping.find(node) != concurrent_event->stream_mapping.end()); cuda_ctx->curr_stream_no = concurrent_event->stream_mapping[node]; GGML_LOG_DEBUG("Setting stream no to %d for node %s\n", cuda_ctx->curr_stream_no, node->name); } } else if (i - prev_i > 1) { //the previous node was fused const ggml_tensor * prev_node = cgraph->nodes[i - 1]; try_launch_concurrent_event(prev_node); if (is_concurrent_event_active) { cuda_ctx->curr_stream_no = concurrent_event->stream_mapping[node]; GGML_LOG_DEBUG("Setting stream no to %d for node %s\n", cuda_ctx->curr_stream_no, node->name); } } #ifdef GGML_CUDA_DEBUG const int nodes_fused = i - prev_i - 1; if (nodes_fused > 0) { GGML_LOG_INFO("nodes_fused: %d\n", nodes_fused); } #endif prev_i = i; if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { continue; } // start of fusion operations static bool disable_fusion = (getenv("GGML_CUDA_DISABLE_FUSION") != nullptr); if (!disable_fusion) { if (ggml_cuda_can_fuse(cgraph, i, ggml_cuda_topk_moe_ops(/*with norm*/ true), {})) { ggml_tensor * weights = cgraph->nodes[i + 9]; ggml_tensor * selected_experts = cgraph->nodes[i + 3]; ggml_tensor * clamp = cgraph->nodes[i + 7]; ggml_cuda_op_topk_moe(*cuda_ctx, node->src[0], weights, selected_experts, /*with norm*/ true, /*delayed softmax*/ false, clamp); i += 9; continue; } if (ggml_cuda_can_fuse(cgraph, i, ggml_cuda_topk_moe_ops(/*with norm*/ false), {})) { ggml_tensor * weights = cgraph->nodes[i + 4]; ggml_tensor * selected_experts = cgraph->nodes[i + 3]; ggml_cuda_op_topk_moe(*cuda_ctx, node->src[0], weights, selected_experts, /*with norm*/ false, /*delayed softmax*/ false); i += 4; continue; } if (ggml_cuda_can_fuse(cgraph, i, ggml_cuda_topk_moe_ops(/*with norm*/ false, /*delayed softmax*/ true), {})) { ggml_tensor * weights = cgraph->nodes[i + 5]; ggml_tensor * ids = cgraph->nodes[i + 1]; ggml_cuda_op_topk_moe(*cuda_ctx, node->src[0], weights, ids, /*with norm*/ false, /*delayed_softmax*/ true); i += 5; continue; } if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }, {})) { ggml_tensor * rope = cgraph->nodes[i]; ggml_tensor * set_rows = cgraph->nodes[i + 2]; ggml_cuda_op_rope_fused(*cuda_ctx, rope, set_rows); i += 2; continue; } if (node->op == GGML_OP_ADD) { int n_fuse = 0; ggml_op ops[8]; std::fill(ops, ops + 8, GGML_OP_ADD); for (; n_fuse <= 6; ++n_fuse){ if (!ggml_can_fuse(cgraph, i + n_fuse, ops + n_fuse, 2)) { break; } if (cgraph->nodes[i + n_fuse] != cgraph->nodes[i + n_fuse + 1]->src[0]) { break; } if (!ggml_are_same_layout(cgraph->nodes[i + n_fuse]->src[1], cgraph->nodes[i + n_fuse + 1]->src[1])) { break; } } n_fuse++; if (n_fuse > 1) { for (int j = 0; j < n_fuse - 1; ++j) { node->src[j + 2] = cgraph->nodes[i + j + 1]->src[1]; } cgraph->nodes[i + n_fuse - 1]->data = node->data; ggml_cuda_op_fused_add(*cuda_ctx, node, n_fuse); i += n_fuse - 1; continue; } } bool fused_mul_mat_vec = false; int fused_node_count = 0; for (ggml_op op : { GGML_OP_MUL_MAT, GGML_OP_MUL_MAT_ID }) { const ggml_op bias_op = op == GGML_OP_MUL_MAT ? 
GGML_OP_ADD : GGML_OP_ADD_ID; if (ggml_cuda_can_fuse(cgraph, i, { op, bias_op, op, bias_op, GGML_OP_GLU }, {})) { ggml_tensor * glu = cgraph->nodes[i + 4]; ggml_tensor * gate_bias_n = glu->src[0]; ggml_tensor * up_bias_n = glu->src[1]; //we don't assume the order for {gate, up}. Instead infer it from the bias tensor ggml_tensor * gate_n = nullptr; ggml_tensor * up_n = nullptr; if (gate_bias_n->src[0] == cgraph->nodes[i] || gate_bias_n->src[1] == cgraph->nodes[i]) { gate_n = cgraph->nodes[i]; up_n = cgraph->nodes[i + 2]; } else if (gate_bias_n->src[0] == cgraph->nodes[i + 2] || gate_bias_n->src[1] == cgraph->nodes[i + 2]) { gate_n = cgraph->nodes[i + 2]; up_n = cgraph->nodes[i]; } else { continue; } auto get_bias_tensor = [](const ggml_tensor * bias_node, const ggml_tensor * mul_node, ggml_op op_bias) { if (op_bias == GGML_OP_ADD) { if (bias_node->src[0] == mul_node) { return bias_node->src[1]; } if (bias_node->src[1] == mul_node) { return bias_node->src[0]; } return (ggml_tensor *) nullptr; } GGML_ASSERT(op_bias == GGML_OP_ADD_ID); GGML_ASSERT(bias_node->src[0] == mul_node); return bias_node->src[1]; }; ggml_tensor * up_bias_tensor = get_bias_tensor(up_bias_n, up_n, bias_op); ggml_tensor * gate_bias_tensor = get_bias_tensor(gate_bias_n, gate_n, bias_op); if (!up_bias_tensor || !gate_bias_tensor) { continue; } // we don't support repeating adds if (bias_op == GGML_OP_ADD && (!ggml_are_same_shape(gate_bias_n->src[0], gate_bias_n->src[1]) || !ggml_are_same_shape(up_bias_n->src[0], up_bias_n->src[1]))) { continue; } const ggml_tensor * src0 = up_n->src[0]; const ggml_tensor * src1 = up_n->src[1]; const ggml_tensor * ids = up_n->src[2]; if (ggml_cuda_should_fuse_mul_mat_vec_f(up_n)) { ggml_cuda_mm_fusion_args_host fusion_data{}; fusion_data.gate = gate_n->src[0]; fusion_data.x_bias = up_bias_tensor; fusion_data.gate_bias = gate_bias_tensor; fusion_data.glu_op = ggml_get_glu_op(glu); ggml_cuda_mul_mat_vec_f(*cuda_ctx, src0, src1, ids, glu, &fusion_data); fused_mul_mat_vec = true; fused_node_count = 5; break; } if (ggml_cuda_should_fuse_mul_mat_vec_q(up_n)) { ggml_cuda_mm_fusion_args_host fusion_data{}; fusion_data.gate = gate_n->src[0]; fusion_data.x_bias = up_bias_tensor; fusion_data.gate_bias = gate_bias_tensor; fusion_data.glu_op = ggml_get_glu_op(glu); ggml_cuda_mul_mat_vec_q(*cuda_ctx, src0, src1, ids, glu, &fusion_data); fused_mul_mat_vec = true; fused_node_count = 5; break; } } else if (ggml_cuda_can_fuse(cgraph, i, { op, op, GGML_OP_GLU }, {})) { ggml_tensor * glu = cgraph->nodes[i + 2]; ggml_tensor * gate = glu->src[0]; ggml_tensor * up = glu->src[1]; bool ok = (gate == cgraph->nodes[i] && up == cgraph->nodes[i + 1]) || (gate == cgraph->nodes[i + 1] && up == cgraph->nodes[i]); if (!ok) continue; const ggml_tensor * src0 = up->src[0]; const ggml_tensor * src1 = up->src[1]; const ggml_tensor * ids = up->src[2]; if (ggml_cuda_should_fuse_mul_mat_vec_f(up)) { ggml_cuda_mm_fusion_args_host fusion_data{}; fusion_data.gate = gate->src[0]; fusion_data.glu_op = ggml_get_glu_op(glu); ggml_cuda_mul_mat_vec_f(*cuda_ctx, src0, src1, ids, glu, &fusion_data); fused_mul_mat_vec = true; fused_node_count = 3; break; } if (ggml_cuda_should_fuse_mul_mat_vec_q(up)) { ggml_cuda_mm_fusion_args_host fusion_data{}; fusion_data.gate = gate->src[0]; fusion_data.glu_op = ggml_get_glu_op(glu); ggml_cuda_mul_mat_vec_q(*cuda_ctx, src0, src1, ids, glu, &fusion_data); fused_mul_mat_vec = true; fused_node_count = 3; break; } } } if (fused_mul_mat_vec) { i += fused_node_count - 1; continue; } fused_mul_mat_vec = false; 
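// second fusion pass: a lone MUL_MAT / MUL_MAT_ID followed only by its bias
// ADD / ADD_ID (no GLU), again routed to the fused mul_mat_vec kernels when
// they support the operands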
fused_node_count = 0; for (ggml_op op : { GGML_OP_MUL_MAT, GGML_OP_MUL_MAT_ID }) { const ggml_op bias_op = op == GGML_OP_MUL_MAT ? GGML_OP_ADD : GGML_OP_ADD_ID; if (!ggml_can_fuse(cgraph, i, { op, bias_op })) { continue; } ggml_tensor * mm_node = cgraph->nodes[i]; ggml_tensor * bias_node = cgraph->nodes[i + 1]; ggml_tensor * bias_tensor = nullptr; if (bias_op == GGML_OP_ADD) { if (bias_node->src[0] == mm_node) { bias_tensor = bias_node->src[1]; } else if (bias_node->src[1] == mm_node) { bias_tensor = bias_node->src[0]; } else { continue; } } else { if (bias_node->src[0] != mm_node) { continue; } bias_tensor = bias_node->src[1]; } const ggml_tensor * src0 = mm_node->src[0]; const ggml_tensor * src1 = mm_node->src[1]; const ggml_tensor * ids = mm_node->src[2]; if (bias_op == GGML_OP_ADD_ID && bias_node->src[2] != ids) { continue; } if (bias_op == GGML_OP_ADD && !ggml_are_same_shape(bias_node->src[0], bias_node->src[1])) { continue; } ggml_cuda_mm_fusion_args_host fusion_data{}; fusion_data.x_bias = bias_tensor; if (ggml_cuda_should_fuse_mul_mat_vec_f(mm_node)) { ggml_cuda_mul_mat_vec_f(*cuda_ctx, src0, src1, ids, bias_node, &fusion_data); fused_mul_mat_vec = true; fused_node_count = 2; break; } if (ggml_cuda_should_fuse_mul_mat_vec_q(mm_node)) { ggml_cuda_mul_mat_vec_q(*cuda_ctx, src0, src1, ids, bias_node, &fusion_data); fused_mul_mat_vec = true; fused_node_count = 2; break; } } if (fused_mul_mat_vec) { i += fused_node_count - 1; continue; } if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL, GGML_OP_ADD}, {})) { ggml_cuda_op_rms_norm_fused_add(*cuda_ctx, node, cgraph->nodes[i+1], cgraph->nodes[i+2]); i += 2; continue; } if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL}, {})) { ggml_cuda_op_rms_norm_fused(*cuda_ctx, node, cgraph->nodes[i+1]); i++; continue; } if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_SCALE, GGML_OP_UNARY, GGML_OP_SCALE }, { GGML_UNARY_OP_TANH })) { i += 2; ggml_cuda_op_softcap(*cuda_ctx, cgraph->nodes[i], node); continue; } } #ifndef NDEBUG assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); for (int j = 0; j < GGML_MAX_SRC; j++) { if (node->src[j] != nullptr) { assert(node->src[j]->buffer); assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft) || (integrated && ggml_backend_buft_is_cuda_host(node->src[j]->buffer->buft))); } } #else GGML_UNUSED(integrated); #endif // NDEBUG bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); if (!ok) { GGML_LOG_ERROR("%s: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); } GGML_ASSERT(ok); if (!is_concurrent_event_active) { try_launch_concurrent_event(node); } } } #ifdef USE_CUDA_GRAPH if (use_cuda_graph && cuda_graph_update_required) { // End CUDA graph capture if (cuda_ctx->cuda_graph->graph != nullptr) { CUDA_CHECK(cudaGraphDestroy(cuda_ctx->cuda_graph->graph)); cuda_ctx->cuda_graph->graph = nullptr; } CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph)); graph_evaluated_or_captured = true; // CUDA graph has been captured std::lock_guard lock(ggml_cuda_lock); if (ggml_cuda_lock_counter.fetch_sub(1, std::memory_order_relaxed) == 1) { ggml_cuda_lock_cv.notify_all(); } } else { graph_evaluated_or_captured = true; // ggml graph has been directly evaluated } } if (use_cuda_graph) { if (cuda_ctx->cuda_graph->instance == nullptr) { // Create executable graph from captured graph. 
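// (this only happens for the first capture; on later tokens the executable is
// reused and, when the capture changed, patched via update_cuda_graph_executable() below)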
CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); } if (cuda_graph_update_required) { // Update graph executable update_cuda_graph_executable(cuda_ctx); } // Launch graph CUDA_CHECK(cudaGraphLaunch(cuda_ctx->cuda_graph->instance, cuda_ctx->stream())); #else graph_evaluated_or_captured = true; #endif // USE_CUDA_GRAPH } } static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; ggml_cuda_set_device(cuda_ctx->device); #ifdef USE_CUDA_GRAPH static const bool disable_cuda_graphs_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr); // Objects required for CUDA Graph if (cuda_ctx->cuda_graph == nullptr) { cuda_ctx->cuda_graph.reset(new ggml_cuda_graph()); } bool use_cuda_graph = true; bool cuda_graph_update_required = false; if (cuda_ctx->cuda_graph->graph == nullptr) { if (ggml_cuda_info().devices[cuda_ctx->device].cc < GGML_CUDA_CC_AMPERE) { cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true; #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to GPU architecture\n", __func__); #endif } } // Disable CUDA graphs in presence of env var, old GPU, use-case which is changing too rapidly, // or previous graph capture failure. // Also disable for multi-gpu for now. TO DO investigate if (disable_cuda_graphs_due_to_env || cuda_ctx->cuda_graph->disable_due_to_gpu_arch || cuda_ctx->cuda_graph->disable_due_to_too_many_updates || cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture) { use_cuda_graph = false; } if (use_cuda_graph) { cuda_graph_update_required = is_cuda_graph_update_required(cuda_ctx, cgraph); use_cuda_graph = check_node_graph_compatibility(cgraph, use_cuda_graph); // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates. 
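// (the counter below trips once 4 consecutive graph evaluations required an
// update, after which capture stays disabled for this context)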
if (use_cuda_graph && cuda_graph_update_required) { cuda_ctx->cuda_graph->number_consecutive_updates++; } else { cuda_ctx->cuda_graph->number_consecutive_updates = 0; } if (cuda_ctx->cuda_graph->number_consecutive_updates >= 4) { cuda_ctx->cuda_graph->disable_due_to_too_many_updates = true; #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to too many consecutive updates\n", __func__); #endif } } if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture { std::lock_guard lock(ggml_cuda_lock); ggml_cuda_lock_counter.fetch_add(1, std::memory_order_relaxed); } CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed)); } #else bool use_cuda_graph = false; bool cuda_graph_update_required = false; #endif // USE_CUDA_GRAPH bool graph_evaluated_or_captured = false; evaluate_and_capture_cuda_graph(cuda_ctx, cgraph, graph_evaluated_or_captured, use_cuda_graph, cuda_graph_update_required); return GGML_STATUS_SUCCESS; } static void ggml_backend_cuda_event_record(ggml_backend_t backend, ggml_backend_event_t event) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; CUDA_CHECK(cudaEventRecord((cudaEvent_t)event->context, cuda_ctx->stream())); } static void ggml_backend_cuda_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; if (ggml_backend_is_cuda(backend)) { CUDA_CHECK(cudaStreamWaitEvent(cuda_ctx->stream(), (cudaEvent_t)event->context, 0)); } else { #if 0 // untested auto wait_fn = [](void * user_data) { ggml_backend_event_t event = (ggml_backend_event_t)user_data; ggml_backend_event_synchronize(event); }; CUDA_CHECK(cudaLaunchHostFunc(cuda_ctx->stream(), wait_fn, event)); #endif GGML_ABORT("fatal error"); } } static void ggml_backend_cuda_graph_optimize(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context; static bool enable_graph_optimization = [] { const char * env = getenv("GGML_CUDA_GRAPH_OPT"); return env != nullptr && atoi(env) == 1; }(); if (!enable_graph_optimization) { return; } GGML_ASSERT(ggml_backend_cuda_get_device_count() == 1 && "compute graph optimization is only supported on single GPU in the CUDA backend"); GGML_LOG_DEBUG("Optimizing CUDA graph %p with %d nodes\n", cgraph->nodes, cgraph->n_nodes); ggml_cuda_stream_context & stream_context = cuda_ctx->stream_context(); stream_context.reset(); // number of out-degrees for a particular node std::unordered_map fan_out; // reverse mapping of node to index in the cgraph std::unordered_map node_indices; const auto & is_noop = [](const ggml_tensor * node) -> bool { return ggml_is_empty(node) || node->op == GGML_OP_NONE || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE; }; const auto & depends_on = [](const ggml_tensor * dst, const ggml_tensor * src) -> bool { for (uint32_t s = 0; s < GGML_MAX_SRC; ++s) { if (dst->src[s] == src) { return true; } } // implicit dependency if they view the same tensor const ggml_tensor * dst2 = dst->view_src ? dst->view_src : dst; const ggml_tensor * src2 = src->view_src ? 
src->view_src : src; if (dst2 == src2) { return true; } return false; }; for (int node_idx = 0; node_idx < cgraph->n_nodes; node_idx++) { const ggml_tensor * node = cgraph->nodes[node_idx]; node_indices[node] = node_idx; if (is_noop(node)) { continue; } for (int src_idx = 0; src_idx < GGML_MAX_SRC; ++src_idx) { const ggml_tensor * src = cgraph->nodes[node_idx]->src[src_idx]; //TODO: check why nrows > 1 fails if (node && !is_noop(node) && ggml_nrows(node) <= 1) { fan_out[src] += 1; } } } // Target Q, K, V for concurrency // this is a more general way to find nodes which can be candidates for concurrency (although it has not been tested for anything else): // 1. find fan-out (fork) nodes where the same input is used at least N times (in QKV, it would be "attn-norm") // 2. find the join node, where 2 or more of the outputs are required (in QKV, this would "KQ" or "flash-attn") // 3. account for all branches from the fork to the join // 4. To extend lifetimes of the tensors, we interleave the branches (see below for more details) // 5. save the original cgraph and restore it in graph_compute, to enable fusion within streams // See discussion: https://github.com/ggml-org/llama.cpp/pull/16991#issuecomment-3522620030 const int min_fan_out = 3; const int max_fan_out = 3; // store {fork_idx, join_idx} std::vector> concurrent_node_ranges; for (const auto & [root_node, count] : fan_out) { if (count >= min_fan_out && count <= max_fan_out) { const int root_node_idx = node_indices[root_node]; bool is_part_of_event = false; for (const auto & [start, end] : concurrent_node_ranges) { if (root_node_idx >= start && root_node_idx <= end) { is_part_of_event = true; } } if (is_part_of_event) { continue; } std::vector> nodes_per_branch; for (int i = root_node_idx + 1; i < cgraph->n_nodes; ++i) { const ggml_tensor * node = cgraph->nodes[i]; if (!is_noop(node) && depends_on(node, root_node)) { nodes_per_branch.push_back({ node }); } } GGML_ASSERT(nodes_per_branch.size() == (size_t) count); //find the join point const ggml_tensor * join_node = nullptr; const auto & belongs_to_branch = [&](const ggml_tensor * node, const std::vector & branch) -> bool { for (const ggml_tensor * n : branch) { if (depends_on(node, n)) { return true; } } return false; }; for (int i = root_node_idx + 1; i < cgraph->n_nodes; ++i) { const ggml_tensor * curr_node = cgraph->nodes[i]; int num_joins = 0; for (size_t branch_idx = 0; branch_idx < nodes_per_branch.size(); branch_idx++) { if (belongs_to_branch(curr_node, nodes_per_branch[branch_idx])) { num_joins++; } } if (num_joins >= 2) { join_node = curr_node; break; } bool found_branch = false; for (size_t branch_idx = 0; branch_idx < nodes_per_branch.size(); branch_idx++) { std::vector & branch_vec = nodes_per_branch[branch_idx]; if (belongs_to_branch(curr_node, branch_vec)) { //continue accumulating if (std::find(branch_vec.begin(), branch_vec.end(), curr_node) == branch_vec.end()) { branch_vec.push_back(curr_node); } found_branch = true; } } if (!found_branch && is_noop(curr_node)) { // we can put it in any branch because it will be ignored nodes_per_branch[0].push_back({ curr_node }); } } if (join_node) { //Create ggml_cuda_concurrent_event ggml_cuda_concurrent_event concurrent_event(nodes_per_branch.size()); concurrent_event.join_node = join_node; for (size_t branch_idx = 0; branch_idx < nodes_per_branch.size(); branch_idx++) { for (const ggml_tensor * n : nodes_per_branch[branch_idx]) { concurrent_event.stream_mapping[n] = branch_idx + 1; } } int fork_node_idx = node_indices[root_node]; 
int join_node_idx = node_indices[join_node]; int current_branch_idx = 0; int current_node_idx = fork_node_idx + 1; const int n_branches = nodes_per_branch.size(); int total_branch_nodes = 0; for (std::vector branch_nodes : nodes_per_branch) { total_branch_nodes += branch_nodes.size(); } // there are other nodes in the middle which are unaccounted for // usually (cpy) nodes, then ignore this fork if (join_node_idx - fork_node_idx - 1 != total_branch_nodes) { GGML_LOG_DEBUG( "Skipping %s because the number of nodes in the middle is not equal to the total number of " "branch nodes %d != %d\n", root_node->name, join_node_idx - fork_node_idx - 1, total_branch_nodes); continue; } // Save the original order of nodes in this region before interleaving // This is used later to restore grouping for fusion within streams concurrent_event.original_order.reserve(total_branch_nodes); for (int i = fork_node_idx + 1; i < join_node_idx; ++i) { concurrent_event.original_order.push_back(cgraph->nodes[i]); } std::unordered_map & concurrent_events = cuda_ctx->stream_context().concurrent_events; GGML_ASSERT(concurrent_events.find(root_node) == concurrent_events.end()); concurrent_events.emplace(root_node, std::move(concurrent_event)); GGML_LOG_DEBUG("Adding stream at node %s %p\n", root_node->name, root_node); concurrent_node_ranges.emplace_back(fork_node_idx, join_node_idx); // interleave tensors to extend lifetimes so that ggml graph doesn't recycle them // example transformation: // [attn-norm, QMul, QNorm, QRope, KMul, KNorm, KRope, VMul, attn] -> // [attn-norm, QMul, KMul, VMul, QNorm, VNorm, QRope, KRope, attn] while (current_node_idx < join_node_idx) { std::vector & branch_nodes = nodes_per_branch[current_branch_idx]; bool has_node = false; for (std::vector branch_node : nodes_per_branch) { has_node |= branch_node.size() > 0; } GGML_ASSERT(has_node); if (branch_nodes.empty()) { current_branch_idx = (current_branch_idx + 1) % n_branches; continue; } cgraph->nodes[current_node_idx] = const_cast(branch_nodes.front()); current_node_idx++; branch_nodes.erase(branch_nodes.begin()); // append all empty nodes while (!branch_nodes.empty() && is_noop(branch_nodes.front())) { cgraph->nodes[current_node_idx] = const_cast(branch_nodes.front()); current_node_idx++; branch_nodes.erase(branch_nodes.begin()); } current_branch_idx = (current_branch_idx + 1) % n_branches; } } } } } static const ggml_backend_i ggml_backend_cuda_interface = { /* .get_name = */ ggml_backend_cuda_get_name, /* .free = */ ggml_backend_cuda_free, /* .set_tensor_async = */ ggml_backend_cuda_set_tensor_async, /* .get_tensor_async = */ ggml_backend_cuda_get_tensor_async, /* .cpy_tensor_async = */ ggml_backend_cuda_cpy_tensor_async, /* .synchronize = */ ggml_backend_cuda_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_cuda_graph_compute, /* .event_record = */ ggml_backend_cuda_event_record, /* .event_wait = */ ggml_backend_cuda_event_wait, /* .graph_optimize = */ ggml_backend_cuda_graph_optimize, }; static ggml_guid_t ggml_backend_cuda_guid() { static ggml_guid guid = { 0x2c, 0xdd, 0xe8, 0x1c, 0x65, 0xb3, 0x65, 0x73, 0x6a, 0x12, 0x88, 0x61, 0x1c, 0xc9, 0xdc, 0x25 }; return &guid; } bool ggml_backend_is_cuda(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_cuda_guid()); } int ggml_backend_cuda_get_device_count() { return ggml_cuda_info().device_count; } void 
ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size) { cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); snprintf(description, description_size, "%s", prop.name); } void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total) { ggml_cuda_set_device(device); CUDA_CHECK(cudaMemGetInfo(free, total)); } bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size) { if (getenv("GGML_CUDA_REGISTER_HOST") == nullptr) { return false; } #if CUDART_VERSION >= 11010 || defined(GGML_USE_MUSA) || defined(GGML_USE_HIP) cudaError_t err = cudaHostRegister(buffer, size, cudaHostRegisterPortable | cudaHostRegisterReadOnly); if (err != cudaSuccess) { // clear the error (void)cudaGetLastError(); GGML_LOG_DEBUG("%s: failed to register %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, cudaGetErrorString(err)); return false; } return true; #else GGML_UNUSED(buffer); GGML_UNUSED(size); return false; #endif // CUDART_VERSION >= 11010 || defined(GGML_USE_MUSA) } void ggml_backend_cuda_unregister_host_buffer(void * buffer) { if (getenv("GGML_CUDA_REGISTER_HOST") == nullptr) { return; } cudaError_t err = cudaHostUnregister(buffer); if (err != cudaSuccess) { // clear the error (void)cudaGetLastError(); } } // backend device struct ggml_backend_cuda_device_context { int device; std::string name; std::string description; std::string pci_bus_id; }; static const char * ggml_backend_cuda_device_get_name(ggml_backend_dev_t dev) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; return ctx->name.c_str(); } static const char * ggml_backend_cuda_device_get_description(ggml_backend_dev_t dev) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; return ctx->description.c_str(); } #if defined(__linux__) // Helper function to get available memory from /proc/meminfo for UMA systems static bool ggml_backend_cuda_get_available_uma_memory(long * available_memory_kb, long * free_swap_kb) { FILE * meminfo_file = nullptr; // 2KB buffer for reading /proc/meminfo since it does not report size info, should be enough const size_t BUFFER_SIZE = 2048; auto file_buffer = std::make_unique(BUFFER_SIZE); size_t bytes_read = 0; long huge_tlb_total_pages = -1; long huge_tlb_free_pages = -1; long huge_tlb_page_size = -1; if (available_memory_kb == nullptr || free_swap_kb == nullptr) { return false; } meminfo_file = fopen("/proc/meminfo", "r"); if (meminfo_file == nullptr) { GGML_LOG_ERROR("%s: failed to open /proc/meminfo\n", __func__); return false; } // Read file into buffer bytes_read = fread(file_buffer.get(), 1, BUFFER_SIZE - 1, meminfo_file); fclose(meminfo_file); if (bytes_read == 0) { GGML_LOG_ERROR("%s: failed to read from /proc/meminfo\n", __func__); return false; } file_buffer[bytes_read] = '\0'; *available_memory_kb = -1; *free_swap_kb = -1; // Parse the file buffer line by line char * line = file_buffer.get(); char * line_next; while (line < file_buffer.get() + bytes_read) { // Find the end of the current line line_next = strchr(line, '\n'); if (line_next != nullptr) { *line_next = '\0'; line_next++; } else { line_next = file_buffer.get() + bytes_read; } long value; if (sscanf(line, "MemAvailable: %ld kB", &value) == 1) { *available_memory_kb = value; } else if (sscanf(line, "SwapFree: %ld kB", &value) == 1) { *free_swap_kb = value; } else if (sscanf(line, "HugePages_Total: %ld", &value) == 1) { huge_tlb_total_pages = value; } else if 
(sscanf(line, "HugePages_Free: %ld", &value) == 1) { huge_tlb_free_pages = value; } else if (sscanf(line, "Hugepagesize: %ld kB", &value) == 1) { huge_tlb_page_size = value; } line = line_next; } if (huge_tlb_total_pages != 0 && huge_tlb_total_pages != -1) { *available_memory_kb = huge_tlb_free_pages * huge_tlb_page_size; // Hugetlbfs pages are not swappable. *free_swap_kb = 0; } GGML_LOG_DEBUG("%s: final available_memory_kb: %ld\n", __func__, *available_memory_kb); return true; } #endif // defined(__linux__) static void ggml_backend_cuda_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaMemGetInfo(free, total)); // ref: https://github.com/ggml-org/llama.cpp/pull/17368 #if defined(__linux__) // Check if this is a UMA (Unified Memory Architecture) system cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, ctx->device)); // Check if UMA is explicitly enabled via environment variable bool uma_env = getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr; bool is_uma = prop.integrated > 0 || uma_env; if (is_uma) { // For UMA systems (like DGX Spark), use system memory info long available_memory_kb = 0; long free_swap_kb = 0; if (ggml_backend_cuda_get_available_uma_memory(&available_memory_kb, &free_swap_kb) && available_memory_kb > 0) { *free = (size_t)available_memory_kb * 1024; } else { GGML_LOG_ERROR("%s: /proc/meminfo reading failed, using cudaMemGetInfo\n", __func__); } } #endif // defined(__linux__) } static enum ggml_backend_dev_type ggml_backend_cuda_device_get_type(ggml_backend_dev_t dev) { GGML_UNUSED(dev); return GGML_BACKEND_DEVICE_TYPE_GPU; } static void ggml_backend_cuda_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; props->name = ggml_backend_cuda_device_get_name(dev); props->description = ggml_backend_cuda_device_get_description(dev); props->type = ggml_backend_cuda_device_get_type(dev); props->device_id = ctx->pci_bus_id.empty() ? 
nullptr : ctx->pci_bus_id.c_str(); ggml_backend_cuda_device_get_memory(dev, &props->memory_free, &props->memory_total); bool host_buffer = getenv("GGML_CUDA_NO_PINNED") == nullptr; #ifdef GGML_CUDA_NO_PEER_COPY bool events = false; #else bool events = true; #endif props->caps = { /* .async = */ true, /* .host_buffer = */ host_buffer, /* .buffer_from_host_ptr = */ false, /* .events = */ events, }; } static ggml_backend_t ggml_backend_cuda_device_init_backend(ggml_backend_dev_t dev, const char * params) { GGML_UNUSED(params); ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; return ggml_backend_cuda_init(ctx->device); } static ggml_backend_buffer_type_t ggml_backend_cuda_device_get_buffer_type(ggml_backend_dev_t dev) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; return ggml_backend_cuda_buffer_type(ctx->device); } static ggml_backend_buffer_type_t ggml_backend_cuda_device_get_host_buffer_type(ggml_backend_dev_t dev) { GGML_UNUSED(dev); return ggml_backend_cuda_host_buffer_type(); } // TODO: move these functions here static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context; // split buffers can only be used with GGML_OP_MUL_MAT if (op->op != GGML_OP_MUL_MAT) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (op->src[i] && op->src[i]->buffer && ggml_backend_buft_is_cuda_split(op->src[i]->buffer->buft)) { return false; } } } // check if all the sources are allocated on this device for (int i = 0; i < GGML_MAX_SRC; i++) { if (op->src[i] && op->src[i]->buffer && ggml_backend_buft_is_cuda(op->src[i]->buffer->buft)) { ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)op->src[i]->buffer->buft->context; if (buft_ctx->device != dev_ctx->device) { return false; } } } switch (op->op) { case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_SGN: case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_STEP: case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: case GGML_UNARY_OP_EXPM1: case GGML_UNARY_OP_SOFTPLUS: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_XIELU: case GGML_UNARY_OP_FLOOR: case GGML_UNARY_OP_CEIL: case GGML_UNARY_OP_ROUND: case GGML_UNARY_OP_TRUNC: return ggml_is_contiguous(op->src[0]); default: return false; } break; case GGML_OP_GLU: switch (ggml_get_glu_op(op)) { case GGML_GLU_OP_REGLU: case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_SWIGLU: case GGML_GLU_OP_SWIGLU_OAI: case GGML_GLU_OP_GEGLU_ERF: case GGML_GLU_OP_GEGLU_QUICK: return ggml_is_contiguous_1(op->src[0]); default: return false; } break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { struct ggml_tensor * a = op->src[0]; struct ggml_tensor * b = op->src[1]; if (a->buffer && ggml_backend_buft_is_cuda_split(a->buffer->buft)) { if (a->ne[2] > 1 || a->ne[3] > 1) { return false; } // for small weight matrices the active device can end up without any rows, don't use row split in those cases // this avoids some edge cases (and the performance would not be good anyways) ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) a->buffer->buft->context; int64_t row_low; int64_t row_high; 
get_row_split(&row_low, &row_high, a, buft_ctx->tensor_split, dev_ctx->device); if (row_low == row_high) { return false; } } if (b->type == GGML_TYPE_F16 && a->type != GGML_TYPE_F16) { return false; } #ifdef GGML_USE_MUSA const int cc = ggml_cuda_info().devices[dev_ctx->device].cc; if (b->ne[2]*b->ne[3] > 1 && !ggml_is_transposed(a) && !ggml_is_transposed(b)) { if (GGML_CUDA_CC_IS_QY1(cc) && op->op == GGML_OP_MUL_MAT && a->type == GGML_TYPE_F16 && b->type == GGML_TYPE_F16) { return false; } if (GGML_CUDA_CC_IS_QY2(cc) && op->op == GGML_OP_MUL_MAT_ID && a->type == GGML_TYPE_Q2_K && b->type == GGML_TYPE_F32) { return false; } } #endif // GGML_USE_MUSA switch (a->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: case GGML_TYPE_Q8_K: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_BF16: return true; default: return false; } } break; case GGML_OP_OUT_PROD: return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32; case GGML_OP_GET_ROWS: { switch (op->src[0]->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: case GGML_TYPE_BF16: case GGML_TYPE_I32: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: return true; default: return false; } } break; case GGML_OP_GET_ROWS_BACK: { return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->ne[2] == 1 && op->ne[3] == 1; } break; case GGML_OP_SET_ROWS: { return (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_BF16 || op->type == GGML_TYPE_Q4_0 || op->type == GGML_TYPE_Q4_1 || op->type == GGML_TYPE_Q5_0 || op->type == GGML_TYPE_Q5_1 || op->type == GGML_TYPE_Q8_0 || op->type == GGML_TYPE_IQ4_NL) && op->src[0]->type == GGML_TYPE_F32 && (op->src[1]->type == GGML_TYPE_I64 || op->src[1]->type == GGML_TYPE_I32); } break; case GGML_OP_SET: { const ggml_type t = op->type; return (t == GGML_TYPE_F32 || t == GGML_TYPE_I32) && t == op->src[0]->type && t == op->src[1]->type; } break; case GGML_OP_CPY: { ggml_type src0_type = op->src[0]->type; ggml_type src1_type = op->src[1]->type; if ((src0_type == GGML_TYPE_F32 || src0_type == GGML_TYPE_BF16 || src0_type == GGML_TYPE_F16) && (src1_type == GGML_TYPE_F32 || src1_type == GGML_TYPE_BF16 || src1_type == GGML_TYPE_F16) ) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q8_0) { return true; } if (src0_type == GGML_TYPE_Q8_0 && src1_type == GGML_TYPE_F32) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_0) { return true; } if (src0_type == GGML_TYPE_Q4_0 && src1_type == GGML_TYPE_F32) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_1) { return true; } if (src0_type == GGML_TYPE_Q4_1 && src1_type == GGML_TYPE_F32) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q5_0) { return true; } if (src0_type == GGML_TYPE_Q5_0 && src1_type == GGML_TYPE_F32) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q5_1) { return true; } if (src0_type == GGML_TYPE_Q5_1 && src1_type == GGML_TYPE_F32) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == 
GGML_TYPE_IQ4_NL) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_I32) { return true; } if (src0_type == GGML_TYPE_I32 && src1_type == GGML_TYPE_F32) { return true; } if (src0_type == GGML_TYPE_I32 && src1_type == GGML_TYPE_I32) { return true; } if (src0_type == src1_type && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1])) { return true; } return false; } break; case GGML_OP_DUP: { ggml_type src0_type = op->src[0]->type; return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16; } break; case GGML_OP_ARGMAX: case GGML_OP_COUNT_EQUAL: { return true; } break; case GGML_OP_REPEAT: { ggml_type src0_type = op->src[0]->type; return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16; } break; case GGML_OP_REPEAT_BACK: return op->type == GGML_TYPE_F32 && (op->src[0]->ne[2]*op->src[0]->ne[3]) <= (1 << 15); case GGML_OP_CONCAT: { ggml_type src0_type = op->src[0]->type; return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16; } break; case GGML_OP_CONV_TRANSPOSE_1D: { ggml_type src0_type = op->src[0]->type; ggml_type src1_type = op->src[1]->type; if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { return true; } return false; } break; case GGML_OP_SILU_BACK: return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; break; case GGML_OP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_L2_NORM: return true; case GGML_OP_RMS_NORM_BACK: return ggml_is_contiguous(op->src[0]) && op->ne[0] % WARP_SIZE == 0; break; case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: case GGML_OP_ADD: case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: case GGML_OP_SCALE: case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: case GGML_OP_CLAMP: case GGML_OP_LOG: return true; case GGML_OP_SSM_SCAN: { if (op->src[3]->ne[0] == 1) { // Mamba2 // (kernel only supports (d_state == 128 || d_state == 256) && d_head % 16 == 0) return (op->src[0]->ne[0] == 128 || op->src[0]->ne[0] == 256) && op->src[0]->ne[1] % 16 == 0; } else { // Mamba // (kernel only supports d_state == 16, d_head == 1, n_head % 128 == 0, n_group == 1) return op->src[0]->ne[0] == 16 && op->src[0]->ne[1] == 1 && op->src[0]->ne[2] % 128 == 0 && op->src[4]->ne[1] == 1; } } case GGML_OP_SSM_CONV: { // assumes d_inner % threads == 0 return op->src[0]->ne[1] % 128 == 0; } case GGML_OP_CONT: return true; case GGML_OP_DIAG_MASK_INF: return true; case GGML_OP_SOFT_MAX: return true; case GGML_OP_SOFT_MAX_BACK: { float max_bias = 0.0f; memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float)); return max_bias == 0.0f; } case GGML_OP_ROLL: if(op->src[0]->type == GGML_TYPE_F32) { return true; } return false; case GGML_OP_ROPE: case GGML_OP_ROPE_BACK: { return op->src[0]->nb[0] == ggml_type_size(op->src[0]->type) && ggml_is_contiguous_2(op->src[0]); } case GGML_OP_IM2COL: case GGML_OP_IM2COL_3D: case GGML_OP_CONV_2D: case GGML_OP_CONV_2D_DW: case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_POOL_2D: case GGML_OP_ACC: return true; case GGML_OP_SUM: return ggml_is_contiguous_rows(op->src[0]); case GGML_OP_ARGSORT: #ifndef GGML_CUDA_USE_CUB return op->src[0]->ne[0] <= 1024; #else return true; #endif case GGML_OP_SUM_ROWS: case GGML_OP_MEAN: case GGML_OP_GROUP_NORM: case GGML_OP_PAD: return ggml_is_contiguous(op->src[0]); case GGML_OP_UPSCALE: case GGML_OP_PAD_REFLECT_1D: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: case GGML_OP_RWKV_WKV6: case 
GGML_OP_GATED_LINEAR_ATTN: case GGML_OP_RWKV_WKV7: return true; case GGML_OP_FLASH_ATTN_EXT: return ggml_cuda_flash_attn_ext_supported(dev_ctx->device, op); case GGML_OP_CROSS_ENTROPY_LOSS: case GGML_OP_CROSS_ENTROPY_LOSS_BACK: case GGML_OP_OPT_STEP_ADAMW: case GGML_OP_OPT_STEP_SGD: case GGML_OP_FILL: case GGML_OP_CUMSUM: case GGML_OP_TRI: case GGML_OP_DIAG: case GGML_OP_SOLVE_TRI: return true; default: return false; } } static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context; const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated; return (((ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev) || (integrated && ggml_backend_buft_is_cuda_host(buft))); } static int64_t get_op_batch_size(const ggml_tensor * op) { switch (op->op) { case GGML_OP_GET_ROWS: return 0; case GGML_OP_MUL_MAT: return op->ne[1]; case GGML_OP_MUL_MAT_ID: case GGML_OP_ROPE: case GGML_OP_ROPE_BACK: return op->ne[2]; default: return ggml_nrows(op); } } static bool ggml_backend_cuda_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) { const int min_batch_size = 32; return get_op_batch_size(op) >= min_batch_size; GGML_UNUSED(dev); } static ggml_backend_event_t ggml_backend_cuda_device_event_new(ggml_backend_dev_t dev) { #ifdef GGML_CUDA_NO_PEER_COPY return nullptr; #else ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *)dev->context; ggml_cuda_set_device(dev_ctx->device); cudaEvent_t event; CUDA_CHECK(cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); return new ggml_backend_event { /* .device = */ dev, /* .context = */ event, }; #endif } static void ggml_backend_cuda_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) { GGML_UNUSED(dev); CUDA_CHECK(cudaEventDestroy((cudaEvent_t)event->context)); delete event; } static void ggml_backend_cuda_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) { GGML_UNUSED(dev); CUDA_CHECK(cudaEventSynchronize((cudaEvent_t)event->context)); } static const ggml_backend_device_i ggml_backend_cuda_device_interface = { /* .get_name = */ ggml_backend_cuda_device_get_name, /* .get_description = */ ggml_backend_cuda_device_get_description, /* .get_memory = */ ggml_backend_cuda_device_get_memory, /* .get_type = */ ggml_backend_cuda_device_get_type, /* .get_props = */ ggml_backend_cuda_device_get_props, /* .init_backend = */ ggml_backend_cuda_device_init_backend, /* .get_buffer_type = */ ggml_backend_cuda_device_get_buffer_type, /* .get_host_buffer_type = */ ggml_backend_cuda_device_get_host_buffer_type, /* .buffer_from_host_ptr = */ NULL, /* .supports_op = */ ggml_backend_cuda_device_supports_op, /* .supports_buft = */ ggml_backend_cuda_device_supports_buft, /* .offload_op = */ ggml_backend_cuda_device_offload_op, /* .event_new = */ ggml_backend_cuda_device_event_new, /* .event_free = */ ggml_backend_cuda_device_event_free, /* .event_synchronize = */ ggml_backend_cuda_device_event_synchronize, }; // backend reg struct ggml_backend_cuda_reg_context { std::vector devices; }; static const char * ggml_backend_cuda_reg_get_name(ggml_backend_reg_t reg) { GGML_UNUSED(reg); return GGML_CUDA_NAME; } static size_t ggml_backend_cuda_reg_get_device_count(ggml_backend_reg_t reg) { ggml_backend_cuda_reg_context * ctx = (ggml_backend_cuda_reg_context *)reg->context; return ctx->devices.size(); } static 
ggml_backend_dev_t ggml_backend_cuda_reg_get_device(ggml_backend_reg_t reg, size_t index) {
    ggml_backend_cuda_reg_context * ctx = (ggml_backend_cuda_reg_context *)reg->context;
    GGML_ASSERT(index < ctx->devices.size());
    return ctx->devices[index];
}

static ggml_backend_feature * ggml_backend_cuda_get_features(ggml_backend_reg_t reg) {
    static std::vector<ggml_backend_feature> features = []() {
        std::vector<ggml_backend_feature> features;
    #define _STRINGIFY(...) #__VA_ARGS__
    #define STRINGIFY(...) _STRINGIFY(__VA_ARGS__)
    #ifdef __CUDA_ARCH_LIST__
        features.push_back({ "ARCHS", STRINGIFY(__CUDA_ARCH_LIST__) });
    #endif
    #ifdef GGML_CUDA_FORCE_MMQ
        features.push_back({ "FORCE_MMQ", "1" });
    #endif
    #ifdef GGML_CUDA_FORCE_CUBLAS
        features.push_back({ "FORCE_CUBLAS", "1" });
    #endif
    #ifndef GGML_USE_VMM
        features.push_back({ "NO_VMM", "1" });
    #endif
    #ifdef GGML_CUDA_NO_PEER_COPY
        features.push_back({ "NO_PEER_COPY", "1" });
    #endif
    #ifdef GGML_CUDA_USE_GRAPHS
        features.push_back({ "USE_GRAPHS", "1" });
    #endif
    #ifdef GGML_CUDA_PEER_MAX_BATCH_SIZE
        features.push_back({ "PEER_MAX_BATCH_SIZE", STRINGIFY(GGML_CUDA_PEER_MAX_BATCH_SIZE) });
    #endif
    #ifdef GGML_CUDA_FA_ALL_QUANTS
        features.push_back({ "FA_ALL_QUANTS", "1" });
    #endif
        {
            const auto & info = ggml_cuda_info();
            for (int id = 0; id < info.device_count; ++id) {
                if (blackwell_mma_available(info.devices[id].cc)) {
                    features.push_back({ "BLACKWELL_NATIVE_FP4", "1"});
                    break;
                }
            }
        }
    #undef _STRINGIFY
    #undef STRINGIFY
        features.push_back({ nullptr, nullptr });
        return features;
    }();
    return features.data();
    GGML_UNUSED(reg);
}

static void * ggml_backend_cuda_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    GGML_UNUSED(reg);
    if (strcmp(name, "ggml_backend_split_buffer_type") == 0) {
        return (void *)ggml_backend_cuda_split_buffer_type;
    }
    if (strcmp(name, "ggml_backend_register_host_buffer") == 0) {
        return (void *)ggml_backend_cuda_register_host_buffer;
    }
    if (strcmp(name, "ggml_backend_unregister_host_buffer") == 0) {
        return (void *)ggml_backend_cuda_unregister_host_buffer;
    }
    if (strcmp(name, "ggml_backend_get_features") == 0) {
        return (void *)ggml_backend_cuda_get_features;
    }
    return nullptr;
}

static const ggml_backend_reg_i ggml_backend_cuda_reg_interface = {
    /* .get_name         = */ ggml_backend_cuda_reg_get_name,
    /* .get_device_count = */ ggml_backend_cuda_reg_get_device_count,
    /* .get_device       = */ ggml_backend_cuda_reg_get_device,
    /* .get_proc_address = */ ggml_backend_cuda_reg_get_proc_address,
};

// backend registry
ggml_backend_reg_t ggml_backend_cuda_reg() {
    static ggml_backend_reg reg;
    static bool initialized = false;

    {
        static std::mutex mutex;
        std::lock_guard<std::mutex> lock(mutex);
        if (!initialized) {
            ggml_backend_cuda_reg_context * ctx = new ggml_backend_cuda_reg_context;

            for (int i = 0; i < ggml_cuda_info().device_count; i++) {
                ggml_backend_cuda_device_context * dev_ctx = new ggml_backend_cuda_device_context;
                dev_ctx->device = i;
                dev_ctx->name = GGML_CUDA_NAME + std::to_string(i);

                cudaDeviceProp prop;
                CUDA_CHECK(cudaGetDeviceProperties(&prop, i));
                dev_ctx->description = prop.name;

                char pci_bus_id[16] = {};
                snprintf(pci_bus_id, sizeof(pci_bus_id), "%04x:%02x:%02x.0", prop.pciDomainID, prop.pciBusID, prop.pciDeviceID);
                dev_ctx->pci_bus_id = pci_bus_id;

                ggml_backend_dev_t dev = new ggml_backend_device {
                    /* .iface   = */ ggml_backend_cuda_device_interface,
                    /* .reg     = */ &reg,
                    /* .context = */ dev_ctx
                };
                ctx->devices.push_back(dev);
            }

            reg = ggml_backend_reg {
                /* .api_version = */ GGML_BACKEND_API_VERSION,
                /* .iface       = */ ggml_backend_cuda_reg_interface,
                /* .context     = */ ctx
            };
        }

        initialized = true;
    }

    return &reg;
}
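// Illustrative sketch, not part of the upstream file: how a client could walk the registry
// built above. It assumes the public accessors from ggml-backend.h (ggml_backend_reg_dev_count,
// ggml_backend_reg_dev_get, ggml_backend_dev_name, ggml_backend_dev_description); the function
// name below is hypothetical. Kept under #if 0 so it does not affect compilation.
#if 0
static void ggml_backend_cuda_print_devices_example(void) {
    ggml_backend_reg_t reg = ggml_backend_cuda_reg();
    for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
        ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
        // prints e.g. "CUDA device 0: CUDA0 (NVIDIA ...)"
        GGML_LOG_INFO("CUDA device %zu: %s (%s)\n", i, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev));
    }
}
#endif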
ggml_backend_t ggml_backend_cuda_init(int device) {
    if (device < 0 || device >= ggml_backend_cuda_get_device_count()) {
        GGML_LOG_ERROR("%s: invalid device %d\n", __func__, device);
        return nullptr;
    }

    ggml_backend_cuda_context * ctx = new ggml_backend_cuda_context(device);
    if (ctx == nullptr) {
        GGML_LOG_ERROR("%s: failed to allocate context\n", __func__);
        return nullptr;
    }

    ggml_backend_t cuda_backend = new ggml_backend {
        /* .guid    = */ ggml_backend_cuda_guid(),
        /* .iface   = */ ggml_backend_cuda_interface,
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), device),
        /* .context = */ ctx,
    };

    return cuda_backend;
}

GGML_BACKEND_DL_IMPL(ggml_backend_cuda_reg)
ggml-org-ggml-3678254/src/ggml-cuda/gla.cu000066400000000000000000000060451512524704700201140ustar00rootroot00000000000000
#include "common.cuh"
#include "gla.cuh"

template<int HEAD_SIZE>
static __global__ void gated_linear_attn_f32(const int B, const int T, const int C, const int H, const float scale,
    const float * k, const float * v, const float * r, const float * td, const float * s, float * dst) {
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    const int head_size = HEAD_SIZE;
    const int batch_i = bid / H;
    const int head_i = bid % H;
    const int state_size = C * head_size;
    const int n_seq_tokens = T / B;

    float state[head_size];
    __shared__ float _k[head_size], _r[head_size], _td[head_size];

#pragma unroll
    for (int i = 0; i < head_size; i++) {
        state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid];
    }

    for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) {
        __syncthreads();
        _k[tid] = k[t];
        _r[tid] = r[t];
        _td[tid] = td[t];
        __syncthreads();

        const float _v = v[t];
        float y = 0;
        for (int j = 0; j < head_size; j += 4) {
            const float4 & k = (float4 &)(_k[j]);
            const float4 & r = (float4 &)(_r[j]);
            const float4 & td = (float4 &)(_td[j]);
            float4 & s = (float4 &)(state[j]);
            float4 kv;

            kv.x = k.x * _v;
            kv.y = k.y * _v;
            kv.z = k.z * _v;
            kv.w = k.w * _v;

            s.x = s.x * td.x + kv.x;
            s.y = s.y * td.y + kv.y;
            s.z = s.z * td.z + kv.z;
            s.w = s.w * td.w + kv.w;

            y += r.x * s.x;
            y += r.y * s.y;
            y += r.z * s.z;
            y += r.w * s.w;
        }
        dst[t] = y * scale;
    }

#pragma unroll
    for (int i = 0; i < head_size; i++) {
        dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i];
    }
}

void ggml_cuda_op_gated_linear_attn(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const float * k_d = (const float *)dst->src[0]->data;
    const float * v_d = (const float *)dst->src[1]->data;
    const float * r_d = (const float *)dst->src[2]->data;
    const float * td_d = (const float *)dst->src[3]->data;
    const float * s_d = (const float *)dst->src[4]->data;

    const int64_t B = dst->src[4]->ne[1];
    const int64_t T = dst->src[0]->ne[2];
    const int64_t C = dst->ne[0];
    const int64_t H = dst->src[0]->ne[1];

    float scale;
    memcpy(&scale, (float*)dst->op_params, sizeof(float));

    float * dst_d = (float *)dst->data;

    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(dst->src[4]->type == GGML_TYPE_F32);
    GGML_ASSERT(C % H == 0);
    GGML_ASSERT(C / H == 64 || C / H == 128);

    // one block per (batch, head), one thread per element of the head
    if (C / H == 64) {
        gated_linear_attn_f32<64><<<B * H, C / H, 0, stream>>>(B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d);
    } else {
        gated_linear_attn_f32<128><<<B * H, C / H, 0, stream>>>(B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d);
    }
}
ggml-org-ggml-3678254/src/ggml-cuda/gla.cuh000066400000000000000000000001601512524704700202540ustar00rootroot00000000000000
#include "common.cuh"

void
ggml_cuda_op_gated_linear_attn(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/im2col.cu000066400000000000000000000302611512524704700205330ustar00rootroot00000000000000#include "im2col.cuh" #define MAX_GRIDDIM_Z 65535 template static __global__ void im2col_kernel( const float * x, T * dst, int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, int64_t IC_IH_IW, int64_t IH_IW, int64_t N_OH, int64_t KH_KW, int64_t IC_KH_KW, int s0, int s1, int p0, int p1, int d0, int d1) { const int64_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= IC_KH_KW) { return; } const int64_t iic = i / (KH_KW); const int64_t rem = i - iic * KH_KW; const int64_t ikh = rem / KW; const int64_t ikw = rem - ikh * KW; const int64_t iow = blockIdx.y; for (int64_t iz = blockIdx.z; iz < N_OH; iz+=MAX_GRIDDIM_Z) { const int64_t in = iz / OH; const int64_t ioh = iz - in * OH; const int64_t iiw = iow * s0 + ikw * d0 - p0; const int64_t iih = ioh * s1 + ikh * d1 - p1; const int64_t offset_dst = ((in * OH + ioh) * OW + iow) * IC_KH_KW + iic * KH_KW + ikh * KW + ikw; if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst[offset_dst] = 0.0f; } else { const int64_t offset_src = iic * IC_IH_IW + in * IH_IW; dst[offset_dst] = x[offset_src + iih * IW + iiw]; } } GGML_UNUSED(IC); GGML_UNUSED(KH); } // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] template static void im2col_cuda(const float * x, T* dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, int64_t N, int64_t IC_IH_IW, int64_t IH_IW, int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { const int64_t IC_KH_KW = IC * KH * KW; const int64_t num_blocks = (IC_KH_KW + CUDA_IM2COL_BLOCK_SIZE - 1) / CUDA_IM2COL_BLOCK_SIZE; const int64_t N_OH = N * OH; const int64_t KH_KW = KW*KH; dim3 block_nums(num_blocks, OW, MIN(N_OH, MAX_GRIDDIM_Z)); im2col_kernel<<>>(x, dst, IC, IW, IH, OH, OW, KW, KH, IC_IH_IW, IH_IW, N_OH, KH_KW, IC_KH_KW, s0, s1, p0, p1, d0, d1); } static void im2col_cuda_f16(const float * x, half * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, int64_t N, int64_t IC_IH_IW, int64_t IH_IW, int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } static void im2col_cuda_f32(const float * x, float * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, int64_t N, int64_t IC_IH_IW, int64_t IH_IW, int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const float * src1_d = (const float *)src1->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; const int64_t IC 
= src1->ne[is_2D ? 2 : 1]; const int64_t IH = is_2D ? src1->ne[1] : 1; const int64_t IW = src1->ne[0]; const int64_t KH = is_2D ? src0->ne[1] : 1; const int64_t KW = src0->ne[0]; const int64_t OH = is_2D ? dst->ne[2] : 1; const int64_t OW = dst->ne[1]; const int64_t IC_IH_IW = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32 const int64_t N = src1->ne[is_2D ? 3 : 2]; const int64_t IH_IW = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32 if(dst->type == GGML_TYPE_F16) { im2col_cuda_f16(src1_d, (half *) dst_d, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } else { im2col_cuda_f32(src1_d, (float *) dst_d, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } } // [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] template static __global__ void im2col_3d_kernel( const float * src, T * dst, int64_t N, int64_t IC, int64_t ID, int64_t IH, int64_t IW, int64_t OC, int64_t KD, int64_t KH, int64_t KW, int64_t OD, int64_t OH, int64_t OW, int64_t OH_OW, int64_t KD_KH_KW, int64_t ID_IH_IW, int64_t KH_KW, int64_t IH_IW, int64_t IC_ID_IH_IW, int64_t IC_KD_KH_KW, int64_t OW_KD_KH_KW, int64_t OD_OH_OW_IC_KD_KH_KW, int64_t OH_OW_IC_KD_KH_KW, int64_t OW_IC_KD_KH_KW, int64_t N_OD_OH, int64_t OD_OH, int64_t stride_q, int64_t stride_z, int64_t stride_y, int64_t stride_x, int s0, int s1, int s2, int p0, int p1, int p2, int d0, int d1, int d2) { const int64_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= IC_KD_KH_KW) { return; } GGML_UNUSED(N); GGML_UNUSED(OC); GGML_UNUSED(OH_OW); GGML_UNUSED(OD); GGML_UNUSED(OW); GGML_UNUSED(KD); GGML_UNUSED(KH); GGML_UNUSED(ID_IH_IW); GGML_UNUSED(IH_IW); GGML_UNUSED(IC_ID_IH_IW); GGML_UNUSED(OW_KD_KH_KW); const int64_t iic = i / KD_KH_KW; const int64_t ikd = (i - iic * KD_KH_KW) / KH_KW; const int64_t ikh = (i - iic * KD_KH_KW - ikd * KH_KW) / KW; const int64_t ikw = i % KW; const int64_t iow = blockIdx.y; for (int64_t iz = blockIdx.z; iz < N_OD_OH; iz+=MAX_GRIDDIM_Z) { const int64_t in = iz / OD_OH; const int64_t iod = (iz - in*OD_OH) / OH; const int64_t ioh = iz % OH; const int64_t iiw = iow * s0 + ikw * d0 - p0; const int64_t iih = ioh * s1 + ikh * d1 - p1; const int64_t iid = iod * s2 + ikd * d2 - p2; const int64_t offset_dst = in*OD_OH_OW_IC_KD_KH_KW + iod*OH_OW_IC_KD_KH_KW + ioh*OW_IC_KD_KH_KW + iow*IC_KD_KH_KW + iic*KD_KH_KW + ikd * KH_KW + ikh*KW + ikw; if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || iid < 0 || iid >= ID) { dst[offset_dst] = 0.0f; } else { const int64_t offset_src = ((in * IC + iic) * stride_q) + (iid * stride_z) + (iih * stride_y) + (iiw * stride_x); dst[offset_dst] = src[offset_src]; } } } // [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] template static void im2col_3d_cuda(const float * src, T* dst, int64_t N, int64_t IC, int64_t ID, int64_t IH, int64_t IW, int64_t OC, int64_t KD, int64_t KH, int64_t KW, int64_t OD, int64_t OH, int64_t OW, int64_t stride_q, int64_t stride_z, int64_t stride_y, int64_t stride_x, int s0, int s1, int s2, int p0, int p1, int p2, int d0, int d1, int d2, cudaStream_t stream) { const int64_t OH_OW = OH*OW; const int64_t KD_KH_KW = KD*KH*KW; const int64_t ID_IH_IW = ID*IH*IW; const int64_t KH_KW = KH*KW; const int64_t IH_IW = IH*IW; const int64_t IC_KD_KH_KW = IC*KD*KH*KW; const int64_t OW_KD_KH_KW = OW*KD*KH*KW; const int64_t N_OD_OH = N*OD*OH; const int64_t OD_OH = OD*OH; const int64_t IC_ID_IH_IW = IC*ID*IH*IW; const int64_t OD_OH_OW_IC_KD_KH_KW = OD*OH*OW*IC*KD*KH*KW; const int64_t 
OH_OW_IC_KD_KH_KW = OH*OW*IC*KD*KH*KW; const int64_t OW_IC_KD_KH_KW = OW*IC*KD*KH*KW; const int64_t num_blocks = (IC_KD_KH_KW + CUDA_IM2COL_BLOCK_SIZE - 1) / CUDA_IM2COL_BLOCK_SIZE; dim3 block_nums(num_blocks, OW, MIN(N_OD_OH, MAX_GRIDDIM_Z)); im2col_3d_kernel<<>>(src, dst, N, IC, ID, IH, IW, OC, KD, KH, KW, OD, OH, OW, OH_OW, KD_KH_KW, ID_IH_IW, KH_KW, IH_IW, IC_ID_IH_IW, IC_KD_KH_KW, OW_KD_KH_KW, OD_OH_OW_IC_KD_KH_KW, OH_OW_IC_KD_KH_KW, OW_IC_KD_KH_KW, N_OD_OH, OD_OH, stride_q, stride_z, stride_y, stride_x, s0, s1, s2, p0, p1, p2, d0, d1, d2); } static void im2col_3d_cuda_f16(const float * src, half * dst, int64_t N, int64_t IC, int64_t ID, int64_t IH, int64_t IW, int64_t OC, int64_t KD, int64_t KH, int64_t KW, int64_t OD, int64_t OH, int64_t OW, int64_t stride_q, int64_t stride_z, int64_t stride_y, int64_t stride_x, int s0, int s1, int s2, int p0, int p1, int p2, int d0, int d1, int d2, cudaStream_t stream) { im2col_3d_cuda(src, dst, N, IC, ID, IH, IW, OC, KD, KH, KW, OD, OH, OW, stride_q, stride_z, stride_y, stride_x, s0, s1, s2, p0, p1, p2, d0, d1, d2, stream); } static void im2col_3d_cuda_f32(const float * src, float * dst, int64_t N, int64_t IC, int64_t ID, int64_t IH, int64_t IW, int64_t OC, int64_t KD, int64_t KH, int64_t KW, int64_t OD, int64_t OH, int64_t OW, int64_t stride_q, int64_t stride_z, int64_t stride_y, int64_t stride_x, int s0, int s1, int s2, int p0, int p1, int p2, int d0, int d1, int d2, cudaStream_t stream) { im2col_3d_cuda(src, dst, N, IC, ID, IH, IW, OC, KD, KH, KW, OD, OH, OW, stride_q, stride_z, stride_y, stride_x, s0, s1, s2, p0, p1, p2, d0, d1, d2, stream); } void ggml_cuda_op_im2col_3d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const float * src1_d = (const float *)src1->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; const int32_t s2 = ((const int32_t *)(dst->op_params))[2]; const int32_t p0 = ((const int32_t *)(dst->op_params))[3]; const int32_t p1 = ((const int32_t *)(dst->op_params))[4]; const int32_t p2 = ((const int32_t *)(dst->op_params))[5]; const int32_t d0 = ((const int32_t *)(dst->op_params))[6]; const int32_t d1 = ((const int32_t *)(dst->op_params))[7]; const int32_t d2 = ((const int32_t *)(dst->op_params))[8]; const int32_t IC = ((const int32_t *)(dst->op_params))[9]; const int64_t N = ne13 / IC; const int64_t ID = ne12; const int64_t IH = ne11; const int64_t IW = ne10; const int64_t OC = ne03 / IC; const int64_t KD = ne02; const int64_t KH = ne01; const int64_t KW = ne00; const int64_t OD = ne3 / N; const int64_t OH = ne2; const int64_t OW = ne1; const size_t es = ggml_element_size(src1); const int64_t stride_x = src1->nb[0] / es; const int64_t stride_y = src1->nb[1] / es; const int64_t stride_z = src1->nb[2] / es; const int64_t stride_q = src1->nb[3] / es; if(dst->type == GGML_TYPE_F16) { im2col_3d_cuda_f16(src1_d, (half *) dst_d, N, IC, ID, IH, IW, OC, KD, KH, KW, OD, OH, OW, stride_q, stride_z, stride_y, stride_x, s0, s1, s2, p0, p1, p2, d0, d1, d2, stream); } else { im2col_3d_cuda_f32(src1_d, (float *) dst_d, N, IC, ID, IH, IW, OC, KD, KH, KW, OD, OH, OW, stride_q, stride_z, stride_y, stride_x, s0, s1, s2, p0, p1, p2, d0, d1, d2, stream); } } 
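// Illustrative sketch, not part of the upstream file: the im2col kernels above take OW/OH/OD
// directly from the destination tensor, which ggml sizes with the usual convolution output
// formula per axis. The helper below only restates that arithmetic; its name is hypothetical.
// Kept under #if 0 so it does not affect compilation.
#if 0
static int64_t conv_out_size_example(int64_t ins, int64_t ks, int s, int p, int d) {
    // e.g. IW=224, KW=3, s=1, p=1, d=1 -> OW = (224 + 2*1 - 1*(3-1) - 1)/1 + 1 = 224
    return (ins + 2*p - d*(ks - 1) - 1) / s + 1;
}
#endif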
ggml-org-ggml-3678254/src/ggml-cuda/im2col.cuh000066400000000000000000000003321512524704700206770ustar00rootroot00000000000000#include "common.cuh" #define CUDA_IM2COL_BLOCK_SIZE 256 void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_im2col_3d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/mean.cu000066400000000000000000000055441512524704700202740ustar00rootroot00000000000000#include "mean.cuh" #include "reduce_rows.cuh" #ifdef GGML_CUDA_USE_CUB #include using namespace cub; #endif // GGML_CUDA_USE_CUB template __global__ void divide_by_count(T * result, size_t count) { *result /= static_cast(count); } void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *) src0->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); const int64_t ncols = src0->ne[0]; const int64_t nrows = ggml_nrows(src0); // Special case for reducing vectors #ifdef GGML_CUDA_USE_CUB #ifdef USE_CUDA_GRAPH cudaStreamCaptureStatus iscapturing; CUDA_CHECK(cudaStreamIsCapturing(stream, &iscapturing)); #endif // USE_CUDA_GRAPH if ((nrows == 1) && #ifdef USE_CUDA_GRAPH // CUDA_GRAPHS_DISABLED ((ncols > 65536) && ((ctx.cuda_graph->instance == nullptr) && (iscapturing == cudaStreamCaptureStatusNone) || ctx.cuda_graph->disable_due_to_gpu_arch || ctx.cuda_graph->disable_due_to_too_many_updates || ctx.cuda_graph->disable_due_to_failed_graph_capture)) || // CUDA_GRAPHS ENABLED ((ncols > 32768) && !((ctx.cuda_graph->instance == nullptr) && (iscapturing == cudaStreamCaptureStatusNone) || ctx.cuda_graph->disable_due_to_gpu_arch || ctx.cuda_graph->disable_due_to_too_many_updates || ctx.cuda_graph->disable_due_to_failed_graph_capture))) { #else (ncols > 65536)) { #endif // USE_CUDA_GRAPH // Single row - use device-wide reduction size_t tmp_size = 0; ggml_cuda_pool & pool = ctx.pool(); DeviceReduce::Sum(nullptr, tmp_size, src0_d, dst_d, ncols, stream); ggml_cuda_pool_alloc tmp_alloc(pool, tmp_size); DeviceReduce::Sum(tmp_alloc.ptr, tmp_size, src0_d, dst_d, ncols, stream); // Divide by ncols divide_by_count<<<1, 1, 0, stream>>>(dst_d, ncols); return; } #endif // GGML_CUDA_USE_CUB const dim3 block_nums(nrows, 1, 1); const int id = ggml_cuda_get_device(); const int nsm = ggml_cuda_info().devices[id].nsm; // Heuristic for block size selection to optimize occupancy. // See discussion in: https://github.com/ggml-org/llama.cpp/pull/15132 if ((nrows / nsm) < 2) { const dim3 block_dims(512, 1, 1); reduce_rows_f32<<>>(src0_d, dst_d, ncols); } else { const dim3 block_dims(ncols < 1024 ? 32 : 128, 1, 1); reduce_rows_f32<<>>(src0_d, dst_d, ncols); } } ggml-org-ggml-3678254/src/ggml-cuda/mean.cuh000066400000000000000000000001431512524704700204320ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/mma.cuh000066400000000000000000001451021512524704700202710ustar00rootroot00000000000000#pragma once // This file contains primitives that expose the tensor core PTX instructions for CUDA code. // The primitives can be used in a similar way as the nvcuda::wmma interface but with a well-defined memory layout. 
// The documentation for the PTX instructions can be found under: // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-multiply-accumulate-operation-using-mma-instruction // // Like with nvcuda::wmma there are three types of matrix tiles: A, B, and C with A @ B = C. // A is a row-major matrix with shape M x K. // B is a column-major matrix with shape K x N. // C is a column-major matrix with shape M x N. // A, B, and C are represented using the same fundamental data type: a row-major matrix with I rows and J columns. // Note that J is measured in physical 32 bit elements instead of logical elements. // The methods get_i and get_j can be used to get the physical 32 bit index of the lth element of a thread within a tile. // All matrix tiles have ne physical 32 bit elements per warp. // // As described in the PTX documentation, all pointers for load_ldmatrix must be to shared memory and aligned to 16 bytes. // The API in this file also assumes that the pointers for load_generic are aligned to 16 bytes, unaligned pointers are considered undefined behavior. #include "common.cuh" // On Volta each warp is doing 4 8x8 mma operations in parallel. // The basic memory layout for a 32x8 output tile is to stack 4 input tiles in I direction and to mirror the B tile. // However, the i indices in this file are by default permuted to simplify the index calculations. // #define GGML_CUDA_MMA_NO_VOLTA_PERM #if CUDART_VERSION >= 11080 static __device__ __forceinline__ int ggml_cuda_movmatrix(const int x) { int ret = 0; #ifdef TURING_MMA_AVAILABLE asm("movmatrix.sync.aligned.m8n8.trans.b16 %0, %1;" : "=r"(ret) : "r"(x)); #else GGML_UNUSED(x); NO_DEVICE_CODE; #endif // defined(TURING_MMA_AVAILABLE) return ret; } #else static __device__ __forceinline__ int ggml_cuda_movmatrix(const int x) { // Imagine transposing row-major matrix to column-major matrix. const int src_i_low = 2 * (threadIdx.x % 4); const int src_i_high = src_i_low + 1; const int src_j = threadIdx.x / 4; const int src_laneid_low = src_i_low * 4 + src_j / 2; const int src_laneid_high = src_i_high * 4 + src_j / 2; const int shift_low = ((src_j + 0) % 2) * 16; const int shift_high = ((src_j + 1) % 2) * 16; const int ret_low = (__shfl_sync(0xFFFFFFFF, x, src_laneid_low, WARP_SIZE) >> shift_low) & 0x0000FFFF; const int ret_high = (__shfl_sync(0xFFFFFFFF, x, src_laneid_high, WARP_SIZE) << shift_high) & 0xFFFF0000; return ret_low | ret_high; } #endif // CUDART_VERSION >= 11080 static __device__ __forceinline__ half2 ggml_cuda_movmatrix(const half2 x) { half2 ret; *((int *) &ret) = ggml_cuda_movmatrix(*((const int *) &x)); return ret; } namespace ggml_cuda_mma { // Some architectures like Volta or CDNA3 perform multiple matrix multiplications per warp in parallel, // effectively the warp is being split into subgroups of threads that each perform a single mma instruction. // In those cases the data can be split in different ways across the warp. enum data_layout { // By default the data uses the I direction as its major dimension and the J direction as its minor dimension. // For the A/C matrices this means I major == row major, J major == column major. // For the B matrix this means I major == column major, J major == row major. // MIRRORED == Each data value is held exactly once per thread subgroup. DATA_LAYOUT_I_MAJOR = 0, // Always used for Turing, Ampere, Ada Lovelace, consumer Blackwell, matrix A&B for RDNA4 and CDNA. DATA_LAYOUT_J_MAJOR = 10, // Matrix C for CDNA and RDNA4, int and float matrix C for RDNA3. 
DATA_LAYOUT_I_MAJOR_MIRRORED = 20, // Volta, matrix A&B for RDNA3. DATA_LAYOUT_J_MAJOR_MIRRORED = 30, }; // Implemented mma combinations are: // - (I_MAJOR, I_MAJOR) -> I_MAJOR // - (I_MAJOR, I_MAJOR_MIRRORED) -> I_MAJOR // - (I_MAJOR, J_MAJOR_MIRRORED) -> I_MAJOR static constexpr bool is_i_major(const data_layout dl) { return dl == DATA_LAYOUT_I_MAJOR || dl == DATA_LAYOUT_I_MAJOR_MIRRORED; } static constexpr __device__ data_layout get_input_data_layout() { #if defined(RDNA3) || __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA return DATA_LAYOUT_I_MAJOR_MIRRORED; #else return DATA_LAYOUT_I_MAJOR; #endif // defined(RDNA3) || __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA } template struct tile {}; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_I_MAJOR; #if defined(AMD_MFMA_AVAILABLE) static constexpr int ne = I * J / 64; T x[ne] = {0}; static constexpr __device__ bool supported() { if (I == 64 && J == 2) return true; if (I == 16 && J == 8) return true; if (I == 32 && J == 4) return true; if (I == 16 && J == 16) return true; if (I == 32 && J == 32) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 64 && J == 2) { // Special tile size to load <16, 4> as <16, 8> return threadIdx.x % 16; } else if constexpr (I == 16 && J == 8) { return threadIdx.x % 16; } else if constexpr (I == 32 && J == 4) { return threadIdx.x % 32; } else if constexpr (I == 16 && J == 16) { return threadIdx.x % 16; } else if constexpr (I == 32 && J == 32) { return threadIdx.x % 32; } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 64 && J == 2) { // Special tile size to load <16, 4> as <16, 8> return (2 * ((threadIdx.x / 16) % 2) + l); } else if constexpr (I == 16 && J == 8) { return 2 * (threadIdx.x / 16) + l; } else if constexpr (I == 32 && J == 4) { return 2 * (threadIdx.x / 32) + l; } else if constexpr (I == 16 && J == 16) { return 4 * (threadIdx.x / 16) + l; } else if constexpr (I == 32 && J == 32) { return 4 * (threadIdx.x / 32) + 8 * (l / 4) + (l % 4); } else { NO_DEVICE_CODE; return -1; } } #elif __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA static constexpr int ne = I * J / 32; T x[ne] = {0}; static constexpr __device__ bool supported() { if (I == 32 && J == 8) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 32 && J == 8) { #ifdef GGML_CUDA_MMA_NO_VOLTA_PERM return (((threadIdx.x % 16) / 4) * 8) + ((threadIdx.x / 16) * 4) + (l & 2) + (threadIdx.x % 2); #else return (l & 2) + (threadIdx.x & ~2); #endif // GGML_CUDA_MMA_NO_VOLTA_PERM } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 32 && J == 8) { return (threadIdx.x & 2) + (l & (4 + 1)); } else { NO_DEVICE_CODE; return -1; } } #elif defined(AMD_WMMA_AVAILABLE) static constexpr int ne = I * J / 32; T x[ne] = {0}; static constexpr __device__ bool supported() { if (I == 16 && J == 16) return true; if (I == 16 && J == 8) return true; if (I == 16 && J == 4) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (supported()) { return threadIdx.x % 16; } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 16 && J == 16) { // matrix C #if defined(RDNA3) return 2 * l + (threadIdx.x / 16); #else return ne * (threadIdx.x / 16) + l; #endif // defined(RDNA3) } else if 
constexpr (I == 16 && J == 8) { // mmq input for RDNA4 return ne * (threadIdx.x / 16) + l; } else if constexpr (I == 16 && J == 4) { return ne * (threadIdx.x / 16) + l; } else { NO_DEVICE_CODE; return -1; } } #else static constexpr int ne = I * J / 32; T x[ne] = {0}; static constexpr __device__ bool supported() { if (I == 8 && J == 4) return true; if (I == 8 && J == 8) return true; if (I == 16 && J == 8) return true; if (I == 16 && J == 16) return true; if (I == 32 && J == 8) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 8 && J == 4) { return threadIdx.x / 4; } else if constexpr (I == 8 && J == 8) { return threadIdx.x / 4; } else if constexpr (I == 16 && J == 8) { return ((l / 2) * 8) + (threadIdx.x / 4); } else if constexpr (I == 16 && J == 16) { return (((l / 2) % 2) * 8) + (threadIdx.x / 4); } else if constexpr (I == 32 && J == 8) { return tile<16, 8, T>::get_i(l); // Memory layout simply repeated with same pattern in i direction. } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 8 && J == 4) { return threadIdx.x % 4; } else if constexpr (I == 8 && J == 8) { return (l * 4) + (threadIdx.x % 4); } else if constexpr (I == 16 && J == 8) { return ((threadIdx.x % 4) * 2) + (l % 2); } else if constexpr (I == 16 && J == 16) { return ((l / 4) * 8) + ((threadIdx.x % 4) * 2) + (l % 2); } else if constexpr (I == 32 && J == 8) { return tile<16, 8, T>::get_j(l); // Memory layout simply repeated with same pattern in i direction. } else { NO_DEVICE_CODE; return -1; } } #endif // defined(GGML_USE_HIP) }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_I_MAJOR; #if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA static constexpr int ne = I * J / WARP_SIZE; half2 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { if (I == 32 && J == 4) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 32 && J == 4) { #ifdef GGML_CUDA_MMA_NO_VOLTA_PERM return (((threadIdx.x % 16) / 4) * 8) + ((threadIdx.x / 16) * 4) + (threadIdx.x % 4); #else return threadIdx.x; #endif // GGML_CUDA_MMA_NO_VOLTA_PERM } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 32 && J == 4) { return l; } else { NO_DEVICE_CODE; return -1; } } #elif defined(AMD_WMMA_AVAILABLE) static constexpr int ne = I * J / 32; half2 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { if (I == 16 && J == 8) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 16 && J == 8) { return threadIdx.x % 16; } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 16 && J == 8) { return 4 * (threadIdx.x / 16) + l; } else { NO_DEVICE_CODE; return -1; } } #else static constexpr int ne = I * J / WARP_SIZE; half2 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { if (I == 8 && J == 4) return true; if (I == 8 && J == 8) return true; if (I == 16 && J == 8) return true; if (I == 16 && J == 16) return true; if (I == 32 && J == 8) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 8 && J == 8) { return threadIdx.x / 4; } else if constexpr (I == 16 && J == 4) { return (l * 8) + (threadIdx.x / 4); } else if constexpr (I == 16 && J == 8) { return 
((l % 2) * 8) + (threadIdx.x / 4); } else if constexpr (I == 32 && J == 8) { return ((l / 4) * 16) + ((l % 2) * 8) + (threadIdx.x / 4); } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 8 && J == 8) { return (l * 4) + (threadIdx.x % 4); } else if constexpr (I == 16 && J == 4) { return threadIdx.x % 4; } else if constexpr (I == 16 && J == 8) { return ((l / 2) * 4) + (threadIdx.x % 4); } else if constexpr (I == 32 && J == 8) { return ((l & 2) * 2) + (threadIdx.x % 4); } else { NO_DEVICE_CODE; return -1; } } #endif // __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_I_MAJOR; #if defined(AMD_WMMA_AVAILABLE) static constexpr int ne = I * J / 32; nv_bfloat162 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { return tile::supported(); } static __device__ __forceinline__ int get_i(const int l) { return tile::get_i(l); } static __device__ __forceinline__ int get_j(const int l) { return tile::get_j(l); } #else static constexpr int ne = I * J / WARP_SIZE; nv_bfloat162 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { if (I == 8 && J == 8) return true; if (I == 16 && J == 4) return true; if (I == 16 && J == 8) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 8 && J == 8) { return threadIdx.x / 4; } else if constexpr (I == 16 && J == 4) { return (l * 8) + (threadIdx.x / 4); } else if constexpr (I == 16 && J == 8) { return ((l % 2) * 8) + (threadIdx.x / 4); } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 8 && J == 8) { return (l * 4) + (threadIdx.x % 4); } else if constexpr (I == 16 && J == 4) { return threadIdx.x % 4; } else if constexpr (I == 16 && J == 8) { return ((l / 2) * 4) + (threadIdx.x % 4); } else { NO_DEVICE_CODE; return -1; } } #endif // defined(AMD_WMMA_AVAILABLE) }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_J_MAJOR; static constexpr int ne = tile::ne; T x[ne] = {0}; static constexpr __device__ bool supported() { return tile::supported(); } static __device__ __forceinline__ int get_i(const int l) { return tile::get_j(l); } static __device__ __forceinline__ int get_j(const int l) { return tile::get_i(l); } }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_I_MAJOR_MIRRORED; // RDNA3 static constexpr int ne = I * J / 32 * 2; T x[ne] = {0}; static constexpr __device__ bool supported() { if (I == 16 && J == 16) return true; if (I == 16 && J == 8) return true; if (I == 16 && J == 4) return true; return false; } static __device__ __forceinline__ int get_i(const int /*l*/) { if constexpr (supported()) { return threadIdx.x % 16; } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (supported()) { return l; } else { NO_DEVICE_CODE; return -1; } } }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_I_MAJOR_MIRRORED; #if defined(RDNA3) static constexpr int ne = tile::ne; half2 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { return tile::supported(); } static __device__ __forceinline__ int get_i(const int l) { return tile::get_i(l); } 
static __device__ __forceinline__ int get_j(const int l) { return tile::get_j(l); } #else // Volta static constexpr int ne = I * J / (WARP_SIZE/4); half2 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { if (I == 8 && J == 4) return true; return false; } static __device__ __forceinline__ int get_i(const int /*l*/) { if constexpr (I == 8 && J == 4) { return ((threadIdx.x / 16) * 4) + (threadIdx.x % 4); } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 8 && J == 4) { return l; } else { NO_DEVICE_CODE; return -1; } } #endif // defined(RDNA3) }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_I_MAJOR_MIRRORED; static constexpr int ne = tile::ne; nv_bfloat162 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { return tile::supported(); } static __device__ __forceinline__ int get_i(const int l) { return tile::get_i(l); } static __device__ __forceinline__ int get_j(const int l) { return tile::get_j(l); } }; template struct tile { static constexpr int I = I_; static constexpr int J = J_; static constexpr data_layout dl = DATA_LAYOUT_J_MAJOR_MIRRORED; static constexpr int ne = I * J / (WARP_SIZE/4); half2 x[ne] = {{0.0f, 0.0f}}; static constexpr __device__ bool supported() { if (I == 8 && J == 4) return true; return false; } static __device__ __forceinline__ int get_i(const int l) { if constexpr (I == 8 && J == 4) { return ((l / 2) * 4) + (threadIdx.x % 4); } else { NO_DEVICE_CODE; return -1; } } static __device__ __forceinline__ int get_j(const int l) { if constexpr (I == 8 && J == 4) { return ((threadIdx.x / 16) * 2) + (l % 2); } else { NO_DEVICE_CODE; return -1; } } }; #if defined(TURING_MMA_AVAILABLE) template static __device__ __forceinline__ tile get_half2(const tile & tile_float) { tile ret; #pragma unroll for (int l0 = 0; l0 < tile_float.ne; l0 += 2) { ret.x[l0/2] = make_half2(tile_float.x[l0 + 0], tile_float.x[l0 + 1]); } return ret; } static __device__ __forceinline__ tile<8, 8, half2> get_transposed(const tile<16, 4, half2> & t) { tile<8, 8, half2> ret; ret.x[0] = ggml_cuda_movmatrix(t.x[0]); ret.x[1] = ggml_cuda_movmatrix(t.x[1]); return ret; } #else // Volta template static __device__ __forceinline__ tile get_half2(const tile & tile_float) { tile ret; #pragma unroll for (int l0 = 0; l0 < tile_float.ne; l0 += 4) { ret.x[l0/2 + 0] = make_half2(tile_float.x[l0 + 0], tile_float.x[l0 + 1]); ret.x[l0/2 + 1] = make_half2(tile_float.x[l0 + 2], tile_float.x[l0 + 3]); // On Volta FP16 and FP32 tiles have a different memory layout, // for the conversion threads with an offset of 2 need to exchange half their values: ret.x[l0/2 + (((threadIdx.x % 4) / 2) ^ 1)] = __shfl_xor_sync( 0xFFFFFFFF, ret.x[l0/2 + (((threadIdx.x % 4) / 2) ^ 1)], 2, WARP_SIZE); } return ret; } #endif // defined(TURING_MMA_AVAILABLE) template static __device__ __forceinline__ void load_generic(tile & t, const T * __restrict__ xs0, const int stride) { #if defined(AMD_MFMA_AVAILABLE) if constexpr (I == 64 && J == 2) { // Special tile size to load <16, 4> as <16, 8> #pragma unroll for (int l = 0; l < t.ne; ++l) { t.x[l] = xs0[t.get_i(l)*stride + t.get_j(l)]; } } else { ggml_cuda_memcpy_1(t.x, xs0 + t.get_i(0) * stride + t.get_j(0)); } #elif defined(AMD_WMMA_AVAILABLE) // All wmma layout has contiguous data when i-major. 
if constexpr (is_i_major(dl)) { // the data must be aligned to 16 bytes when bigger than ggml_cuda_get_max_cpy_bytes() constexpr int aligned_copy_bytes = ggml_cuda_get_max_cpy_bytes(); if constexpr (sizeof(t.x) > aligned_copy_bytes) { static_assert(sizeof(t.x) % aligned_copy_bytes == 0, "bad type size"); constexpr int aligned_copy_count = sizeof(t.x)/aligned_copy_bytes; #pragma unroll for (int i = 0; i < aligned_copy_count; ++i) { ggml_cuda_memcpy_1(t.x + t.ne/aligned_copy_count*i, xs0 + t.get_i(0) * stride + t.get_j(t.ne/aligned_copy_count*i)); } } else { ggml_cuda_memcpy_1(t.x, xs0 + t.get_i(0) * stride + t.get_j(0)); } } else { #pragma unroll for (int l = 0; l < t.ne; ++l) { t.x[l] = xs0[t.get_i(l)*stride + t.get_j(l)]; } } #else #pragma unroll for (int l = 0; l < t.ne; ++l) { t.x[l] = xs0[t.get_i(l)*stride + t.get_j(l)]; } #endif // defined(AMD_MFMA_AVAILABLE) } template static __device__ __forceinline__ void load_ldmatrix( tile<8, 8, T> & t, const T * __restrict__ xs0, const int stride) { #ifdef TURING_MMA_AVAILABLE int * xi = (int *) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride + ((threadIdx.x / t.I) * (t.J / 2)) % t.J; asm volatile("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" : "=r"(xi[0]), "=r"(xi[1]) : "l"(xs)); #else load_generic(t, xs0, stride); #endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void load_ldmatrix( tile<16, 4, T> & t, const T * __restrict__ xs0, const int stride) { #ifdef TURING_MMA_AVAILABLE int * xi = (int *) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride; asm volatile("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" : "=r"(xi[0]), "=r"(xi[1]) : "l"(xs)); #else #if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA GGML_UNUSED_VARS(t, xs0, stride); NO_DEVICE_CODE; #else load_generic(t, xs0, stride); #endif // __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA #endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void load_ldmatrix( tile<16, 8, T, dl> & t, const T * __restrict__ xs0, const int stride) { #if defined(TURING_MMA_AVAILABLE) int * xi = (int * ) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride + (threadIdx.x / t.I) * (t.J / 2); asm volatile("ldmatrix.sync.aligned.m8n8.x4.b16 {%0, %1, %2, %3}, [%4];" : "=r"(xi[0]), "=r"(xi[1]), "=r"(xi[2]), "=r"(xi[3]) : "l"(xs)); #else #if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA #if 1 // TODO: more generic handling static_assert(sizeof(T) == 4, "bad type size"); ggml_cuda_memcpy_1<4*sizeof(T)>(t.x + 0, xs0 + t.get_i(0)*stride + 0); ggml_cuda_memcpy_1<4*sizeof(T)>(t.x + 4, xs0 + t.get_i(4)*stride + 4); #else load_generic(t, xs0, stride); #endif // 1 #else load_generic(t, xs0, stride); #endif // __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA #endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void load_ldmatrix( tile<8, 4, half2, DATA_LAYOUT_I_MAJOR_MIRRORED> & t, const half2 * __restrict__ xs0, const int stride) { ggml_cuda_memcpy_1<4*sizeof(half2)>(t.x, xs0 + t.get_i(0)*stride); } static __device__ __forceinline__ void load_ldmatrix( tile<8, 4, half2, DATA_LAYOUT_J_MAJOR_MIRRORED> & t, const half2 * __restrict__ xs0, const int stride) { #pragma unroll for (int l0 = 0; l0 < t.ne; l0 += 2) { ggml_cuda_memcpy_1<2*sizeof(half2)>(t.x + l0, xs0 + t.get_i(l0)*stride + t.get_j(l0)); } } static __device__ __forceinline__ void load_ldmatrix( tile<32, 4, half2> & t, const half2 * __restrict__ xs0, const int stride) { #if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA ggml_cuda_memcpy_1<4*sizeof(half2)>(t.x, xs0 + t.get_i(0)*stride); #else 
GGML_UNUSED_VARS(t, xs0, stride); NO_DEVICE_CODE; #endif // __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA } template static __device__ __forceinline__ void load_ldmatrix_trans( tile<16, 8, T> & t, const T * __restrict__ xs0, const int stride) { #ifdef TURING_MMA_AVAILABLE int * xi = (int * ) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride + (threadIdx.x / t.I) * (t.J / 2); asm volatile("ldmatrix.sync.aligned.m8n8.x4.trans.b16 {%0, %1, %2, %3}, [%4];" : "=r"(xi[0]), "=r"(xi[2]), "=r"(xi[1]), "=r"(xi[3]) : "l"(xs)); #else GGML_UNUSED_VARS(t, xs0, stride); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, int> & D, const tile<16, 4, int> & A, const tile<8, 4, int> & B) { #ifdef TURING_MMA_AVAILABLE #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(D.x[0]), "+r"(D.x[1]), "+r"(D.x[2]), "+r"(D.x[3]) : "r"(A.x[0]), "r"(A.x[1]), "r"(B.x[0])); #else // On Turing m16n8k16 mma is not available, use 2x m8n8k16 mma instead: asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(D.x[0]), "+r"(D.x[1]) : "r"(A.x[0]), "r"(B.x[0])); asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(D.x[2]), "+r"(D.x[3]) : "r"(A.x[1]), "r"(B.x[0])); #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, int> & D, const tile<16, 8, int> & A, const tile<8, 8, int> & B) { #ifdef TURING_MMA_AVAILABLE #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(D.x[0]), "+r"(D.x[1]), "+r"(D.x[2]), "+r"(D.x[3]) : "r"(A.x[0]), "r"(A.x[1]), "r"(A.x[2]), "r"(A.x[3]), "r"(B.x[0]), "r"(B.x[1])); #else // On Turing m16n8k32 mma is not available, use 4x m8n8k16 mma instead: asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(D.x[0]), "+r"(D.x[1]) : "r"(A.x[0]), "r"(B.x[0])); asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(D.x[2]), "+r"(D.x[3]) : "r"(A.x[1]), "r"(B.x[0])); asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(D.x[0]), "+r"(D.x[1]) : "r"(A.x[2]), "r"(B.x[1])); asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(D.x[2]), "+r"(D.x[3]) : "r"(A.x[3]), "r"(B.x[1])); #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 4, half2> & D, const tile<16, 8, half2> & A, const tile<8, 8, half2> & B) { #ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%0, %1};" : "+r"(Dxi[0]), "+r"(Dxi[1]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); #else // On Turing m16n8k16 mma is not available, use 2x m8n8k8 mma instead: asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" : "+r"(Dxi[0]), "+r"(Dxi[1]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0])); asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, 
{%2, %3}, {%4}, {%0, %1};" : "+r"(Dxi[0]), "+r"(Dxi[1]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[1])); #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, half2> & D, const tile<16, 8, half2> & A, const tile<16, 8, half2> & B) { #ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%0, %1};" : "+r"(Dxi[0]), "+r"(Dxi[1]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[2])); asm("mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%0, %1};" : "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[1]), "r"(Bxi[3])); #else // On Turing m16n8k16 mma is not available, use 4x m8n8k8 mma instead: asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" : "+r"(Dxi[0]), "+r"(Dxi[1]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0])); asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" : "+r"(Dxi[0]), "+r"(Dxi[1]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[2])); asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" : "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[1])); asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" : "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[3])); #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void mma( tile<16, 8, float, dl_d> & D, const tile<16, 8, float, dl_ab> & A, const tile<8, 8, float, dl_ab> & B) { #ifdef AMPERE_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; asm("mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // AMPERE_MMA_AVAILABLE } static __device__ __forceinline__ void mma_block_scaled(tile<16, 8, float> & D, const tile<16, 8, int> & A, const tile<8, 8, int> & B, uint32_t a_scale, uint32_t b_scale) { #ifdef BLACKWELL_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; float * Dxi = (float *) D.x; asm volatile( "mma.sync.aligned.kind::mxf4.block_scale.scale_vec::2X.m16n8k64.row.col.f32.e2m1.e2m1.f32.ue8m0 " "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3}, " "%10, {0, 0}, %11, {0, 0};" : "+f"(Dxi[0]), "+f"(Dxi[1]), "+f"(Dxi[2]), "+f"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1]), "r"(a_scale), "r"(b_scale)); #else GGML_UNUSED_VARS(D, A, B, a_scale, b_scale); #endif // BLACKWELL_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, float> & D, const tile<16, 8, half2> & A, const tile<8, 8, half2> & B) { #ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 
{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); #else // On Turing m16n8k16 mma is not available, use 2x m8n8k8 mma instead: asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0])); asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[1])); #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, float> & D, const tile<16, 8, nv_bfloat162> & A, const tile<8, 8, nv_bfloat162> & B) { #ifdef AMPERE_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; asm("mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // AMPERE_MMA_AVAILABLE } template static __device__ __forceinline__ void mma( tile<16, 16, float, dl_d> & D, const tile<16, 8, half2, dl_ab> & A, const tile<16, 8, half2, dl_ab> & B) { #ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[2])); asm("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(Dxi[4]), "+r"(Dxi[5]), "+r"(Dxi[6]), "+r"(Dxi[7]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[1]), "r"(Bxi[3])); #else // On Turing m16n8k16 mma is not available, use 4x m8n8k8 mma instead: asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0])); asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[2])); asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(Dxi[4]), "+r"(Dxi[5]), "+r"(Dxi[6]), "+r"(Dxi[7]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[1])); asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(Dxi[4]), "+r"(Dxi[5]), "+r"(Dxi[6]), "+r"(Dxi[7]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[3])); #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #elif defined(AMD_WMMA_AVAILABLE) #if defined(RDNA4) using halfx8_t = __attribute__((ext_vector_type(8))) _Float16; using floatx8_t = __attribute__((ext_vector_type(8))) float; floatx8_t& acc_frag = reinterpret_cast(D.x[0]); const halfx8_t& a_frag = reinterpret_cast(A.x[0]); const halfx8_t& b_frag = reinterpret_cast(B.x[0]); 
acc_frag = __builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12(a_frag, b_frag, acc_frag); #elif defined(RDNA3) using halfx16_t = __attribute__((ext_vector_type(16))) _Float16; using floatx8_t = __attribute__((ext_vector_type(8))) float; floatx8_t& acc_frag = reinterpret_cast(D.x[0]); const halfx16_t& a_frag = reinterpret_cast(A.x[0]); const halfx16_t& b_frag = reinterpret_cast(B.x[0]); acc_frag = __builtin_amdgcn_wmma_f32_16x16x16_f16_w32(a_frag, b_frag, acc_frag); #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // RDNA4 #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void mma( tile<16, 16, float, dl_d> & D, const tile<16, 8, nv_bfloat162, dl_ab> & A, const tile<16, 8, nv_bfloat162, dl_ab> & B) { #if defined(AMD_WMMA_AVAILABLE) #if defined(RDNA4) using bf16x8_t = __attribute__((ext_vector_type(8))) __bf16; using floatx8_t = __attribute__((ext_vector_type(8))) float; floatx8_t& acc_frag = reinterpret_cast(D.x[0]); const bf16x8_t& a_frag = reinterpret_cast(A.x[0]); const bf16x8_t& b_frag = reinterpret_cast(B.x[0]); acc_frag = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12(a_frag, b_frag, acc_frag); #elif defined(RDNA3) using bf16x16_t = __attribute__((ext_vector_type(16))) __bf16; using floatx8_t = __attribute__((ext_vector_type(8))) float; floatx8_t& acc_frag = reinterpret_cast(D.x[0]); const bf16x16_t& a_frag = reinterpret_cast(A.x[0]); const bf16x16_t& b_frag = reinterpret_cast(B.x[0]); acc_frag = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32(a_frag, b_frag, acc_frag); #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // RDNA4 #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // AMPERE_MMA_AVAILABLE } template static __device__ __forceinline__ void mma( tile<16, 16, int, dl_d> & D, const tile<16, 8, int, dl_ab> & A, const tile<16, 8, int, dl_ab> & B) { #if defined(AMD_MFMA_AVAILABLE) using int32x4_t = __attribute__((__vector_size__(4 * sizeof(int)))) int; int32x4_t * acc = (int32x4_t *) D.x; #if defined(CDNA3) acc[0] = __builtin_amdgcn_mfma_i32_16x16x32_i8(((int64_t *) A.x)[0], ((int64_t *) B.x)[0], acc[0], 0, 0, 0); #elif defined(CDNA2) || defined(CDNA) acc[0] = __builtin_amdgcn_mfma_i32_16x16x16i8(A.x[0], B.x[0], acc[0], 0, 0, 0); acc[0] = __builtin_amdgcn_mfma_i32_16x16x16i8(A.x[1], B.x[1], acc[0], 0, 0, 0); #endif // defined(CDNA3) #elif defined(AMD_WMMA_AVAILABLE) using int32x8_t = __attribute__((__vector_size__(8 * sizeof(int)))) int; int32x8_t * acc = (int32x8_t *) D.x; #if defined(RDNA4) using int32x2_t = __attribute__((__vector_size__(2 * sizeof(int)))) int; int32x2_t * a_vec = (int32x2_t *) A.x; int32x2_t * b_vec = (int32x2_t *) B.x; acc[0] = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12( true, a_vec[0], true, b_vec[0], acc[0], true ); acc[0] = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12( true, a_vec[1], true, b_vec[1], acc[0], true ); #elif defined(RDNA3) using int32x4_t = __attribute__((__vector_size__(4 * sizeof(int)))) int; int32x4_t * a_vec = (int32x4_t *) A.x; int32x4_t * b_vec = (int32x4_t *) B.x; acc[0] = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32( true, a_vec[0], true, b_vec[0], acc[0], true ); acc[0] = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32( true, a_vec[1], true, b_vec[1], acc[0], true ); #endif // RDNA4 #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // AMD_MFMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<32, 32, int> & D, const tile<32, 4, int> & A, const tile<32, 4, int> & B) { #if defined(AMD_MFMA_AVAILABLE) using int32x16_t = 
__attribute__((__vector_size__(16 * sizeof(int)))) int; int32x16_t * acc = (int32x16_t *) D.x; #if defined(CDNA3) acc[0] = __builtin_amdgcn_mfma_i32_32x32x16_i8(((int64_t *) A.x)[0], ((int64_t *) B.x)[0], acc[0], 0, 0, 0); #elif defined(CDNA2) || defined(CDNA) acc[0] = __builtin_amdgcn_mfma_i32_32x32x8i8(A.x[0], B.x[0], acc[0], 0, 0, 0); acc[0] = __builtin_amdgcn_mfma_i32_32x32x8i8(A.x[1], B.x[1], acc[0], 0, 0, 0); #endif // defined(CDNA3) #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // AMD_MFMA_AVAILABLE } template static __device__ __forceinline__ void mma( tile<32, J, T1> & D, const tile<32, K, T2> & A, const tile & B) { tile <16, J, T1> * D16 = reinterpret_cast< tile<16, J, T1> *>(&D); const tile<16, K, T2> * A16 = reinterpret_cast *>(&A); mma(D16[0], A16[0], B); mma(D16[1], A16[1], B); } static __device__ __forceinline__ void mma( tile<32, 8, float> & D, const tile<32, 4, half2> & A, const tile<8, 4, half2, DATA_LAYOUT_I_MAJOR_MIRRORED> & B) { #if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; asm("mma.sync.aligned.m8n8k4.row.col.f32.f16.f16.f32 " "{%0, %1, %2, %3, %4, %5, %6, %7}, {%8, %9}, {%10, %11}, {%0, %1, %2, %3, %4, %5, %6, %7};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]), "+r"(Dxi[4]), "+r"(Dxi[5]), "+r"(Dxi[6]), "+r"(Dxi[7]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0]), "r"(Bxi[1])); asm("mma.sync.aligned.m8n8k4.row.col.f32.f16.f16.f32 " "{%0, %1, %2, %3, %4, %5, %6, %7}, {%8, %9}, {%10, %11}, {%0, %1, %2, %3, %4, %5, %6, %7};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]), "+r"(Dxi[4]), "+r"(Dxi[5]), "+r"(Dxi[6]), "+r"(Dxi[7]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[2]), "r"(Bxi[3])); #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA } static __device__ __forceinline__ void mma( tile<32, 4, half2> & D, const tile<32, 4, half2> & A, const tile<8, 4, half2, DATA_LAYOUT_J_MAJOR_MIRRORED> & B) { #if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; asm("mma.sync.aligned.m8n8k4.row.row.f16.f16.f16.f16 " "{%0, %1, %2, %3}, {%4, %5}, {%6, %7}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0]), "r"(Bxi[1])); asm("mma.sync.aligned.m8n8k4.row.row.f16.f16.f16.f16 " "{%0, %1, %2, %3}, {%4, %5}, {%6, %7}, {%0, %1, %2, %3};" : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[2]), "r"(Bxi[3])); #else GGML_UNUSED_VARS(D, A, B); NO_DEVICE_CODE; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA } template static __device__ __forceinline__ void mma( tile<16, 16, int, dl_d> & D, const tile<16, 4, int, dl_ab> & A, const tile<16, 4, int, dl_ab> & B) { #if defined(AMD_WMMA_AVAILABLE) using int32x8_t = __attribute__((__vector_size__(8 * sizeof(int)))) int; int32x8_t * acc = (int32x8_t *) D.x; #if defined(RDNA4) using int32x2_t = __attribute__((__vector_size__(2 * sizeof(int)))) int; int32x2_t * a_vec = (int32x2_t *) A.x; int32x2_t * b_vec = (int32x2_t *) B.x; acc[0] = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12( true, a_vec[0], true, b_vec[0], acc[0], false ); #elif defined(RDNA3) using int32x4_t = __attribute__((__vector_size__(4 * sizeof(int)))) int; int32x4_t * a_vec = (int32x4_t *) A.x; int32x4_t * b_vec = (int32x4_t *) B.x; acc[0] = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32( true, a_vec[0], true, b_vec[0], acc[0], false ); #endif // RDNA4 #else 
GGML_UNUSED(D); GGML_UNUSED(A); GGML_UNUSED(B); NO_DEVICE_CODE; #endif // AMD_WMMA_AVAILABLE } } ggml-org-ggml-3678254/src/ggml-cuda/mmf.cu000066400000000000000000000153601512524704700201300ustar00rootroot00000000000000#include "ggml.h" #include "mmf.cuh" #include "mmid.cuh" void ggml_cuda_mul_mat_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) { GGML_ASSERT( src1->type == GGML_TYPE_F32); GGML_ASSERT(!ids || ids->type == GGML_TYPE_I32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS; const size_t ts_src0 = ggml_type_size(src0->type); const size_t ts_src1 = ggml_type_size(src1->type); const size_t ts_dst = ggml_type_size(dst->type); GGML_ASSERT(ne13 == ne3); GGML_ASSERT( nb00 == ts_src0); GGML_ASSERT( nb10 == ts_src1); GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type)); GGML_ASSERT( nb0 == ts_dst); const float * src1_d = (const float *) src1->data; const int32_t * ids_d = ids ? (const int32_t *) ids->data : nullptr; float * dst_d = (float *) dst->data; const int64_t s01 = src0->nb[1] / ts_src0; const int64_t s11 = src1->nb[1] / ts_src1; const int64_t s1 = dst->nb[1] / ts_dst; const int64_t s02 = src0->nb[2] / ts_src0; const int64_t s12 = src1->nb[2] / ts_src1; const int64_t s2 = dst->nb[2] / ts_dst; const int64_t s03 = src0->nb[3] / ts_src0; const int64_t s13 = src1->nb[3] / ts_src1; const int64_t s3 = dst->nb[3] / ts_dst; const int64_t ids_s0 = ids ? ids->nb[0] / ggml_type_size(ids->type) : 0; const int64_t ids_s1 = ids ? ids->nb[1] / ggml_type_size(ids->type) : 0; mmf_ids_data ids_info{}; mmf_ids_data * ids_info_ptr = nullptr; ggml_cuda_pool_alloc ids_src_compact_dev; ggml_cuda_pool_alloc ids_dst_compact_dev; ggml_cuda_pool_alloc expert_bounds_dev; // For MUL_MAT_ID the memory layout is different than for MUL_MAT: const int64_t ncols_dst = ids ? ne2 : ne1; const int64_t nchannels_dst = ids ? ne1 : ne2; const int64_t stride_col_dst = ids ? s2 : s1; const int64_t stride_col_y = ids ? s12 : s11; const int64_t stride_channel_dst = ids ? s1 : s2; int64_t stride_channel_y = ids ? s11 : s12; int64_t nchannels_y = ids ? 
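// [Editor's note: illustrative sketch, not part of the original ggml source.]
// The s01/s02/s03, s11/s12/s13 and s1/s2/s3 values computed above are element
// strides obtained from ggml's byte strides nb[i] by dividing by the type
// size; the sketch below shows that relationship for a hypothetical
// contiguous 2-byte-per-element tensor.
#include <cassert>
#include <cstdint>

static void element_strides_example() {
    const int64_t ne[4] = {4096, 32, 8, 2};   // hypothetical shape
    const size_t  ts    = 2;                  // bytes per element (e.g. fp16)
    size_t nb[4];
    nb[0] = ts;                               // contiguous innermost dimension
    for (int i = 1; i < 4; ++i) nb[i] = nb[i-1] * (size_t) ne[i-1];
    const int64_t s1 = (int64_t) (nb[1] / ts);
    const int64_t s2 = (int64_t) (nb[2] / ts);
    const int64_t s3 = (int64_t) (nb[3] / ts);
    assert(s1 == ne[0] && s2 == ne[0]*ne[1] && s3 == ne[0]*ne[1]*ne[2]);
}
// [End editor's note.]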
ne11 : ne12; //mul_mat_id: handle broadcast if (ids && nchannels_y == 1) { stride_channel_y = 0; nchannels_y = ids->ne[0]; } if (ids && ncols_dst > 16) { const int64_t n_expert_used = ids->ne[0]; const int64_t n_experts = ne02; const int64_t n_tokens = ne12; const int64_t ne_get_rows = n_tokens * n_expert_used; ids_src_compact_dev.alloc(ctx.pool(), ne_get_rows); ids_dst_compact_dev.alloc(ctx.pool(), ne_get_rows); expert_bounds_dev.alloc(ctx.pool(), n_experts + 1); const int si1 = static_cast(ids_s1); const int sis1 = static_cast(src1->nb[2] / src1->nb[1]); GGML_ASSERT(sis1 > 0); ggml_cuda_launch_mm_ids_helper(ids_d, ids_src_compact_dev.get(), ids_dst_compact_dev.get(), expert_bounds_dev.get(), static_cast(n_experts), static_cast(n_tokens), static_cast(n_expert_used), static_cast(ne11), si1, sis1, ctx.stream()); CUDA_CHECK(cudaGetLastError()); ids_info.ids_src_compact = ids_src_compact_dev.get(); ids_info.ids_dst_compact = ids_dst_compact_dev.get(); ids_info.expert_bounds_dev = expert_bounds_dev.get(); ids_info.n_experts = static_cast(n_experts); ids_info.sis1 = sis1; ids_info_ptr = &ids_info; } switch (src0->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0->data; constexpr int vals_per_T = 1; mul_mat_f_switch_cols_per_block( src0_d, src1_d, ids_d, dst_d, ne00/vals_per_T, ne01, ncols_dst, s01/vals_per_T, stride_col_y/vals_per_T, stride_col_dst, ids_s0, ids_s1, ne02, nchannels_y, nchannels_dst, s02/vals_per_T, stride_channel_y, stride_channel_dst, ne03, ne3, s03/vals_per_T, s13, s3, ctx.stream(), ids_info_ptr); } break; case GGML_TYPE_F16: { const half2 * src0_d = (const half2 *) src0->data; constexpr int vals_per_T = 2; mul_mat_f_switch_cols_per_block( src0_d, src1_d, ids_d, dst_d, ne00/vals_per_T, ne01, ncols_dst, s01/vals_per_T, stride_col_y/vals_per_T, stride_col_dst, ids_s0, ids_s1, ne02, nchannels_y, nchannels_dst, s02/vals_per_T, stride_channel_y, stride_channel_dst, ne03, ne3, s03/vals_per_T, s13, s3, ctx.stream(), ids_info_ptr); } break; case GGML_TYPE_BF16: { const nv_bfloat162 * src0_d = (const nv_bfloat162 *) src0->data; constexpr int vals_per_T = 2; mul_mat_f_switch_cols_per_block( src0_d, src1_d, ids_d, dst_d, ne00/vals_per_T, ne01, ncols_dst, s01/vals_per_T, stride_col_y/vals_per_T, stride_col_dst, ids_s0, ids_s1, ne02, nchannels_y, nchannels_dst, s02/vals_per_T, stride_channel_y, stride_channel_dst, ne03, ne3, s03/vals_per_T, s13, s3, ctx.stream(), ids_info_ptr); } break; default: GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type)); } } bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * src0_ne, const size_t * src0_nb, const int src1_ncols, bool mul_mat_id) { if (ggml_is_quantized(type)) { return false; } const size_t ts = ggml_type_size(type); if (src0_ne[0] % (warp_size * (4/ts)) != 0) { return false; } if (src0_nb[0] != ts) { return false; } // Pointers not aligned to the size of half2/nv_bfloat162/float2 would result in a crash: for (size_t i = 1; i < GGML_MAX_DIMS; ++i) { if (src0_nb[i] % (2*ts) != 0) { return false; } } if (src0_ne[1] % MMF_ROWS_PER_BLOCK != 0) { return false; } if (mul_mat_id) { if (src0_ne[1] <= 1024 && src1_ncols > 512) { return false; } else if(src0_ne[1] > 1024 && src1_ncols > 128) { return false; } } else { if (GGML_CUDA_CC_IS_RDNA3_0(cc) && src1_ncols > 8) { return false; } else if (src1_ncols > 16) { return false; } } switch (type) { case GGML_TYPE_F32: return ampere_mma_available(cc); case GGML_TYPE_F16: return volta_mma_available(cc) || turing_mma_available(cc) || 
amd_wmma_available(cc); case GGML_TYPE_BF16: return ampere_mma_available(cc) || amd_wmma_available(cc); default: return false; } } ggml-org-ggml-3678254/src/ggml-cuda/mmf.cuh000066400000000000000000001165641512524704700203100ustar00rootroot00000000000000#pragma once #include "mma.cuh" #include "common.cuh" #include "convert.cuh" using namespace ggml_cuda_mma; #define MMF_ROWS_PER_BLOCK 32 struct mmf_ids_data { const int32_t * ids_src_compact = nullptr; const int32_t * ids_dst_compact = nullptr; const int32_t * expert_bounds_dev = nullptr; int n_experts = 0; int sis1 = 0; }; void ggml_cuda_mul_mat_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst); bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * scr0_ne, const size_t * src0_nb, const int src1_ncols, bool mul_mat_id); template __launch_bounds__(ggml_cuda_get_physical_warp_size()*nwarps, 1) static __global__ void mul_mat_f( const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst, const int ncols, const int ncols_dst_total, const int nchannels_dst, const int stride_row, const int stride_col_y, const int stride_col_dst, const int stride_col_id, const int stride_row_id, const int channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const int sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst) { // TODO: handle this in a consistent and simpler way after AMD MFMA support has been added #if (!defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)) || defined(AMD_WMMA_AVAILABLE) #if defined(AMD_WMMA_AVAILABLE) // Special case for tf32, just dummy mma layout as wmma doesn't support it. constexpr bool is_tf32 = std::is_same_v; constexpr int tile_B_I = is_tf32 ? 8 : 16; constexpr int tile_C_J = is_tf32 ? 8 : 16; constexpr data_layout ab_layout = is_tf32 ? DATA_LAYOUT_I_MAJOR : get_input_data_layout(); typedef tile<16, 8, T, ab_layout> tile_A; typedef tile tile_B; typedef tile<16, tile_C_J, float, DATA_LAYOUT_J_MAJOR> tile_C; #else #ifdef VOLTA_MMA_AVAILABLE if constexpr (!std::is_same_v) {NO_DEVICE_CODE;} else { typedef tile<32, 4, T, DATA_LAYOUT_I_MAJOR> tile_A; typedef tile< 8, 4, T, DATA_LAYOUT_I_MAJOR_MIRRORED> tile_B; typedef tile<32, 8, float, DATA_LAYOUT_I_MAJOR> tile_C; #else typedef tile<16, 8, T> tile_A; typedef tile<8, 8, T> tile_B; typedef tile<16, 8, float> tile_C; #endif // VOLTA_MMA_AVAILABLE #endif // defined(AMD_WMMA_AVAILABLE) if constexpr (!tile_A::supported() || !tile_B::supported() || !tile_C::supported()) { NO_DEVICE_CODE; return; } constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr int tile_k_padded = warp_size + 4; constexpr int ntA = rows_per_block / tile_A::I; constexpr int ntB = (cols_per_block + tile_B::I - 1) / tile_B::I; const int row0 = blockIdx.x * rows_per_block; int expert_idx = 0; int col_base = 0; const int channel_dst = has_ids ? 0 : blockIdx.y; if constexpr (has_ids) { // experts + tiles of ncols_dst are packed in the y dimension int col_tiles = (ncols_dst_total + cols_per_block - 1) / cols_per_block; const int nchannels_x = gridDim.y / col_tiles; const int tile_idx = blockIdx.y / nchannels_x; expert_idx = blockIdx.y - tile_idx * nchannels_x; col_base = tile_idx * cols_per_block; } const int channel_x = has_ids ? 
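// [Editor's note: illustrative sketch, not part of the original ggml source.]
// In the has_ids path the decode just above unpacks blockIdx.y into a
// (column-tile, expert) pair: the launcher sizes gridDim.y as
// n_experts * col_tiles, so y = tile_idx * n_experts + expert_idx and one
// integer division recovers both. Roundtrip check with hypothetical sizes:
#include <cassert>

static void grid_packing_roundtrip() {
    const int n_experts = 8;   // plays the role of nchannels_x in the kernel
    const int col_tiles = 5;   // tiles of cols_per_block covering ncols_dst
    for (int y = 0; y < n_experts * col_tiles; ++y) {
        const int tile_idx   = y / n_experts;
        const int expert_idx = y - tile_idx * n_experts;
        assert(tile_idx   >= 0 && tile_idx   < col_tiles);
        assert(expert_idx >= 0 && expert_idx < n_experts);
        assert(y == tile_idx * n_experts + expert_idx);
    }
}
// [End editor's note.]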
expert_idx : (channel_dst / channel_ratio); const int channel_y = channel_dst; const int sample_dst = blockIdx.z; const int sample_x = sample_dst / sample_ratio; const int sample_y = sample_dst; x += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row0*stride_row ; y += int64_t(sample_y) *stride_sample_y + (has_ids ? 0 : channel_y *stride_channel_y); dst += int64_t(sample_dst)*stride_sample_dst + (has_ids ? 0 : channel_dst*stride_channel_dst); if constexpr (has_ids) { constexpr int y_stride_scale = std::is_same_v ? 1 : 2; const int64_t col_offset = col_base; y += col_offset * stride_col_y * y_stride_scale; dst += col_offset * stride_col_dst; ids += col_offset * stride_row_id; } const float2 * y2 = (const float2 *) y; extern __shared__ char data_mmv[]; char * shmem_base = data_mmv; int * slot_map = (int *) shmem_base; char * compute_base = has_ids ? (shmem_base + GGML_PAD(cols_per_block, 16) * sizeof(int)) : shmem_base; tile_C C[ntA][ntB]; T * tile_xy = (T *) compute_base + threadIdx.y*(tile_A::I * tile_k_padded); if constexpr (has_ids) { int found = 0; for (int j0 = 0; j0 < cols_per_block; j0 += nwarps) { const int j = j0 + threadIdx.y; if (threadIdx.x == 0) { slot_map[j] = -1; } if (col_base + j >= ncols_dst_total) { continue; } const int32_t * __restrict__ id_row = ids + j*stride_row_id; for (int k = threadIdx.x; k < nchannels_dst; k += warp_size) { int match = id_row[k*stride_col_id] == expert_idx; if (match) { slot_map[j] = k; found = 1; break; } } } if (!__syncthreads_or(found)) { return; } } for (int col = threadIdx.y*warp_size + threadIdx.x; col < ncols; col += nwarps*warp_size) { tile_A A[ntA][warp_size / tile_A::J]; #pragma unroll for (int itA = 0; itA < ntA; ++itA) { #pragma unroll for (int i = 0; i < tile_A::I; ++i) { tile_xy[i*tile_k_padded + threadIdx.x] = x[(itA*tile_A::I + i)*stride_row + col]; } #pragma unroll for (int k0 = 0; k0 < warp_size; k0 += tile_A::J) { load_ldmatrix(A[itA][k0/tile_A::J], tile_xy + k0, tile_k_padded); } } #pragma unroll for (int itB = 0; itB < ntB; ++itB) { if constexpr (std::is_same_v) { #pragma unroll for (int j0 = 0; j0 < tile_B::I; ++j0) { const int j = j0 + itB*tile_B::I; if constexpr (!has_ids) { tile_xy[j0*tile_k_padded + threadIdx.x] = j < cols_per_block ? y[j*stride_col_y + col] : 0.0f; } else { const bool valid = j < cols_per_block && (col_base + j) < ncols_dst_total && slot_map[j] >= 0; tile_xy[j0*tile_k_padded + threadIdx.x] = valid ? y[slot_map[j]*stride_channel_y + j*stride_col_y + col] : 0.0f; } } } else if constexpr (std::is_same_v || std::is_same_v) { #pragma unroll for (int j0 = 0; j0 < tile_B::I; ++j0) { const int j = j0 + itB*tile_B::I; if constexpr (!has_ids) { const float2 tmp = j < cols_per_block ? y2[j*stride_col_y + col] : make_float2(0.0f, 0.0f); tile_xy[j0*tile_k_padded + threadIdx.x] = ggml_cuda_cast(tmp); } else { const bool valid = j < cols_per_block && (col_base + j) < ncols_dst_total && slot_map[j] >= 0; float2 tmp = valid ? 
*(const float2*) &y[slot_map[j]*stride_channel_y + 2*(j*stride_col_y + col)] : make_float2(0.0f, 0.0f); tile_xy[j0*tile_k_padded + threadIdx.x] = ggml_cuda_cast(tmp); } } } else { static_assert(std::is_same_v, "unsupported type"); } #pragma unroll for (int k0 = 0; k0 < warp_size; k0 += tile_B::J) { tile_B B; load_ldmatrix(B, tile_xy + k0, tile_k_padded); #pragma unroll for (int itA = 0; itA < ntA; ++itA) { mma(C[itA][itB], A[itA][k0/tile_B::J], B); } } } } float * buf_iw = (float *) compute_base; constexpr int kiw = nwarps*rows_per_block + 4; if (nwarps > 1) { __syncthreads(); } #pragma unroll for (int itB = 0; itB < ntB; ++itB) { #pragma unroll for (int itA = 0; itA < ntA; ++itA) { #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = threadIdx.y*rows_per_block + itA*tile_C::I + tile_C::get_i(l); const int j = itB*tile_C::J + tile_C::get_j(l); buf_iw[j*kiw + i] = C[itA][itB].x[l]; } } } if (nwarps > 1) { __syncthreads(); } #pragma unroll for (int j0 = 0; j0 < cols_per_block; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j0 + nwarps > cols_per_block && j >= cols_per_block) { return; } float sum = 0.0f; static_assert(rows_per_block == warp_size, "need loop/check"); #pragma unroll for (int i0 = 0; i0 < nwarps*rows_per_block; i0 += rows_per_block) { const int i = i0 + threadIdx.x; sum += buf_iw[j*kiw + i]; } if constexpr (!has_ids) { dst[j*stride_col_dst + row0 + threadIdx.x] = sum; } else { const int slot = (j < cols_per_block) ? slot_map[j] : -1; if (slot >= 0 && (col_base + j) < ncols_dst_total) { dst[slot*stride_channel_dst + j*stride_col_dst + row0 + threadIdx.x] = sum; } } } #ifdef VOLTA_MMA_AVAILABLE } #endif //VOLTA_MMA_AVAILABLE #else GGML_UNUSED_VARS(x, y, ids, dst, ncols, ncols_dst_total, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); NO_DEVICE_CODE; #endif // (!defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)) || defined(AMD_WMMA_AVAILABLE) } //This kernel is for larger batch sizes of mul_mat_id template __launch_bounds__(ggml_cuda_get_physical_warp_size()*nwarps, 1) static __global__ void mul_mat_f_ids( const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids_src_compact, const int32_t * __restrict__ ids_dst_compact, const int32_t * __restrict__ expert_bounds, float * __restrict__ dst, const int ncols, const int ncols_dst_total, const int nchannels_dst, const int stride_row, const int stride_col_y, const int stride_col_dst, const int channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const int sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst, const uint3 sis1_fd, const uint3 nch_fd) { // TODO: handle this in a consistent and simpler way after AMD MFMA support has been added #if (!defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)) || defined(AMD_WMMA_AVAILABLE) #if defined(AMD_WMMA_AVAILABLE) // Special case for tf32, just dummy mma layout as wmma doesn't support it. constexpr bool is_tf32 = std::is_same_v; constexpr int tile_B_I = is_tf32 ? 8 : 16; constexpr int tile_C_J = is_tf32 ? 8 : 16; constexpr data_layout ab_layout = is_tf32 ? 
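// [Editor's note: illustrative sketch, not part of the original ggml source.]
// The "+ 4" in tile_k_padded and kiw above looks like the usual shared-memory
// padding trick (the motivation is not stated explicitly here): if rows were
// laid out with a stride that is a multiple of 32 four-byte words, accesses
// walking down a column would all land in the same bank; a small extra stride
// spreads them over several banks. Generic 4-byte-element illustration only:
#include <cassert>
#include <set>

static int distinct_banks(int stride_words) {
    std::set<int> banks;
    for (int row = 0; row < 32; ++row) {
        banks.insert((row * stride_words) % 32);  // bank of column 0 in each row
    }
    return (int) banks.size();
}

static void padding_example() {
    assert(distinct_banks(32)     == 1);  // unpadded stride: one bank for every row
    assert(distinct_banks(32 + 4) >  1);  // padded stride: accesses spread out
}
// [End editor's note.]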
DATA_LAYOUT_I_MAJOR : get_input_data_layout(); typedef tile<16, 8, T, ab_layout> tile_A; typedef tile tile_B; typedef tile<16, tile_C_J, float, DATA_LAYOUT_J_MAJOR> tile_C; #else #ifdef VOLTA_MMA_AVAILABLE if constexpr (!std::is_same_v) {NO_DEVICE_CODE;} else { typedef tile<32, 4, T, DATA_LAYOUT_I_MAJOR> tile_A; typedef tile< 8, 4, T, DATA_LAYOUT_I_MAJOR_MIRRORED> tile_B; typedef tile<32, 8, float, DATA_LAYOUT_I_MAJOR> tile_C; #else typedef tile<16, 8, T> tile_A; typedef tile<8, 8, T> tile_B; typedef tile<16, 8, float> tile_C; #endif // VOLTA_MMA_AVAILABLE #endif // defined(AMD_WMMA_AVAILABLE) if constexpr (!tile_A::supported() || !tile_B::supported() || !tile_C::supported()) { NO_DEVICE_CODE; return; } constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr int tile_k_padded = warp_size + 4; constexpr int ntA = rows_per_block / tile_A::I; constexpr int ntB = (cols_per_block + tile_B::I - 1) / tile_B::I; const int row0 = blockIdx.x * rows_per_block; const int expert_idx = blockIdx.y; const int expert_start = expert_bounds[expert_idx]; const int expert_end = expert_bounds[expert_idx + 1]; const int ncols_expert = expert_end - expert_start; const int tiles_for_expert = (ncols_expert + cols_per_block - 1) / cols_per_block; const int tile_idx = blockIdx.z; if (tile_idx >= tiles_for_expert) { return; } const int col_base = tile_idx * cols_per_block; GGML_UNUSED(channel_ratio); const int channel_x = expert_idx; const int sample_dst = 0; const int sample_x = sample_dst / sample_ratio; const int sample_y = sample_dst; x += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row0*stride_row; y += int64_t(sample_y) *stride_sample_y; dst += int64_t(sample_dst)*stride_sample_dst; const int32_t * ids_src_expert = ids_src_compact + expert_start; const int32_t * ids_dst_expert = ids_dst_compact + expert_start; extern __shared__ char data_mmv[]; char * compute_base = data_mmv; //const float2 * y2 = (const float2 *) y; tile_C C[ntA][ntB]; T * tile_xy = (T *) compute_base + threadIdx.y*(tile_A::I * tile_k_padded); for (int col = threadIdx.y*warp_size + threadIdx.x; col < ncols; col += nwarps*warp_size) { tile_A A[ntA][warp_size / tile_A::J]; #pragma unroll for (int itA = 0; itA < ntA; ++itA) { #pragma unroll for (int i = 0; i < tile_A::I; ++i) { tile_xy[i*tile_k_padded + threadIdx.x] = x[(itA*tile_A::I + i)*stride_row + col]; } #pragma unroll for (int k0 = 0; k0 < warp_size; k0 += tile_A::J) { load_ldmatrix(A[itA][k0/tile_A::J], tile_xy + k0, tile_k_padded); } } if constexpr (std::is_same_v) { float vals_buf[2][tile_B::I]; auto gather_tile = [&](int tile_idx_local, float *vals) { #pragma unroll for (int j0 = 0; j0 < tile_B::I; ++j0) { const int j = j0 + tile_idx_local*tile_B::I; const int global_j = col_base + j; float val = 0.0f; if (j < cols_per_block && global_j < ncols_expert) { const int src_entry = ids_src_expert[global_j]; const uint2 qrm = fast_div_modulo((uint32_t) src_entry, sis1_fd); const int token = (int) qrm.x; const int channel = (int) qrm.y; if (token < ncols_dst_total) { val = y[channel*stride_channel_y + token*stride_col_y + col]; } } vals[j0] = val; } }; gather_tile(0, vals_buf[0]); int curr_buf = 0; int next_buf = 1; #pragma unroll for (int itB = 0; itB < ntB; ++itB) { #pragma unroll for (int j0 = 0; j0 < tile_B::I; ++j0) { tile_xy[j0*tile_k_padded + threadIdx.x] = vals_buf[curr_buf][j0]; } if (itB + 1 < ntB) { gather_tile(itB + 1, vals_buf[next_buf]); } #pragma unroll for (int k0 = 0; k0 < warp_size; k0 += tile_B::J) { tile_B B; load_ldmatrix(B, tile_xy + 
k0, tile_k_padded); #pragma unroll for (int itA = 0; itA < ntA; ++itA) { mma(C[itA][itB], A[itA][k0/tile_B::J], B); } } if (itB + 1 < ntB) { curr_buf ^= 1; next_buf ^= 1; } } } else if constexpr (std::is_same_v || std::is_same_v) { float2 vals_buf[2][tile_B::I]; auto gather_tile = [&](int tile_idx_local, float2 *vals) { #pragma unroll for (int j0 = 0; j0 < tile_B::I; ++j0) { const int j = j0 + tile_idx_local*tile_B::I; const int global_j = col_base + j; float2 tmp = make_float2(0.0f, 0.0f); if (j < cols_per_block && global_j < ncols_expert) { const int src_entry = ids_src_expert[global_j]; const uint2 qrm = fast_div_modulo((uint32_t) src_entry, sis1_fd); const int token = (int) qrm.x; const int channel = (int) qrm.y; if (token < ncols_dst_total) { tmp = *(const float2*) &y[channel*stride_channel_y + 2*(token*stride_col_y + col)]; } } vals[j0] = tmp; } }; if (ntB > 0) { gather_tile(0, vals_buf[0]); } int curr_buf = 0; int next_buf = 1; #pragma unroll for (int itB = 0; itB < ntB; ++itB) { #pragma unroll for (int j0 = 0; j0 < tile_B::I; ++j0) { const float2 tmp = vals_buf[curr_buf][j0]; tile_xy[j0*tile_k_padded + threadIdx.x] = ggml_cuda_cast(tmp); } if (itB + 1 < ntB) { gather_tile(itB + 1, vals_buf[next_buf]); } #pragma unroll for (int k0 = 0; k0 < warp_size; k0 += tile_B::J) { tile_B B; load_ldmatrix(B, tile_xy + k0, tile_k_padded); #pragma unroll for (int itA = 0; itA < ntA; ++itA) { mma(C[itA][itB], A[itA][k0/tile_B::J], B); } } if (itB + 1 < ntB) { curr_buf ^= 1; next_buf ^= 1; } } } else { static_assert(std::is_same_v, "unsupported type"); } } float * buf_iw = (float *) compute_base; constexpr int kiw = nwarps*rows_per_block + 4; if (nwarps > 1) { __syncthreads(); } #pragma unroll for (int itB = 0; itB < ntB; ++itB) { #pragma unroll for (int itA = 0; itA < ntA; ++itA) { #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = threadIdx.y*rows_per_block + itA*tile_C::I + tile_C::get_i(l); const int j = itB*tile_C::J + tile_C::get_j(l); buf_iw[j*kiw + i] = C[itA][itB].x[l]; } } } if (nwarps > 1) { __syncthreads(); } #pragma unroll for (int j0 = 0; j0 < cols_per_block; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j0 + nwarps > cols_per_block && j >= cols_per_block) { return; } float sum = 0.0f; static_assert(rows_per_block == warp_size, "need loop/check"); #pragma unroll for (int i0 = 0; i0 < nwarps*rows_per_block; i0 += rows_per_block) { const int i = i0 + threadIdx.x; sum += buf_iw[j*kiw + i]; } const int global_j = col_base + j; if (j < cols_per_block && global_j < ncols_expert && nchannels_dst > 0) { const int dst_entry = ids_dst_expert[global_j]; const uint2 qrm = fast_div_modulo((uint32_t) dst_entry, nch_fd); const int token = (int) qrm.x; if (token < ncols_dst_total) { const int slot = (int) qrm.y; dst[slot*stride_channel_dst + token*stride_col_dst + row0 + threadIdx.x] = sum; } } } #ifdef VOLTA_MMA_AVAILABLE } #endif // VOLTA_MMA_AVAILABLE #else GGML_UNUSED_VARS(x, y, ids_src_compact, ids_dst_compact, expert_bounds, dst, ncols, ncols_dst_total, nchannels_dst, stride_row, stride_col_y, stride_col_dst, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, sis1_fd, nch_fd); NO_DEVICE_CODE; #endif // (!defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)) || defined(AMD_WMMA_AVAILABLE) } template static inline void mul_mat_f_switch_ids( const T * x, const float * y, const int32_t * ids, float * dst, const int64_t ncols_x, const int64_t ncols_dst, const int64_t nchannels_dst, const int64_t 
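// [Editor's note: illustrative sketch, not part of the original ggml source.]
// The compact id tables consumed above store one integer per column, packed
// as entry = token * stride + sub with sub < stride; fast_div_modulo() then
// recovers (token, sub) as quotient and remainder without a hardware divide.
// Plain '/' and '%' are used below as the reference behaviour:
#include <cassert>
#include <cstdint>

static void packed_index_roundtrip() {
    const uint32_t stride = 12;                 // e.g. sis1 or nchannels_dst
    for (uint32_t token = 0; token < 100; ++token) {
        for (uint32_t sub = 0; sub < stride; ++sub) {
            const uint32_t entry = token * stride + sub;
            assert(entry / stride == token);    // what qrm.x reproduces
            assert(entry % stride == sub);      // what qrm.y reproduces
        }
    }
}
// [End editor's note.]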
stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, const int64_t stride_col_id, const int64_t stride_row_id, const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t sample_ratio, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, const dim3 & block_nums, const dim3 & block_dims, const int nbytes_shared_total, cudaStream_t stream, const mmf_ids_data * ids_data) { const bool has_ids_data = ids_data && ids_data->ids_src_compact; // Use the compact-ids kernel only for larger tiles; for small ncols_dst (< 16) // we prefer the normal mul_mat_f path with has_ids=true. if (has_ids_data && ncols_dst > 16) { const int max_tiles = (int) ((ncols_dst + cols_per_block - 1) / cols_per_block); if (max_tiles == 0) { return; } dim3 block_nums_ids(block_nums.x, ids_data->n_experts, max_tiles); const uint3 sis1_fd = ids_data->sis1 > 0 ? init_fastdiv_values((uint32_t) ids_data->sis1) : make_uint3(0, 0, 1); const uint3 nch_fd = init_fastdiv_values((uint32_t) nchannels_dst); mul_mat_f_ids<<>> (x, y, ids_data->ids_src_compact, ids_data->ids_dst_compact, ids_data->expert_bounds_dev, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, sis1_fd, nch_fd); } else if (ids) { const int64_t col_tiles = (ncols_dst + cols_per_block - 1) / cols_per_block; dim3 block_nums_ids = block_nums; block_nums_ids.y *= col_tiles; mul_mat_f<<>> (x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } else { mul_mat_f<<>> (x, y, ids, dst, ncols_x, cols_per_block, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } } template void mul_mat_f_cuda( const T * x, const float * y, const int32_t * ids, float * dst, const int64_t ncols_x, const int64_t nrows_x, const int64_t ncols_dst, const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, const int64_t stride_col_id, const int64_t stride_row_id, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, cudaStream_t stream, const mmf_ids_data * ids_data) { typedef tile<16, 8, T> tile_A_16; typedef tile<32, 8, T> tile_A_32; typedef tile<16, 8, T> tile_B_16; typedef tile< 8, 8, T> tile_B_8; GGML_ASSERT(ncols_x % 2 == 0); GGML_ASSERT(stride_row % 2 == 0); GGML_ASSERT(stride_col_y % 2 == 0); GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0); GGML_ASSERT( nsamples_dst % nsamples_x == 0); const int64_t channel_ratio = nchannels_dst / nchannels_x; const int64_t sample_ratio = nsamples_dst / nsamples_x; const int device = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[device].cc; const int warp_size = ggml_cuda_info().devices[device].warp_size; int64_t nwarps_best = 1; int64_t niter_best = (ncols_x + 
warp_size*2 - 1) / (warp_size*2); int64_t max_block_size = 256; for (int64_t nwarps = 2; nwarps <= max_block_size/warp_size; nwarps++) { const int64_t niter = (ncols_x + nwarps*warp_size*2 - 1) / (nwarps*warp_size*2); if (niter < niter_best) { niter_best = niter; nwarps_best = nwarps; } } constexpr int rows_per_block = MMF_ROWS_PER_BLOCK; const int nbytes_shared_iter = nwarps_best * (volta_mma_available(cc) ? tile_A_32::I : tile_A_16::I) * (warp_size + 4) * 4; const int nbytes_cols_per_block_pad = amd_wmma_available(cc) ? tile_B_16::I : tile_B_8::I; const int nbytes_shared_combine = GGML_PAD(cols_per_block, nbytes_cols_per_block_pad) * (nwarps_best*rows_per_block + 4) * 4; const int nbytes_shared = std::max(nbytes_shared_iter, nbytes_shared_combine); const int nbytes_slotmap = ids ? GGML_PAD(cols_per_block, 16) * sizeof(int) : 0; const int nbytes_shared_total = nbytes_shared + nbytes_slotmap; const int64_t grid_y = ids ? nchannels_x : nchannels_dst; const dim3 block_nums(nrows_x/rows_per_block, grid_y, nsamples_dst); const dim3 block_dims(warp_size, nwarps_best, 1); switch (nwarps_best) { case 1: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 2: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 3: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 4: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 5: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 6: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 7: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, 
stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; case 8: { mul_mat_f_switch_ids( x, y, ids, dst, ncols_x, ncols_dst, nchannels_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst, block_nums, block_dims, nbytes_shared_total, stream, ids_data); } break; default: { GGML_ABORT("fatal error"); } break; } GGML_UNUSED_VARS(nchannels_y); } template static void mul_mat_f_switch_cols_per_block( const T * x, const float * y, const int32_t * ids, float * dst, const int64_t ncols_x, const int64_t nrows_x, const int64_t ncols_dst, const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, const int64_t stride_col_id, const int stride_row_id, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, cudaStream_t stream, const mmf_ids_data * ids_data) { const int ncols_case = (ids && ncols_dst > 16) ? 16 : ncols_dst; GGML_ASSERT(ids || ncols_dst <= 16); switch (ncols_case) { case 1: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 2: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 3: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 4: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 5: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 6: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 7: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, 
stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 8: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 9: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 10: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 11: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 12: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 13: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 14: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 15: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; case 16: { mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row, stride_col_y, stride_col_dst, stride_col_id, stride_row_id, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream, ids_data); } break; default: { GGML_ABORT("fatal error"); } break; } } #define DECL_MMF_CASE_HELPER(T, ncols_dst) \ template void mul_mat_f_cuda( \ const T * x, const float * y, 
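// [Editor's note: illustrative sketch, not part of the original ggml source.]
// The DECL_MMF_CASE* macros beginning here expand to C++ explicit template
// instantiations: headers use the 'extern' form so that each ncols_dst
// variant of mul_mat_f_cuda is compiled in exactly one translation unit.
// Minimal single-file version of the same pattern with a hypothetical
// function template:
#include <cassert>

template <int ncols> int cols_per_block_id() { return ncols; }

// What the DECL_MMF_CASE_EXTERN expansion does (header side):
extern template int cols_per_block_id<8>();

// What the DECL_MMF_CASE expansion does (one source file per case):
template int cols_per_block_id<8>();

static void extern_template_example() {
    assert(cols_per_block_id<8>() == 8);
}
// [End editor's note.]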
const int32_t * ids, float * dst, \ const int64_t ncols_x, const int64_t nrows_x, int64_t ncols_dst_total, const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, \ const int64_t stride_col_id, const int64_t stride_row_id, \ const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, \ const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,\ const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, \ cudaStream_t stream, const mmf_ids_data * ids_data); #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) #define DECL_MMF_CASE_EXTERN(ncols_dst) \ extern DECL_MMF_CASE_HELPER(float, ncols_dst) \ extern DECL_MMF_CASE_HELPER(half2, ncols_dst) \ extern DECL_MMF_CASE_HELPER(nv_bfloat162, ncols_dst) #define DECL_MMF_CASE(ncols_dst) \ DECL_MMF_CASE_HELPER(float, ncols_dst) \ DECL_MMF_CASE_HELPER(half2, ncols_dst) \ DECL_MMF_CASE_HELPER(nv_bfloat162, ncols_dst) DECL_MMF_CASE_EXTERN(1); DECL_MMF_CASE_EXTERN(2); DECL_MMF_CASE_EXTERN(3); DECL_MMF_CASE_EXTERN(4); DECL_MMF_CASE_EXTERN(5); DECL_MMF_CASE_EXTERN(6); DECL_MMF_CASE_EXTERN(7); DECL_MMF_CASE_EXTERN(8); DECL_MMF_CASE_EXTERN(9); DECL_MMF_CASE_EXTERN(10); DECL_MMF_CASE_EXTERN(11); DECL_MMF_CASE_EXTERN(12); DECL_MMF_CASE_EXTERN(13); DECL_MMF_CASE_EXTERN(14); DECL_MMF_CASE_EXTERN(15); DECL_MMF_CASE_EXTERN(16); #else #define DECL_MMF_CASE(ncols_dst) #endif ggml-org-ggml-3678254/src/ggml-cuda/mmid.cu000066400000000000000000000174221512524704700203000ustar00rootroot00000000000000#include "common.cuh" #include "mmid.cuh" // To reduce shared memory use, store "it" and "iex_used" with 22/10 bits each. struct mm_ids_helper_store { uint32_t data; __device__ mm_ids_helper_store(const uint32_t it, const uint32_t iex_used) { data = (it & 0x003FFFFF) | (iex_used << 22); } __device__ uint32_t it() const { return data & 0x003FFFFF; } __device__ uint32_t iex_used() const { return data >> 22; } }; static_assert(sizeof(mm_ids_helper_store) == 4, "unexpected size for mm_ids_helper_store"); // Helper function for mul_mat_id, converts ids to a more convenient format. // ids_src1 describes how to permute the flattened column indices of src1 in order to get a compact src1 tensor sorted by expert. // ids_dst describes the same mapping but for the dst tensor. // The upper and lower bounds for the ith expert in the compact src1 tensor are stored in expert_bounds[i:i+1]. template __launch_bounds__(ggml_cuda_get_physical_warp_size(), 1) static __global__ void mm_ids_helper( const int32_t * __restrict__ ids, int32_t * __restrict__ ids_src1, int32_t * __restrict__ ids_dst, int32_t * __restrict__ expert_bounds, const int n_tokens, const int n_expert_used_var, const int nchannels_y, const int si1, const int sis1) { constexpr int warp_size = ggml_cuda_get_physical_warp_size(); const int n_expert_used = n_expert_used_template == 0 ? n_expert_used_var : n_expert_used_template; const int expert = blockIdx.x; extern __shared__ char data_mm_ids_helper[]; mm_ids_helper_store * store = (mm_ids_helper_store *) data_mm_ids_helper; int nex_prev = 0; // Number of columns for experts with a lower index. int it_compact = 0; // Running index for the compact slice of this expert. if constexpr (n_expert_used_template == 0) { // Generic implementation: for (int it = 0; it < n_tokens; ++it) { int iex_used = -1; // The index at which the expert is used, if any. 
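// [Editor's note: illustrative sketch, not part of the original ggml source.]
// mm_ids_helper_store above packs the token index ("it", 22 bits) and the
// expert slot ("iex_used", 10 bits) into a single uint32_t; the asserts in
// launch_mm_ids_helper (n_tokens < 2^22, n_expert_used < 2^10) exist so this
// packing cannot overflow. Host-side roundtrip of the same bit arithmetic:
#include <cassert>
#include <cstdint>

static void ids_store_roundtrip() {
    const uint32_t it       = (1u << 22) - 1;  // largest representable token index
    const uint32_t iex_used = (1u << 10) - 1;  // largest representable expert slot
    const uint32_t packed   = (it & 0x003FFFFF) | (iex_used << 22);
    assert((packed & 0x003FFFFF) == it);       // what store.it() returns
    assert((packed >> 22)        == iex_used); // what store.iex_used() returns
}
// [End editor's note.]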
for (int iex = threadIdx.x; iex < n_expert_used; iex += warp_size) { const int expert_used = ids[it*si1 + iex]; nex_prev += expert_used < expert; if (expert_used == expert) { iex_used = iex; } } if (iex_used != -1) { store[it_compact] = mm_ids_helper_store(it, iex_used); } if (warp_reduce_any(iex_used != -1)) { it_compact++; } } } else { // Implementation optimized for specific numbers of experts used: static_assert(n_expert_used == 6 || warp_size % n_expert_used == 0, "bad n_expert_used"); const int neu_padded = n_expert_used == 6 ? 8 : n_expert_used; // Padded to next higher power of 2. for (int it0 = 0; it0 < n_tokens; it0 += warp_size/neu_padded) { const int it = it0 + threadIdx.x / neu_padded; const int iex = threadIdx.x % neu_padded; // The index at which the expert is used, if any. const int expert_used = (neu_padded == n_expert_used || iex < n_expert_used) && it < n_tokens ? ids[it*si1 + iex] : INT_MAX; const int iex_used = expert_used == expert ? iex : -1; nex_prev += expert_used < expert; // Whether the threads at this token position have used the expert: const int it_compact_add_self = warp_reduce_any(iex_used != -1); // Do a scan over threads at lower token positions in warp to get the correct index for writing data: int it_compact_add_lower = 0; #pragma unroll for (int offset = neu_padded; offset < warp_size; offset += neu_padded) { const int tmp = __shfl_up_sync(0xFFFFFFFF, it_compact_add_self, offset, warp_size); if (threadIdx.x >= static_cast(offset)) { it_compact_add_lower += tmp; } } if (iex_used != -1) { store[it_compact + it_compact_add_lower] = mm_ids_helper_store(it, iex_used); } // The thread with the highest index in the warp always has the sum over the whole warp, use it to increment all threads: it_compact += __shfl_sync(0xFFFFFFFF, it_compact_add_lower + it_compact_add_self, warp_size - 1, warp_size); } } nex_prev = warp_reduce_sum(nex_prev); for (int itc = threadIdx.x; itc < it_compact; itc += warp_size) { const mm_ids_helper_store store_it = store[itc]; const int it = store_it.it(); const int iex_used = store_it.iex_used(); ids_src1[nex_prev + itc] = it*sis1 + iex_used % nchannels_y; ids_dst [nex_prev + itc] = it*n_expert_used + iex_used; } if (threadIdx.x != 0) { return; } expert_bounds[expert] = nex_prev; if (expert < static_cast(gridDim.x) - 1) { return; } expert_bounds[gridDim.x] = nex_prev + it_compact; } template static void launch_mm_ids_helper( const int32_t * __restrict__ ids, int32_t * __restrict__ ids_src1, int32_t * __restrict__ ids_dst, int32_t * __restrict__ expert_bounds, const int n_experts, const int n_tokens, const int n_expert_used_var, const int nchannels_y, const int si1, const int sis1, cudaStream_t stream) { GGML_ASSERT(n_tokens < (1 << 22) && "too few bits in mm_ids_helper_store"); GGML_ASSERT(n_expert_used_var < (1 << 10) && "too few bits in mm_ids_helper_store"); const int id = ggml_cuda_get_device(); const int warp_size = ggml_cuda_info().devices[id].warp_size; const size_t smpbo = ggml_cuda_info().devices[id].smpbo; CUDA_SET_SHARED_MEMORY_LIMIT(mm_ids_helper, smpbo); const dim3 num_blocks(n_experts, 1, 1); const dim3 block_size(warp_size, 1, 1); const size_t nbytes_shared = n_tokens*sizeof(mm_ids_helper_store); GGML_ASSERT(nbytes_shared <= smpbo); mm_ids_helper<<>> (ids, ids_src1, ids_dst, expert_bounds, n_tokens, n_expert_used_var, nchannels_y, si1, sis1); } void ggml_cuda_launch_mm_ids_helper( const int32_t * __restrict__ ids, int32_t * __restrict__ ids_src1, int32_t * __restrict__ ids_dst, int32_t * __restrict__ expert_bounds, 
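// [Editor's note: illustrative sketch, not part of the original ggml source.]
// The __shfl_up_sync loop above is a warp-level exclusive prefix sum: each
// lane adds the flags of all lower lanes so it knows at which offset to write
// its compacted entry. The kernel steps by neu_padded lanes at a time; the
// single-lane version of the same idea is sketched below (CUDA device code,
// must be called by all 32 lanes of a warp):
static __device__ __forceinline__ int warp_exclusive_sum_sketch(const int self) {
    const int lane = threadIdx.x % 32;
    int lower = 0;                                           // sum of 'self' over lanes < lane
#pragma unroll
    for (int offset = 1; offset < 32; ++offset) {
        const int tmp = __shfl_up_sync(0xFFFFFFFF, self, offset, 32);
        if (lane >= offset) {
            lower += tmp;
        }
    }
    return lower;
}
// [End editor's note.]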
const int n_experts, const int n_tokens, const int n_expert_used, const int nchannels_y, const int si1, const int sis1, cudaStream_t stream) { switch (n_expert_used) { case 2: launch_mm_ids_helper< 2>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; case 4: launch_mm_ids_helper< 4>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; case 6: launch_mm_ids_helper< 6>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; case 8: launch_mm_ids_helper< 8>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; case 16: launch_mm_ids_helper<16>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; case 32: launch_mm_ids_helper<32>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; default: launch_mm_ids_helper< 0>(ids, ids_src1, ids_dst, expert_bounds, n_experts, n_tokens, n_expert_used, nchannels_y, si1, sis1, stream); break; } } ggml-org-ggml-3678254/src/ggml-cuda/mmid.cuh000066400000000000000000000004021512524704700204360ustar00rootroot00000000000000#pragma once void ggml_cuda_launch_mm_ids_helper( const int32_t * ids, int32_t * ids_src1, int32_t * ids_dst, int32_t * expert_bounds, int n_experts, int n_tokens, int n_expert_used, int nchannels_y, int si1, int sis1, cudaStream_t stream); ggml-org-ggml-3678254/src/ggml-cuda/mmq.cu000066400000000000000000000317421512524704700201450ustar00rootroot00000000000000#include "common.cuh" #include "mmq.cuh" #include "quantize.cuh" #include "mmid.cuh" static void ggml_cuda_mul_mat_q_switch_type(ggml_backend_cuda_context & ctx, const mmq_args & args, cudaStream_t stream) { switch (args.type_x) { case GGML_TYPE_Q4_0: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q4_1: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q5_0: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q5_1: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q8_0: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_MXFP4: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q2_K: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q3_K: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q4_K: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q5_K: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_Q6_K: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ2_XXS: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ2_XS: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ2_S: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ3_XXS: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ3_S: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ1_S: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ4_XS: mul_mat_q_case(ctx, args, stream); break; case GGML_TYPE_IQ4_NL: mul_mat_q_case(ctx, args, stream); break; default: GGML_ABORT("fatal error"); break; } } void ggml_cuda_mul_mat_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) { GGML_ASSERT( src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(!ids || ids->type == GGML_TYPE_I32); // Optional, used for batched GGML_MUL_MAT_ID. 
GGML_TENSOR_BINARY_OP_LOCALS; cudaStream_t stream = ctx.stream(); const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; const size_t ts_src0 = ggml_type_size(src0->type); const size_t ts_src1 = ggml_type_size(src1->type); const size_t ts_dst = ggml_type_size(dst->type); GGML_ASSERT( nb00 == ts_src0); GGML_ASSERT( nb10 == ts_src1); GGML_ASSERT( nb0 == ts_dst); GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type)); const char * src0_d = (const char *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; // If src0 is a temporary compute buffer, clear any potential padding. if (ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE) { const size_t size_data = ggml_nbytes(src0); const size_t size_alloc = ggml_backend_buffer_get_alloc_size(src0->buffer, src0); if (size_alloc > size_data) { GGML_ASSERT(ggml_is_contiguously_allocated(src0)); GGML_ASSERT(!src0->view_src); CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream)); } } const int64_t ne10_padded = GGML_PAD(ne10, MATRIX_ROW_PADDING); const int64_t s01 = src0->nb[1] / ts_src0; const int64_t s1 = dst->nb[1] / ts_dst; const int64_t s02 = src0->nb[2] / ts_src0; const int64_t s2 = dst->nb[2] / ts_dst; const int64_t s03 = src0->nb[3] / ts_src0; const int64_t s3 = dst->nb[3] / ts_dst; const bool use_stream_k = (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_CDNA(cc); // TODO: tighter pool buffer size vs q8 path const bool use_native_mxfp4 = blackwell_mma_available(cc) && src0->type == GGML_TYPE_MXFP4; if (!ids) { const size_t nbytes_src1_q8_1 = ne13*ne12 * ne11*ne10_padded * sizeof(block_q8_1)/QK8_1 + get_mmq_x_max_host(cc)*sizeof(block_q8_1_mmq); ggml_cuda_pool_alloc src1_q8_1(ctx.pool(), nbytes_src1_q8_1); { const int64_t s11 = src1->nb[1] / ts_src1; const int64_t s12 = src1->nb[2] / ts_src1; const int64_t s13 = src1->nb[3] / ts_src1; if (use_native_mxfp4) { static_assert(sizeof(block_fp4_mmq) == 4 * sizeof(block_q8_1)); quantize_mmq_mxfp4_cuda(src1_d, nullptr, src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11, ne12, ne13, stream); } else { quantize_mmq_q8_1_cuda(src1_d, nullptr, src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11, ne12, ne13, stream); } CUDA_CHECK(cudaGetLastError()); } // Stride depends on quantization format const int64_t s12 = use_native_mxfp4 ? 
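// [Editor's note: illustrative sketch, not part of the original ggml source.]
// ne10_padded above rounds the row length up to a multiple of
// MATRIX_ROW_PADDING so every row of src1 occupies a whole number of
// quantized blocks before the q8_1 buffer is sized. The constants below are
// stand-ins chosen for the example, not the real ggml values:
#include <cassert>
#include <cstdint>

static int64_t round_up(int64_t x, int64_t n) {   // same arithmetic as GGML_PAD(x, n)
    return (x + n - 1) / n * n;
}

static void padded_row_example() {
    const int64_t ne10        = 4100;  // hypothetical row length
    const int64_t row_padding = 512;   // stand-in for MATRIX_ROW_PADDING
    const int64_t qk          = 32;    // values per quantized block (stand-in)
    const int64_t block_bytes = 36;    // bytes per quantized block (stand-in)
    const int64_t ne10_padded = round_up(ne10, row_padding);
    assert(ne10_padded == 4608);
    assert(ne10_padded % qk == 0);     // rows now split cleanly into blocks
    const int64_t bytes_per_row = ne10_padded / qk * block_bytes;
    assert(bytes_per_row == 144 * 36);
}
// [End editor's note.]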
ne11 * ne10_padded * sizeof(block_fp4_mmq) / (8 * QK_MXFP4 * sizeof(int)) // block_fp4_mmq holds 256 values (8 blocks of 32) : ne11 * ne10_padded * sizeof(block_q8_1) / (QK8_1 * sizeof(int)); const int64_t s13 = ne12*s12; const mmq_args args = { src0_d, src0->type, (const int *) src1_q8_1.ptr, nullptr, nullptr, dst_d, ne00, ne01, ne1, s01, ne11, s1, ne02, ne12, s02, s12, s2, ne03, ne13, s03, s13, s3, use_stream_k, ne1}; ggml_cuda_mul_mat_q_switch_type(ctx, args, stream); return; } GGML_ASSERT(ne13 == 1); GGML_ASSERT(nb12 % nb11 == 0); GGML_ASSERT(nb2 % nb1 == 0); const int64_t n_expert_used = ids->ne[0]; const int64_t ne_get_rows = ne12 * n_expert_used; GGML_ASSERT(ne1 == n_expert_used); ggml_cuda_pool_alloc ids_src1(ctx.pool(), ne_get_rows); ggml_cuda_pool_alloc ids_dst(ctx.pool(), ne_get_rows); ggml_cuda_pool_alloc expert_bounds(ctx.pool(), ne02 + 1); { GGML_ASSERT(ids->nb[0] == ggml_element_size(ids)); const int si1 = ids->nb[1] / ggml_element_size(ids); const int sis1 = nb12 / nb11; ggml_cuda_launch_mm_ids_helper((const int32_t *) ids->data, ids_src1.get(), ids_dst.get(), expert_bounds.get(), ne02, ne12, n_expert_used, ne11, si1, sis1, stream); CUDA_CHECK(cudaGetLastError()); } const size_t nbytes_src1_q8_1 = ne12*n_expert_used*ne10_padded * sizeof(block_q8_1)/QK8_1 + get_mmq_x_max_host(cc)*sizeof(block_q8_1_mmq); ggml_cuda_pool_alloc src1_q8_1(ctx.pool(), nbytes_src1_q8_1); const int64_t ne11_flat = ne12*n_expert_used; const int64_t ne12_flat = 1; const int64_t ne13_flat = 1; { const int64_t s11 = src1->nb[1] / ts_src1; const int64_t s12 = src1->nb[2] / ts_src1; const int64_t s13 = src1->nb[2] / ts_src1; if (use_native_mxfp4) { quantize_mmq_mxfp4_cuda(src1_d, ids_src1.get(), src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11_flat, ne12_flat, ne13_flat, stream); } else { quantize_mmq_q8_1_cuda(src1_d, ids_src1.get(), src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11_flat, ne12_flat, ne13_flat, stream); } CUDA_CHECK(cudaGetLastError()); } const int64_t s12 = use_native_mxfp4 ? ne11 * ne10_padded * sizeof(block_fp4_mmq) / (8 * QK_MXFP4 * sizeof(int)) : ne11 * ne10_padded * sizeof(block_q8_1) / (QK8_1 * sizeof(int)); const int64_t s13 = ne12*s12; // Note that ne02 is used instead of ne12 because the number of y channels determines the z dimension of the CUDA grid. const mmq_args args = { src0_d, src0->type, (const int *) src1_q8_1.get(), ids_dst.get(), expert_bounds.get(), dst_d, ne00, ne01, ne_get_rows, s01, ne_get_rows, s1, ne02, ne02, s02, s12, s2, ne03, ne13, s03, s13, s3, use_stream_k, ne12}; ggml_cuda_mul_mat_q_switch_type(ctx, args, stream); } void ggml_cuda_op_mul_mat_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream) { const int64_t ne00 = src0->ne[0]; const int64_t ne10 = src1->ne[0]; const int64_t ne11 = src1->ne[1]; GGML_ASSERT(ne10 % QK8_1 == 0); const int64_t ne0 = dst->ne[0]; const int64_t row_diff = row_high - row_low; const int64_t stride01 = ne00 / ggml_blck_size(src0->type); const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; // the main device has a larger memory buffer to hold the results from all GPUs // nrows_dst == nrows of the matrix that the kernel writes into const int64_t nrows_dst = id == ctx.device ? 
        ne0 : row_diff;

    // The stream-k decomposition is only faster for recent NVIDIA GPUs.
    // Also its fixup needs to allocate a temporary buffer in the memory pool.
    // There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer.
    const bool use_stream_k = ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA)
        || GGML_CUDA_CC_IS_CDNA(cc)) && src1_ncols == ne11;

    const mmq_args args = {
        src0_dd_i, src0->type, (const int *) src1_ddq_i, nullptr, nullptr, dst_dd_i,
        ne00, row_diff, src1_ncols, stride01, ne11, nrows_dst,
        1, 1, 0, 0, 0,
        1, 1, 0, 0, 0,
        use_stream_k, src1_ncols};

    ggml_cuda_mul_mat_q_switch_type(ctx, args, stream);

    GGML_UNUSED_VARS(src1, dst, src1_ddf_i, src1_padded_row_size);
}

bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11, int64_t n_experts) {
#ifdef GGML_CUDA_FORCE_CUBLAS
    return false;
#endif // GGML_CUDA_FORCE_CUBLAS

    bool mmq_supported;

    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_MXFP4:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
            mmq_supported = true;
            break;
        default:
            mmq_supported = false;
            break;
    }

    if (!mmq_supported) {
        return false;
    }

    if (turing_mma_available(cc)) {
        return true;
    }

    if (ggml_cuda_highest_compiled_arch(cc) < GGML_CUDA_CC_DP4A) {
        return false;
    }

#ifdef GGML_CUDA_FORCE_MMQ
    return true;
#endif //GGML_CUDA_FORCE_MMQ

    if (GGML_CUDA_CC_IS_NVIDIA(cc)) {
        return !fp16_mma_hardware_available(cc) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
    }

    if (amd_mfma_available(cc)) {
        // As of ROCM 7.0 rocblas/tensile performs very poorly on CDNA3 and hipblaslt (via ROCBLAS_USE_HIPBLASLT)
        // performs better but is currently suffering from a crash on this architecture.
        // TODO: Revisit when hipblaslt is fixed on CDNA3
        if (GGML_CUDA_CC_IS_CDNA3(cc)) {
            return true;
        }
        if (n_experts > 64 || ne11 <= 128) {
            return true;
        }
        if (type == GGML_TYPE_Q4_0 || type == GGML_TYPE_Q4_1 || type == GGML_TYPE_Q5_0 || type == GGML_TYPE_Q5_1) {
            return true;
        }
        if (ne11 <= 256 && (type == GGML_TYPE_Q4_K || type == GGML_TYPE_Q5_K)) {
            return true;
        }
        return false;
    }

    if (amd_wmma_available(cc)) {
        return true;
    }

    return (!GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}
ggml-org-ggml-3678254/src/ggml-cuda/mmq.cuh000066400000000000000000005167271512524704700203270ustar00rootroot00000000000000#pragma once

#include "common.cuh"
#include "vecdotq.cuh"
#include "mma.cuh"

#include <climits>
#include <cstdint>

using namespace ggml_cuda_mma;

#define MMQ_DP4A_MAX_BATCH_SIZE 64 // Max. batch size to use for dp4a MMQ kernels when FP16 tensor cores are available.
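// A condensed sketch of the dispatch heuristic implemented by ggml_cuda_should_use_mmq() at the
// end of mmq.cu above; the function itself remains the authoritative logic:
//   - quantization type not in the MMQ switch            -> no MMQ (fall back to cuBLAS/other kernels)
//   - turing_mma_available(cc)                           -> MMQ
//   - highest compiled arch < GGML_CUDA_CC_DP4A          -> no MMQ
//   - GGML_CUDA_FORCE_MMQ defined                        -> MMQ
//   - NVIDIA without FP16 MMA hardware, or ne11 below
//     MMQ_DP4A_MAX_BATCH_SIZE                            -> MMQ
//   - AMD MFMA/WMMA/other                                -> per-architecture heuristics, see above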
#define MMQ_ITER_K 256
#define MMQ_ITER_K_MXFP4_FP4 512
#define MMQ_NWARPS 8

typedef void (*load_tiles_mmq_t)(const char * __restrict__ x, int * x_tile, const int kbx0, const int i_max, const int stride);
typedef void (*vec_dot_mmq_t)(const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00);
typedef void (*mmq_write_back_t)(const float * __restrict__ sum, const int32_t * __restrict__ get_rows_to_sorted,
    float * __restrict__ dst, const int stride, const int i_max, const int j_max);

enum mmq_q8_1_ds_layout {
    MMQ_Q8_1_DS_LAYOUT_D4,
    MMQ_Q8_1_DS_LAYOUT_DS4,
    MMQ_Q8_1_DS_LAYOUT_D2S6,
};

struct block_q8_1_mmq {
    // The y float data is converted to a data layout that can simply be copied to shared memory as a contiguous block.
    // The y float data is first grouped as blocks of 128 values.
    // These blocks are then treated as individual data values and transposed.
    //
    // To avoid shared memory bank conflicts each block is padded with 16 bytes.
    // This padding is also used to store block scales/partial sums.
    // The scales multiplied with the quantized data are equal to the unquantized values.
    // The partial sums are obtained by summing up a subgroup of the contained values (prior to quantization)
    //     and are only needed for performance reasons.
    //
    // The exact data stored depends on the x data type.
    union {
        float d4[4];    // 1 32 bit scale per 32 values, stored as d0,d1,d2,d3
        half2 ds4[4];   // 1 16 bit scale + 1 16 bit partial sum per 32 values, stored as d0,s0,d1,s1,d2,s2,d3,s3
        half  d2s6[8];  // 1 16 bit scale per 64 values + 1 16 bit partial sum per 16 values for the first 96 values,
                        //     stored as d0,d1,s1,s2,s3,s4,s5,s6
    };
    int8_t qs[4*QK8_1]; // 128 values quantized to 8 bit each
};

struct block_fp4_mmq {
    uint32_t d4[4];      // 8 E8M0 scales (1 per 32 values), 2 packed per uint32: d4[0]={s0,s1}, d4[1]={s2,s3}, etc.
    int8_t   qs[4 * 32]; // 256 FP4 values packed as 4-bit pairs (2 per byte), 8 blocks of 32 values
};

static_assert(sizeof(block_q8_1_mmq) == 4*QK8_1 + 4*sizeof(half2), "Unexpected block_q8_1_mmq size");
static_assert(sizeof(block_q8_1_mmq) == 4*sizeof(block_q8_1),      "Unexpected block_q8_1_mmq size");
static_assert(sizeof(block_fp4_mmq)  == sizeof(block_q8_1_mmq),    "Unexpected block_fp4_mmq size");

static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
    switch (type_x) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
            return MMQ_Q8_1_DS_LAYOUT_DS4;
        case GGML_TYPE_Q5_0:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        case GGML_TYPE_Q5_1:
            return MMQ_Q8_1_DS_LAYOUT_DS4;
        case GGML_TYPE_Q8_0:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        case GGML_TYPE_MXFP4:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        case GGML_TYPE_Q2_K:
            return MMQ_Q8_1_DS_LAYOUT_D2S6;
        case GGML_TYPE_Q3_K:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
            return MMQ_Q8_1_DS_LAYOUT_DS4;
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        case GGML_TYPE_IQ1_S:
            return MMQ_Q8_1_DS_LAYOUT_DS4;
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
            return MMQ_Q8_1_DS_LAYOUT_D4;
        default:
            GGML_ABORT("fatal error");
            break;
    }
}

struct tile_x_sizes {
    int qs;
    int dm;
    int sc;
};

static int get_mmq_x_max_host(const int cc) {
    return (amd_mfma_available(cc) || turing_mma_available(cc) || amd_wmma_available(cc)) ? 128 :
        GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA ?
#ifdef GGML_CUDA_FORCE_MMQ
        128 : 64;
#else
        MMQ_DP4A_MAX_BATCH_SIZE : 64;
#endif // GGML_CUDA_FORCE_MMQ
}

static constexpr __device__ int get_mmq_x_max_device() {
#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE)
    return 128;
#else // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)

#if defined(GGML_USE_HIP)
    return 64;
#else // defined(GGML_USE_HIP)

#if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
#ifdef GGML_CUDA_FORCE_MMQ
    return 128;
#else // GGML_CUDA_FORCE_MMQ
    return MMQ_DP4A_MAX_BATCH_SIZE;
#endif // GGML_CUDA_FORCE_MMQ
#else // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
    return 64;
#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA

#endif // defined(GGML_USE_HIP)
#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE)
}

static int get_mmq_y_host(const int cc) {
    return GGML_CUDA_CC_IS_AMD(cc) ? (GGML_CUDA_CC_IS_RDNA1(cc) ? 64 : 128) :
        ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) ? 128 : 64);
}

static constexpr __device__ int get_iter_k([[maybe_unused]] const ggml_type type) {
#if defined(BLACKWELL_MMA_AVAILABLE)
    return type == GGML_TYPE_MXFP4 ? MMQ_ITER_K_MXFP4_FP4 : MMQ_ITER_K;
#else
    return MMQ_ITER_K;
#endif // defined(BLACKWELL_MMA_AVAILABLE)
}

static constexpr __device__ int get_mmq_y_device() {
#if defined(GGML_USE_HIP)
#if defined(RDNA1)
    return 64;
#else
    return 128;
#endif // defined RDNA1
#else
#if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
    return 128;
#else
    return 64;
#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
#endif // defined(GGML_USE_HIP)
}

// Decouple shared memory tile sizes from WARP_SIZE to allow for different warp sizes.
// The K dimension of the tiles has either,
// 1*MMQ_TILE_NE_K==32 (always for TILE_Y_K) or 2*MMQ_TILE_NE_K==64 (typically for TILE_X_K),
// 32 bit elements for the quantized data (does not include scales).
// In other words, the size of the quantized data in the K dimension is a multiple of MMQ_TILE_NE_K.
// The final tile size in K direction is padded to avoid shared memory bank conflicts,
// in terms of 32 bit elements that means K % 2 == 1 for dp4a or K % 8 == 4 for mma.
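// Worked example of the padding rule above, assuming QI8_0 == 8 (its value in the common
// quantization headers):
//     MMQ_MMA_TILE_X_K_Q8_0 = 2*MMQ_TILE_NE_K + 2*MMQ_TILE_NE_K/QI8_0 + 4 = 64 + 8 + 4 = 76
// and 76 % 8 == 4, which is exactly the "K % 8 == 4 for mma" condition that the static_asserts
// below check for every MMA tile stride.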
#define MMQ_TILE_NE_K 32 #define MMQ_DP4A_TXS_Q4_0 tile_x_sizes{mmq_y*MMQ_TILE_NE_K + mmq_y, mmq_y*MMQ_TILE_NE_K/QI4_0 + mmq_y/QI4_0, 0} #define MMQ_DP4A_TXS_Q4_1 tile_x_sizes{mmq_y*MMQ_TILE_NE_K + mmq_y, mmq_y*MMQ_TILE_NE_K/QI4_1 + mmq_y/QI4_1, 0} #define MMQ_DP4A_TXS_Q8_0 tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K*2/QI8_0 + mmq_y/(QI8_0/2), 0} #define MMQ_DP4A_TXS_Q8_0_16 tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K*4/QI8_0 + mmq_y/(QI8_0/4), 0} #define MMQ_DP4A_TXS_Q8_1 tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K*2/QI8_1 + mmq_y/(QI8_1/2), 0} #define MMQ_DP4A_TXS_Q2_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K + mmq_y, 0} #define MMQ_DP4A_TXS_Q3_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} #define MMQ_DP4A_TXS_Q4_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K + mmq_y, mmq_y*MMQ_TILE_NE_K/QI4_K, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} #define MMQ_DP4A_TXS_Q5_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K/QI5_K + mmq_y/QI5_K, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} #define MMQ_DP4A_TXS_Q6_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K/QI6_K + mmq_y/QI6_K, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml_type type, int mmq_y) { switch (type) { case GGML_TYPE_Q4_0: return MMQ_DP4A_TXS_Q4_0; case GGML_TYPE_Q4_1: return MMQ_DP4A_TXS_Q4_1; case GGML_TYPE_Q5_0: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_Q5_1: return MMQ_DP4A_TXS_Q8_1; case GGML_TYPE_Q8_0: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_MXFP4: return MMQ_DP4A_TXS_Q8_1; case GGML_TYPE_Q2_K: return MMQ_DP4A_TXS_Q2_K; case GGML_TYPE_Q3_K: return MMQ_DP4A_TXS_Q3_K; case GGML_TYPE_Q4_K: return MMQ_DP4A_TXS_Q4_K; case GGML_TYPE_Q5_K: return MMQ_DP4A_TXS_Q5_K; case GGML_TYPE_Q6_K: return MMQ_DP4A_TXS_Q6_K; case GGML_TYPE_IQ2_XXS: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_IQ2_XS: return MMQ_DP4A_TXS_Q8_0_16; case GGML_TYPE_IQ2_S: return MMQ_DP4A_TXS_Q8_0_16; case GGML_TYPE_IQ3_XXS: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_IQ3_S: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_IQ1_S: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_IQ4_XS: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_IQ4_NL: return MMQ_DP4A_TXS_Q8_0; default: return tile_x_sizes{0, 0, 0}; } } #define MMQ_MMA_TILE_X_K_Q8_0 (2*MMQ_TILE_NE_K + 2*MMQ_TILE_NE_K/QI8_0 + 4) #define MMQ_MMA_TILE_X_K_FP4 (2*MMQ_TILE_NE_K + 8 + 4) #define MMQ_MMA_TILE_X_K_Q8_1 (2*MMQ_TILE_NE_K + 2*MMQ_TILE_NE_K/QI8_0 + 4) #define MMQ_MMA_TILE_X_K_Q2_K (2*MMQ_TILE_NE_K + MMQ_TILE_NE_K + 4) #define MMQ_MMA_TILE_X_K_Q3_K (2*MMQ_TILE_NE_K + MMQ_TILE_NE_K/2 + 4) #define MMQ_MMA_TILE_X_K_Q6_K (2*MMQ_TILE_NE_K + MMQ_TILE_NE_K/QI6_K + MMQ_TILE_NE_K/8 + 7) static_assert(MMQ_MMA_TILE_X_K_Q8_0 % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_Q8_1 % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_Q2_K % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_Q3_K % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_Q6_K % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_FP4 % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_FP4 == MMQ_MMA_TILE_X_K_Q8_1, "Wrong tile size for MXFP4"); static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_Q4_1: return MMQ_MMA_TILE_X_K_Q8_1; case GGML_TYPE_Q5_0: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_Q5_1: return MMQ_MMA_TILE_X_K_Q8_1; case 
GGML_TYPE_Q8_0: return MMQ_MMA_TILE_X_K_Q8_0; // tile sizes are the same for Q8_1 and FP4 for blackwell case GGML_TYPE_MXFP4: return MMQ_MMA_TILE_X_K_Q8_1; case GGML_TYPE_Q2_K: return MMQ_MMA_TILE_X_K_Q2_K; case GGML_TYPE_Q3_K: return MMQ_MMA_TILE_X_K_Q3_K; case GGML_TYPE_Q4_K: return MMQ_MMA_TILE_X_K_Q8_1; case GGML_TYPE_Q5_K: return MMQ_MMA_TILE_X_K_Q8_1; case GGML_TYPE_Q6_K: return MMQ_MMA_TILE_X_K_Q6_K; case GGML_TYPE_IQ2_XXS: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_IQ2_XS: return MMQ_MMA_TILE_X_K_Q3_K; case GGML_TYPE_IQ2_S: return MMQ_MMA_TILE_X_K_Q3_K; case GGML_TYPE_IQ3_XXS: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_IQ3_S: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_IQ1_S: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_IQ4_XS: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_IQ4_NL: return MMQ_MMA_TILE_X_K_Q8_0; default: return 0; } } // block_q8_1_mmq has (128 8-bit ints == 32 32-bit ints + 4 32-bit scales) #define MMQ_TILE_Y_K (MMQ_TILE_NE_K + MMQ_TILE_NE_K / QI8_1) #define MMQ_TILE_Y_FP4_K MMQ_TILE_Y_K static int mmq_get_granularity_host(const int mmq_x, const int cc) { if (amd_mfma_available(cc) || amd_wmma_available(cc)) { return mmq_x >= 128 ? 32 : 16; } else if (turing_mma_available(cc) && mmq_x >= 48) { return 16; } else { return 8; } } #if defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) static constexpr __device__ int mmq_get_granularity_device(const int mmq_x) { return mmq_x >= 128 ? 32 : 16; } #elif defined(TURING_MMA_AVAILABLE) static constexpr __device__ int mmq_get_granularity_device(const int mmq_x) { return mmq_x >= 48 ? 16 : 8; } #else static constexpr __device__ int mmq_get_granularity_device(const int /*mmq_x*/) { return 8; } #endif // AMD_MFMA_AVAILABLE #if defined(GGML_USE_HIP) static int mmq_get_nwarps_host(const int cc, const int warp_size) { return amd_mfma_available(cc) ? 8 : 256/warp_size; } #else static int mmq_get_nwarps_host(const int /*cc*/, const int warp_size) { return 256/warp_size; } #endif // (GGML_USE_HIP) static constexpr __device__ int mmq_get_nwarps_device() { #if defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) return 8; #else return 256/ggml_cuda_get_physical_warp_size(); #endif // AMD_MFMA_AVAILABLE } // ------------------------------------------------------------ template static __device__ __forceinline__ void load_tiles_q4_0( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_0); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI4_0; const int kqsx = txi % QI4_0; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbx; const int qs0 = get_int_b2(bxi->qs, kqsx); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI4_0) + kqsx + 0] = __vsubss4((qs0 >> 0) & 0x0F0F0F0F, 0x08080808); x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI4_0) + kqsx + QI4_0] = __vsubss4((qs0 >> 4) & 0x0F0F0F0F, 0x08080808); #else x_qs[i*(MMQ_TILE_NE_K + 1) + txi] = qs0; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI4_0; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else x_df[i*(MMQ_TILE_NE_K/QI4_0) + i/QI4_0 + kbxd] = bxi->d; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_0, mmq_y); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + txs.qs; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR4_0*VDR_Q4_0_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const int kyqs = QI8_1 * ((k01/2) / (QI8_1/2)) + (k01/2) % (QI8_1/2); int u[2*VDR_Q4_0_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j*MMQ_TILE_Y_K + kyqs + l]; u[2*l+1] = y_qs[j*MMQ_TILE_Y_K + kyqs + (l + QI4_0)]; } sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q4_0_q8_1_impl (&x_qs[i*(MMQ_TILE_NE_K + 1) + k0/QR4_0], u, x_df[i*(MMQ_TILE_NE_K/QI4_0) + i/QI4_0 + k0/(QR4_0*QI4_0)], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } template static __device__ __forceinline__ void load_tiles_q4_1( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_1, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_1); constexpr int nrows = warp_size / threads_per_row; 
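    // Same mapping as in load_tiles_q4_0 above: with MMQ_ITER_K == 256 and QR4_1 == 2,
    // threads_per_row is 32, so each thread loads one 32-bit chunk of packed 4-bit quants per
    // tile row, and nrows is the number of tile rows a single warp covers per iteration
    // (2 on 64-wide AMD warps, 1 otherwise).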
const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI4_1; const int kqsx = txi % QI4_1; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbx; const int qs0 = get_int_b4(bxi->qs, kqsx); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI4_1) + kqsx + 0] = (qs0 >> 0) & 0x0F0F0F0F; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI4_1) + kqsx + QI4_1] = (qs0 >> 4) & 0x0F0F0F0F; #else x_qs[i*(MMQ_TILE_NE_K + 1) + txi] = qs0; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI4_1; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; #else x_dm[i*(MMQ_TILE_NE_K/QI4_1) + i/QI4_1 + kbxd] = bxi->dm; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_1, mmq_y); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + txs.qs; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR4_1*VDR_Q4_1_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const int kyqs = QI8_1 * ((k01/2) / (QI8_1/2)) + (k01/2) % (QI8_1/2); int u[2*VDR_Q4_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j*MMQ_TILE_Y_K + kyqs + l]; u[2*l+1] = y_qs[j*MMQ_TILE_Y_K + kyqs + (l + QI4_1)]; } sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q4_1_q8_1_impl (&x_qs[i*(MMQ_TILE_NE_K + 1) + k0/QR4_1], u, x_dm[i*(MMQ_TILE_NE_K/QI4_1) + i/QI4_1 + k0/(QR4_1*QI4_1)], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } template static __device__ __forceinline__ void load_tiles_q5_0( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) 
(x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR5_0); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI5_0; const int kqsx = txi % QI5_0; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q5_0 * bxi = (const block_q5_0 *) x + kbx0 + i*stride + kbx; const int ql = get_int_b2(bxi->qs, kqsx); const int qh = get_int_b2(bxi->qh, 0) >> (4 * kqsx); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI5_0) + kqsx + 0] = qs0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_0) + kqsx + 0] = qs0; x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI5_0; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q5_0 * bxi = (const block_q5_0 *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else x_df[i*(MMQ_TILE_NE_K/QI5_0) + i/QI5_0 + kbxd] = bxi->d; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_q5_1( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_1, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR5_1); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI5_1; const int kqsx = txi % QI5_1; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q5_1 * bxi = (const block_q5_1 *) x + kbx0 + i*stride + kbx; const int ql = get_int_b4(bxi->qs, kqsx); const int qh = get_int_b4(bxi->qh, 0) >> (4 * kqsx); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI5_1) + kqsx + 0] = qs0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_1) + kqsx + 0] = qs0; x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI5_1; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q5_1 * bxi = (const block_q5_1 *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; #else x_dm[i*(MMQ_TILE_NE_K/QI5_1) + i/QI5_1 + kbxd] = bxi->dm; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_q8_0( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_tile + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q8_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) // MMQ_ITER_K / (4 * QR8_0) == 64 required. but NV has only 32 threads per warp constexpr int threads_per_row = 32; constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI8_0; const int kqsx = txi % QI8_0; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbx; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 0 + txi] = get_int_b2(bxi[0].qs, kqsx); x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + MMQ_TILE_NE_K + txi] = get_int_b2(bxi[MMQ_TILE_NE_K/QI8_0].qs, kqsx); #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + 0 + txi] = get_int_b2(bxi[0].qs, kqsx); x_qs[i*(2*MMQ_TILE_NE_K + 1) + MMQ_TILE_NE_K + txi] = get_int_b2(bxi[MMQ_TILE_NE_K/QI8_0].qs, kqsx); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = 2*MMQ_TILE_NE_K / QI8_0; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else x_df[i*(2*MMQ_TILE_NE_K/QI8_0) + i/(QI8_0/2) + kbxd] = bxi->d; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_mxfp4( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_MXFP4, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR_MXFP4); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI_MXFP4; const int kqsx = txi % QI_MXFP4; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_mxfp4 * bxi = (const block_mxfp4 *) x + kbx0 + i*stride + kbx; const int aux_q4 = get_int_b1(bxi->qs, kqsx); const int2 v = get_int_from_table_16(aux_q4, kvalues_mxfp4); const int k0 = kbx * (2 * QI_MXFP4) + kqsx; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + k0 + 0] = v.x; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + k0 + QI_MXFP4] = v.y; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 0] = v.x; x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + QI_MXFP4] = v.y; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI_MXFP4; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_mxfp4 * bxi = (const block_mxfp4 *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = ggml_cuda_e8m0_to_fp32(bxi->e)*0.5f; #else x_df[i*(MMQ_TILE_NE_K/QI_MXFP4) + i/QI_MXFP4 + kbxd] = ggml_cuda_e8m0_to_fp32(bxi->e)*0.5f; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_mxfp4_fp4(const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); int * x_qs = (int *) x_tile; uint32_t * x_sc = (uint32_t *) (x_qs + 2 * MMQ_TILE_NE_K); const int txi = threadIdx.x; constexpr int iter_k = get_iter_k(GGML_TYPE_MXFP4); constexpr int threads_per_row = iter_k / QK_MXFP4; // each thread processes 1 block constexpr int rows_per_warp = warp_size / threads_per_row; const int kbx = txi % threads_per_row; const int row_in_warp = txi / threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += rows_per_warp * nwarps) { int i = i0 + threadIdx.y * rows_per_warp + row_in_warp; if constexpr (need_check) { i = min(i, i_max); } const block_mxfp4 * bxi = (const block_mxfp4 *) x + kbx0 + i * stride + kbx; // quantize_mxfp4_mmq permutes nibbles to match the quantized format const int k0 = kbx * 4; memcpy(x_qs + i * MMQ_MMA_TILE_X_K_FP4 + k0, bxi->qs, 16); // Load E8M0 scales: pack 2 consecutive scales into one uint32 if (kbx % 2 == 0) { uint32_t e = bxi->e; e |= ((bxi + 1)->e << 8); x_sc[i * MMQ_MMA_TILE_X_K_FP4 + kbx / 2] = e; } } } template static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q8_0, mmq_y); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + txs.qs; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += VDR_Q8_0_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = 
j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q8_0_q8_1_impl (&x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k0 % MMQ_TILE_NE_K], x_df[i*(2*MMQ_TILE_NE_K/QI8_0) + i/(QI8_0/2) + k0/QI8_0], y_df[j*MMQ_TILE_Y_K + (k0/QI8_1) % (MMQ_TILE_NE_K/QI8_1)]); } } } } template static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { #if defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 8, int, input_layout> tile_A; typedef tile<16, 8, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + 2*MMQ_TILE_NE_K; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const half2 * y_ds = (const half2 *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_0 + k0, MMQ_MMA_TILE_X_K_Q8_0); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); float dB; const int j = j0 + tile_C::get_j(0); if (ds_layout == MMQ_Q8_1_DS_LAYOUT_D4) { dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; } else { dB = __low2float(y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_A::I + tile_C::get_i(l); const float dA = x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + k0/QI8_0]; sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l]*dA*dB; } } } } #else typedef tile<16, 8, int> tile_A; typedef tile< 8, 8, int> tile_B; typedef tile<16, 8, int> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
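    // Turing+ path: the A tiles (quantized x data) and their per-block scales dA are staged in
    // registers once per k slice; for every column block a B tile and its q8_1 scale dB are then
    // loaded and the int8 MMA result C is rescaled as C*dA*dB before being accumulated into sum.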
y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + 2*MMQ_TILE_NE_K; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const half2 * y_ds = (const half2 *) y; tile_A A[ntx][MMQ_TILE_NE_K/QI8_0]; float dA[ntx][tile_C::ne/2][MMQ_TILE_NE_K/QI8_0]; const int i0 = (threadIdx.y/ntx)*rows_per_warp; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; load_ldmatrix(A[n][k01/QI8_0], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_0 + k0, MMQ_MMA_TILE_X_K_Q8_0); } #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int i = i0 + n*tile_A::I + tile_C::get_i(2*l); #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; dA[n][l][k01/QI8_0] = x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + k0/QI8_0]; } } } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { tile_B B; float dB[tile_C::ne/2]; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); // faster than load_ldmatrix #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int j = j0 + tile_C::get_j(l); if (ds_layout == MMQ_Q8_1_DS_LAYOUT_D4) { dB[l] = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; } else { dB[l] = __low2float(y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n][k01/QI8_0], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l]*dA[n][l/2][k01/QI8_0]*dB[l%2]; } } } } #endif // defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } template static __device__ __forceinline__ void vec_dot_mxfp4_mxfp4_mma(const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { typedef tile<16, 8, int> tile_A; typedef tile<8, 8, int> tile_B; typedef tile<16, 8, float> tile_C; // Output is float for native scaled MMA constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp / tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J * MMQ_TILE_Y_FP4_K); // Match layout from load_tiles_mxfp4_fp4 const int * x_qs = (const int *) x; const uint32_t * x_sc = (const uint32_t *) (x_qs + 2 * MMQ_TILE_NE_K); const int * y_qs = (const int *) y + 4; const uint32_t * y_sc = (const uint32_t *) y; // tile_A has a length of 64 logical values vs. 
32 values in block_mxfp4 tile_A A[ntx][MMQ_TILE_NE_K / (2 * QI_MXFP4)]; uint32_t scaleA[ntx][MMQ_TILE_NE_K / (2 * QI_MXFP4)]; // Block scale // Each thread has to point to a 4 byte scale value // https://docs.nvidia.com/cuda/parallel-thread-execution/#warp-level-block-scaling const int i0 = (threadIdx.y / ntx) * rows_per_warp; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 2 * QI_MXFP4) { const int k0 = k00 + k01; load_ldmatrix(A[n][k01 / (2 * QI_MXFP4)], x_qs + (i0 + n * tile_A::I) * MMQ_MMA_TILE_X_K_FP4 + k0, MMQ_MMA_TILE_X_K_FP4); // based on block-scaling document, 2 threads in each quad need to supply to the scale value const int tidx = threadIdx.x / 4 + (threadIdx.x % 2) * 8; scaleA[n][k01 / (2 * QI_MXFP4)] = *(x_sc + (i0 + n * tile_A::I + tidx) * MMQ_MMA_TILE_X_K_FP4 + k0 / (2 * QI_MXFP4)); } } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx * tile_C::J) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 2 * QI_MXFP4) { tile_B B; uint32_t scaleB; // 2xN scales load_generic(B, y_qs + j0 * MMQ_TILE_Y_FP4_K + k01, MMQ_TILE_Y_FP4_K); scaleB = y_sc[(j0 + threadIdx.x / 4) * MMQ_TILE_Y_FP4_K + k01 / (2 * QI_MXFP4)]; #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma_block_scaled(C, A[n][k01 / (2 * QI_MXFP4)], B, scaleA[n][k01 / (2 * QI_MXFP4)], scaleB); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { sum[(j0 / tile_C::J + n) * tile_C::ne + l] += C.x[l]; } } } } } template static __device__ __forceinline__ void vec_dot_q8_1_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_1, mmq_y); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + txs.qs; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += VDR_Q8_0_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q8_1_q8_1_impl (&x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], x_dm[i*(MMQ_TILE_NE_K/QI5_1) + i/QI5_1 + k0/QI8_1], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } template static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { #if defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 8, int, input_layout> tile_A; typedef tile<16, 8, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
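    // q8_1-style tiles carry (d, m) per x block in x_dm and (d, s) per y block in y_dm, so the
    // accumulation below is dmA.x*dsB.x*C + dmA.y*dsB.y: the first term rescales the integer dot
    // product, the second folds the x minimum against the y partial sum.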
y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + 2*MMQ_TILE_NE_K; const int * y_qs = (const int *) y + 4; const half2 * y_dm = (const half2 *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_1 + k0, MMQ_MMA_TILE_X_K_Q8_1); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float2 dsB = __half22float2(y_dm[j*MMQ_TILE_Y_K + k01/QI8_1]); #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_A::I + tile_C::get_i(l); float2 dmA = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + k0/QI8_1]); sum[(j0/tile_C::J + n)*tile_C::ne + l] += dmA.x*dsB.x*C.x[l]; sum[(j0/tile_C::J + n)*tile_C::ne + l] += dmA.y*dsB.y; } } } } #else typedef tile<16, 8, int> tile_A; typedef tile< 8, 8, int> tile_B; typedef tile<16, 8, int> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + 2*MMQ_TILE_NE_K; const int * y_qs = (const int *) y + 4; const half2 * y_dm = (const half2 *) y; tile_A A[ntx][MMQ_TILE_NE_K/QI8_1]; float2 dmA[ntx][tile_C::ne/2][MMQ_TILE_NE_K/QI8_1]; const int i0 = (threadIdx.y/ntx)*rows_per_warp; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; load_ldmatrix(A[n][k01/QI8_1], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_1 + k0, MMQ_MMA_TILE_X_K_Q8_1); } #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int i = i0 + n*tile_A::I + tile_C::get_i(2*l); #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; dmA[n][l][k01/QI8_1] = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + k0/QI8_1]); } } } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { tile_B B; float2 dsB[tile_C::ne/2]; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); // faster than load_ldmatrix #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int j = j0 + tile_C::get_j(l); dsB[l] = __half22float2(y_dm[j*MMQ_TILE_Y_K + k01/QI8_1]); } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n][k01/QI8_1], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { sum[(j0/tile_C::J + n)*tile_C::ne + l] += dmA[n][l/2][k01/QI8_1].x*dsB[l%2].x*C.x[l]; sum[(j0/tile_C::J + n)*tile_C::ne + l] += dmA[n][l/2][k01/QI8_1].y*dsB[l%2].y; } } } } #endif // defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } // Used for Q3_K, IQ2_S, and IQ2_XS template static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16; const int * x_qs = (const int *) 
x; const float * x_df = (const float *) x_qs + txs.qs; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q8_0_16_q8_1_impl( &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], &x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + k0/(QI8_0/2)], y_df[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } // Used for Q3_K, IQ2_S, and IQ2_XS: template static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { #if defined(AMD_MFMA_AVAILABLE) constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 8, int, input_layout> tile_A; typedef tile<16, 8, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; typedef tile<64, 2, int, input_layout> tile_load; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(((tile_load *) A)[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B[1]; load_generic(((tile_load *) B)[0], y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1] / 2; #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n], B[0]); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(l); sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l] * x_df[i*MMQ_MMA_TILE_X_K_Q3_K + k0/4] * dB; } } } } #elif defined(AMD_WMMA_AVAILABLE) //wmma instructions can handle 16x4 tiles, does not require loading 64x2 tiles constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 4, int, input_layout> tile_A; typedef tile<16, 4, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
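    // Same Q3_K/IQ2-style computation as the MFMA branch above, but RDNA WMMA consumes 16x4 int
    // tiles directly, so A and B are loaded without the 64x2 tile_load reinterpretation and the
    // y scale dB is applied without the extra factor of 1/2.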
y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(l); sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l] * x_df[i*MMQ_MMA_TILE_X_K_Q3_K + k0/4] * dB; } } } } #elif defined(TURING_MMA_AVAILABLE) typedef tile<16, 4, int> tile_A; typedef tile<16, 8, int> tile_A_8; typedef tile< 8, 4, int> tile_B; typedef tile<16, 8, int> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const int i0 = (threadIdx.y / ntx) * (ntx*tile_A::I); tile_A A[ntx][8]; float dA[ntx][tile_C::ne/2][8]; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 8) { const int k0 = k00 + k01; load_ldmatrix(((tile_A_8 *) A[n])[k01/8], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); } #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(2*l); #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; dA[n][l][k01/4] = x_df[i*MMQ_MMA_TILE_X_K_Q3_K + k0/4]; } } } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR3_K*VDR_Q3_K_Q8_1_MMQ) { tile_B B[2]; float dB[tile_C::ne/2]; // Here load_generic is faster than load_ldmatrix. 
load_generic(B[0], y_qs + j0*MMQ_TILE_Y_K + (k01 + 0), MMQ_TILE_Y_K); load_generic(B[1], y_qs + j0*MMQ_TILE_Y_K + (k01 + tile_B::J), MMQ_TILE_Y_K); #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int j = j0 + tile_C::get_j(l); dB[l] = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C[2]; mma(C[0], A[n][k01/4 + 0], B[0]); mma(C[1], A[n][k01/4 + 1], B[1]); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { sum[(j0/tile_C::J + n)*tile_C::ne + l] += dB[l%2]*(C[0].x[l]*dA[n][l/2][k01/4 + 0] + C[1].x[l]*dA[n][l/2][k01/4 + 1]); } } } } #else GGML_UNUSED_VARS(x, y, sum, k00); NO_DEVICE_CODE; #endif // AMD_MFMA_AVAILABLE || AMD_WMMA_AVAILABLE } template static __device__ __forceinline__ void load_tiles_q2_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q2_K, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR2_K); constexpr int nrows = ggml_cuda_get_physical_warp_size() / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = (const block_q2_K *) x + kbx0 + i*stride; const int x_ql_0 = get_int_b2(bxi->qs, kqsx); #pragma unroll for (int l = 0; l < QR2_K; ++l) { const int k = (kqsx/8)*32 + l*8 + kqsx % 8; const int x_qs_k = (x_ql_0 >> (2*l)) & 0x03030303; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q2_K + k] = x_qs_k; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + k] = x_qs_k; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const int sc_m = bxi->scales[kqsx]; #ifdef FAST_FP16_AVAILABLE const half2 x_dm_ik = __hmul2(bxi->dm, make_half2(sc_m & 0x0F, sc_m >> 4)); #else const float2 bxi_dmf = __half22float2(bxi->dm); const half2 x_dm_ik = make_half2(bxi_dmf.x*(sc_m & 0x0F), bxi_dmf.y*(sc_m >> 4)); #endif // FAST_FP16_AVAILABLE #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + kqsx] = x_dm_ik; #else x_dm[i*(MMQ_TILE_NE_K + 1) + kqsx] = x_dm_ik; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q2_K, mmq_y); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + txs.qs; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; float2 y_df[mmq_x/nwarps]; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; y_df[j0/nwarps] = 
__half22float2(y_ds[j*MMQ_TILE_Y_K]); } #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K/2; k01 += QR2_K*VDR_Q2_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; constexpr int ns = 2; sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q2_K_q8_1_impl_mmq( &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], &x_dm[i*(MMQ_TILE_NE_K + 1) + k0/4], k01 < MMQ_TILE_NE_K/2 ? y_df[j0/nwarps].x : y_df[j0/nwarps].y, &y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]); } } } // Some compilers fail to unroll the loop over k01 if there is a conditional statement for ns in the inner loop. // As a workaround 2 separate loops are used instead. #pragma unroll for (int k01 = MMQ_TILE_NE_K/2; k01 < MMQ_TILE_NE_K; k01 += QR2_K*VDR_Q2_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; constexpr int ns = 1; sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q2_K_q8_1_impl_mmq( &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], &x_dm[i*(MMQ_TILE_NE_K + 1) + k0/4], k01 < MMQ_TILE_NE_K/2 ? y_df[j0/nwarps].x : y_df[j0/nwarps].y, &y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]); } } } } template static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { #if defined(AMD_MFMA_AVAILABLE) constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 8, int, input_layout> tile_A; typedef tile<16, 8, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; typedef tile<64, 2, int, input_layout> tile_load; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(((tile_load *) A)[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B[1]; load_generic(((tile_load *) B)[0], y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float dB = (k01 < MMQ_TILE_NE_K/2) ? __half22float2(y_ds[j*MMQ_TILE_Y_K]).x/2 : __half22float2(y_ds[j*MMQ_TILE_Y_K]).y/2; const float sB = (k01 >= MMQ_TILE_NE_K * 3/4) ? 0 : (((k01/4)%2) ? 
__half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]).y : __half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]).x); tile_C Cm; if (k01 >= MMQ_TILE_NE_K * 3/4) { tile_A A1; A1.x[0] = 0x01010101; A1.x[1] = 0x01010101; mma(Cm, A1, B[0]); } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C Cd; mma(Cd, A[n], B[0]); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(l); const float2 dm = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + k0/4]); float tmp = Cd.x[l]*dm.x; if (k01 >= MMQ_TILE_NE_K * 3/4) { tmp -= Cm.x[l]*dm.y; } sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp*dB; sum[(j0/tile_C::J + n)*tile_C::ne + l] -= dm.y*sB; } } } } #elif defined(AMD_WMMA_AVAILABLE) //wmma instructions can handle 16x4 tiles, does not require loading 64x2 tiles constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 4, int, input_layout> tile_A; typedef tile<16, 4, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float dB = (k01 < MMQ_TILE_NE_K/2) ? __half22float2(y_ds[j*MMQ_TILE_Y_K]).x : __half22float2(y_ds[j*MMQ_TILE_Y_K]).y; const float sB = (k01 >= MMQ_TILE_NE_K * 3/4) ? 0 : (((k01/4)%2) ? __half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]).y : __half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]).x); tile_C Cm; if (k01 >= MMQ_TILE_NE_K * 3/4) { tile_A A1; #pragma unroll for (int l = 0; l < tile_A::ne; ++l) { A1.x[l] = 0x01010101; } mma(Cm, A1, B); } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C Cd; mma(Cd, A[n], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(l); const float2 dm = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + k0/4]); float tmp = Cd.x[l]*dm.x; if (k01 >= MMQ_TILE_NE_K * 3/4) { tmp -= Cm.x[l]*dm.y; } sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp*dB; sum[(j0/tile_C::J + n)*tile_C::ne + l] -= dm.y*sB; } } } } #elif defined(TURING_MMA_AVAILABLE) typedef tile<16, 4, int> tile_A; typedef tile<16, 8, int> tile_A_8; typedef tile< 8, 4, int> tile_B; typedef tile<16, 8, int> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
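    /*
     * Sketch of the q2_K accumulation below (inferred from the code that follows, not an authoritative spec):
     * a 16-value q2_K group dequantizes as x = d*sc*q - dmin*m, so the dot product with a q8_1 column splits
     * into d*sc*sum(q*q8) minus dmin*m*sum(q8). dA/mA below hold d*sc and dmin*m per group, unpacked from x_dm.
     * The sum(q*q8) part comes from the mma results Cd scaled by dA and the q8_1 scale dB. The minimum term is
     * subtracted in two ways: for the first three quarters of the tile it uses the q8_1 block sums stored in
     * y_ds (sB, which appear to already include the q8_1 scale, hence no extra dB factor on that path), while
     * for the last quarter the q8 column sums are computed on the fly with an extra mma against an all-ones
     * A tile (Cm) and scaled together with the Cd term.
     */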
y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; const int i0 = (threadIdx.y / ntx) * (ntx*tile_A::I); tile_A A[ntx][8]; float dA[ntx][tile_C::ne/2][8]; float mA[ntx][tile_C::ne/2][8]; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; load_ldmatrix(((tile_A_8 *) A[n])[k01/QI8_1], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); } } #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(2*l); #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1/2) { const int k0 = k00 + k01; const float2 dm = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + k0/(QI8_1/2)]); dA[n][l][k01/(QI8_1/2)] = dm.x; mA[n][l][k01/(QI8_1/2)] = dm.y; } } } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { float2 dB[tile_C::ne/2]; #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int j = j0 + tile_C::get_j(l); dB[l] = __half22float2(y_ds[j*MMQ_TILE_Y_K]); } #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { tile_B B[2]; // Here load_generic is faster than load_ldmatrix. load_generic(B[0], y_qs + j0*MMQ_TILE_Y_K + (k01 + 0), MMQ_TILE_Y_K); load_generic(B[1], y_qs + j0*MMQ_TILE_Y_K + (k01 + tile_B::J), MMQ_TILE_Y_K); tile_C Cm[2]; if (k01 >= MMQ_TILE_NE_K * 3/4) { tile_A A1; A1.x[0] = 0x01010101; A1.x[1] = 0x01010101; mma(Cm[0], A1, B[0]); mma(Cm[1], A1, B[1]); } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C Cd[2]; mma(Cd[0], A[n][k01/4 + 0], B[0]); mma(Cd[1], A[n][k01/4 + 1], B[1]); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { float tmp = Cd[0].x[l]*dA[n][l/2][k01/4 + 0] + Cd[1].x[l]*dA[n][l/2][k01/4 + 1]; if (k01 >= MMQ_TILE_NE_K * 3/4) { tmp -= Cm[0].x[l]*mA[n][l/2][k01/4 + 0] + Cm[1].x[l]*mA[n][l/2][k01/4 + 1]; } sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp*(k01 < MMQ_TILE_NE_K/2 ? 
dB[l%2].x : dB[l%2].y); } } } #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K * 3/4; k01 += QI8_1) { float2 sB[tile_C::ne/2]; #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int j = j0 + tile_C::get_j(l); sB[l] = __half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]); } #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { sum[(j0/tile_C::J + n)*tile_C::ne + l] -= mA[n][l/2][k01/4 + 0]*sB[l%2].x; sum[(j0/tile_C::J + n)*tile_C::ne + l] -= mA[n][l/2][k01/4 + 1]*sB[l%2].y; } } } } #else GGML_UNUSED_VARS(x, y, sum, k00); NO_DEVICE_CODE; #endif // AMD_MFMA_AVAILABLE || AMD_WMMA_AVAILABLE } template static __device__ __forceinline__ void load_tiles_q3_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q3_K, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); int * x_sc = (int *) (x_df + txs.dm); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR3_K); constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride; const int x_ql_0 = get_int_b2(bxi->qs, kqsx); const int x_qh_0 = get_int_b2(bxi->hmask, kqsx % (QI3_K/2)) >> (4 * (kqsx / (QI3_K/2))); #pragma unroll for (int l = 0; l < QR3_K; ++l) { const int k = (kqsx/8)*32 + l*8 + kqsx % 8; const int x_ql_k = (x_ql_0 >> (2*l)) & 0x03030303; const int x_qh_k = ((x_qh_0 >> l) << 2) & 0x04040404; const int x_qs_k = __vsubss4(x_ql_k | x_qh_k, 0x04040404); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + k] = x_qs_k; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + k] = x_qs_k; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } constexpr int rows_per_warp = warp_size / 4; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { int i = i0 + threadIdx.y*rows_per_warp + threadIdx.x/4; if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride; const int ksc = threadIdx.x % 4; const int ksc_low = ksc % (QI3_K/8); const int shift_low = 4 * (ksc / (QI3_K/8)); const int sc_low = (get_int_b2(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F; const int ksc_high = QI3_K/8; const int shift_high = 2 * ksc; const int sc_high = ((get_int_b2(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030; const int sc = __vsubss4(sc_low | sc_high, 0x20202020); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) const int8_t * sc8 = (const int8_t *) ≻ const float d = bxi->d; #pragma unroll for (int l = 0; l < int(sizeof(int)); ++l) { x_df[i*MMQ_MMA_TILE_X_K_Q3_K + sizeof(int)*ksc + l] = d*sc8[l]; } #else x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + ksc] = sc; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) 
|| defined(AMD_WMMA_AVAILABLE)
    }

#if !(defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE))
#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) {
        int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y;

        if (need_check) {
            i = min(i, i_max);
        }

        const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride;

        x_df[i] = bxi->d;
    }
#endif // !(defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE))
}

template <int mmq_x, int mmq_y>
static __device__ __forceinline__ void vec_dot_q3_K_q8_1_dp4a(
    const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) {
    constexpr int nwarps = mmq_get_nwarps_device();
    constexpr int warp_size = ggml_cuda_get_physical_warp_size();

    constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q3_K, mmq_y);
    const int * x_qs = (const int *) x;
    const float * x_df = (const float *) x_qs + txs.qs;
    const int * x_sc = (const int *) x_df + txs.dm;
    const int * y_qs = (const int *) y + 4;
    const float * y_df = (const float *) y;

// #pragma unroll
    for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR3_K*VDR_Q3_K_Q8_1_MMQ) {
        const int k0 = k00 + k01;

#pragma unroll
        for (int j0 = 0; j0 < mmq_x; j0 += nwarps) {
            const int j = j0 + threadIdx.y;

#pragma unroll
            for (int i0 = 0; i0 < mmq_y; i0 += warp_size) {
                const int i = i0 + threadIdx.x;

                const int8_t * scales = ((const int8_t *) (x_sc + i*(MMQ_TILE_NE_K/8) + i/8)) + k0/4;

                sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q3_K_q8_1_impl_mmq(
                    &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], scales,
                    x_df[i], y_df[j*MMQ_TILE_Y_K + k01/QI8_1]);
            }
        }
    }
}

static __device__ __forceinline__ int unpack_scales_q45_K(const int * scales, const int ksc) {
    // scale arrangement after the following two lines:
    // - ksc == 0: sc0, sc1, sc2, sc3
    // - ksc == 1: sc4, sc5, sc6, sc7
    // - ksc == 2: m0, m1, m2, m3
    // - ksc == 3: m4, m5, m6, m7
    return ((scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F) | // lower 4 bits
           ((scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030);                     // upper 2 bits
}

template <int mmq_y, bool need_check>
static __device__ __forceinline__ void load_tiles_q4_K(
    const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
    constexpr int nwarps = mmq_get_nwarps_device();
    constexpr int warp_size = ggml_cuda_get_physical_warp_size();

#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE)
    int * x_qs = (int *) x_tile;
    half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K);
#else
    constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_K, mmq_y);
    int * x_qs = (int *) x_tile;
    half2 * x_dm = (half2 *) (x_qs + txs.qs);
    int * x_sc = (int *) (x_dm + txs.dm);
#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE)

    constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_K);
    constexpr int nrows = warp_size / threads_per_row;
    const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) {
        int i = i0 + (nrows == 1 ?
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; const int qs0 = get_int_b4(bxi->qs, txi); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(txi/8) + txi % 8 + 0] = (qs0 >> 0) & 0x0F0F0F0F; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(txi/8) + txi % 8 + 8] = (qs0 >> 4) & 0x0F0F0F0F; #else x_qs[i*(MMQ_TILE_NE_K + 1) + txi] = qs0; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int rows_per_warp = warp_size / 2; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { #if defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) // Need if on AMD instead of % because warp_size == 64 // This causes double work and throughput loss (MI300X) // H100 loses about 100 t/s with 'if' condition over '%' int i = i0 + threadIdx.y*rows_per_warp + threadIdx.x/2; if (i < mmq_y) { #else int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/2) % mmq_y; { #endif // defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; const int * scales = (const int *) bxi->scales; const int ksc = threadIdx.x % 2; const int sc32 = unpack_scales_q45_K(scales, ksc + 0); const int m32 = unpack_scales_q45_K(scales, ksc + 2); const uint8_t * sc8 = (const uint8_t *) &sc32; const uint8_t * m8 = (const uint8_t *) &m32; const half2 dm = bxi->dm * make_half2(1.0f, -1.0f); #pragma unroll for (int l = 0; l < sizeof(int); ++l) { x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + sizeof(int)*ksc + l] = dm*make_half2(sc8[l], m8[l]); } } } #else #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; x_dm[i] = bxi->dm; } constexpr int rows_per_warp = warp_size / 4; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/(MMQ_TILE_NE_K/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride + (threadIdx.x % (MMQ_TILE_NE_K/8)) / (QI4_K/8); const int * scales = (const int *) bxi->scales; const int ksc = threadIdx.x % (MMQ_TILE_NE_K/8); const int scales8 = unpack_scales_q45_K(scales, ksc); x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + ksc] = scales8; } #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } template static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_K, mmq_y); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + txs.qs; const int * x_sc = (const int *) x_dm + txs.dm; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR4_K*VDR_Q4_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { 
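            /*
             * Sketch of the dp4a work split used below: each thread accumulates an
             * (mmq_x/nwarps) x (mmq_y/warp_size) block of partial sums; j = j0 + threadIdx.y selects the
             * q8_1 column handled by this warp and i = i0 + threadIdx.x the x row handled by this lane.
             * The 8-bit scales and mins come from the packed x_sc words produced by unpack_scales_q45_K in
             * load_tiles_q4_K: sc points at the scales and sc + 8 at the mins of the same super-block.
             */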
const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const uint8_t * sc = (const uint8_t *) &x_sc[i * (MMQ_TILE_NE_K/8) + i/8 + k0/32] + 2*(k01/16); sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q4_K_q8_1_impl_mmq( &x_qs[i*(MMQ_TILE_NE_K + 1) + k0/2], &y_qs[j*MMQ_TILE_Y_K + k01], sc, sc+8, x_dm[i], &y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } template static __device__ __forceinline__ void load_tiles_q5_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_K, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); int * x_sc = (int *) (x_dm + txs.dm); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR5_K); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; const int ky = QR5_K*txi; const int ql = get_int_b4(bxi->qs, txi); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; const int qh = get_int_b4(bxi->qh, txi % (QI5_K/4)); const int qh0 = ((qh >> (2 * (txi / (QI5_K/4)) + 0)) << 4) & 0x10101010; const int qh1 = ((qh >> (2 * (txi / (QI5_K/4)) + 1)) << 4) & 0x10101010; const int kq0 = ky - ky % (QI5_K/2) + txi % (QI5_K/4) + 0; const int kq1 = ky - ky % (QI5_K/2) + txi % (QI5_K/4) + QI5_K/4; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kq0] = ql0 | qh0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kq1] = ql1 | qh1; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq0] = ql0 | qh0; x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq1] = ql1 | qh1; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int rows_per_warp = warp_size / 2; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { #if defined(AMD_MFMA_AVAILABLE) // Need if on AMD instead of % because warp_size == 64 // This causes double work and throughput loss (MI300X) // H100 loses about 100 t/s with 'if' condition over '%' int i = i0 + threadIdx.y*rows_per_warp + threadIdx.x/2; if (i < mmq_y) { #else int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/2) % mmq_y; { #endif // defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; const int * scales = (const int *) bxi->scales; const int ksc = threadIdx.x % 2; const int sc32 = unpack_scales_q45_K(scales, ksc + 0); const int m32 = unpack_scales_q45_K(scales, ksc + 2); const uint8_t * sc8 = (const uint8_t *) &sc32; const uint8_t * m8 = (const uint8_t *) &m32; const half2 dm = bxi->dm * make_half2(1.0f, 
-1.0f); #pragma unroll for (int l = 0; l < int(sizeof(int)); ++l) { x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + sizeof(int)*ksc + l] = dm*make_half2(sc8[l], m8[l]); } } } #else #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; x_dm[i] = bxi->dm; } constexpr int rows_per_warp = warp_size / 4; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/(MMQ_TILE_NE_K/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; const int * scales = (const int *) bxi->scales; const int ksc = threadIdx.x % (MMQ_TILE_NE_K/8); const int scales8 = unpack_scales_q45_K(scales, ksc); x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + ksc] = scales8; } #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } template static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_K, mmq_y); const int * x_qs = (const int *) x; const half2 * x_dm = (const half2 *) x_qs + txs.qs; const int * x_sc = (const int *) x_dm + txs.dm; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR5_K*VDR_Q5_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const uint8_t * sc = ((const uint8_t *) &x_sc[i * (MMQ_TILE_NE_K/8) + i/8 + k00/32]) + 2*(k01/16); sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q5_K_q8_1_impl_mmq( &x_qs[i*(QR5_K*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], sc, sc+8, x_dm[i], &y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } template static __device__ __forceinline__ void load_tiles_q6_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); int * x_sc = (int *) (x_df + MMQ_TILE_NE_K/QI6_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q6_K, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); int * x_sc = (int *) (x_df + txs.dm); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR6_K); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride; const int ql = get_int_b2(bxi->ql, txi); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; const int qh = get_int_b2(bxi->qh, (QI6_K/4) * (txi / (QI6_K/2)) + txi % (QI6_K/4)); const int qh0 = ((qh >> ((txi & 0x08) >> 2)) << 4) & 0x30303030; const int qh1 = (qh >> ((txi & 0x08) >> 2)) & 0x30303030; const int kq0 = 2*txi - txi % (QI6_K/2) + 0; const int kq1 = 2*txi - txi % (QI6_K/2) + QI6_K/2; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q6_K + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_qs[i*MMQ_MMA_TILE_X_K_Q6_K + kq1] = __vsubss4(ql1 | qh1, 0x20202020); #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q6_K] = bxi->d; #else x_df[i*(MMQ_TILE_NE_K/QI6_K) + i/QI6_K] = bxi->d; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int rows_per_warp = warp_size / 4; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/(MMQ_TILE_NE_K/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride + (threadIdx.x % (MMQ_TILE_NE_K/8)) / 4; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_sc[i*MMQ_MMA_TILE_X_K_Q6_K + threadIdx.x%4] = get_int_b2(bxi->scales, threadIdx.x % (MMQ_TILE_NE_K/8)); #else x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + threadIdx.x%(MMQ_TILE_NE_K/8)] = get_int_b2(bxi->scales, threadIdx.x%(QI6_K/8)); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q6_K, mmq_y); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + txs.qs; const int * x_sc = (const int *) x_df + txs.dm; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; // #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR6_K*VDR_Q6_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const int8_t * sc = ((const int8_t *) &x_sc[i * (MMQ_TILE_NE_K/8) + i/8 + k0/16]); sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q6_K_q8_1_impl_mmq( &x_qs[i*(QR6_K*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], sc, x_df[i*(MMQ_TILE_NE_K/QI6_K) + i/QI6_K], 
&y_df[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } template static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { #if defined(AMD_MFMA_AVAILABLE) constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 8, int, input_layout> tile_A; typedef tile<16, 8, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; typedef tile<64, 2, int, input_layout> tile_load; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * x_sc = (const int *) x_df + MMQ_TILE_NE_K/QI6_K; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(((tile_load *) A)[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q6_K + k0, MMQ_MMA_TILE_X_K_Q6_K); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B[1]; load_generic(((tile_load *) B)[0], y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1] / 2; #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n], B[0]); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(l); const int8_t * sc = (const int8_t *) (x_sc + i*MMQ_MMA_TILE_X_K_Q6_K + k00/16); sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l] * sc[k01/4] * x_df[i*MMQ_MMA_TILE_X_K_Q6_K] * dB; } } } } #elif defined(AMD_WMMA_AVAILABLE) //wmma instructions can handle 16x4 tiles, does not require loading 64x2 tiles constexpr data_layout input_layout = get_input_data_layout(); typedef tile<16, 4, int, input_layout> tile_A; typedef tile<16, 4, int, input_layout> tile_B; typedef tile<16, 16, int, DATA_LAYOUT_J_MAJOR> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
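    /*
     * q6_K scaling in the loop below (sketch based on the code that follows): q6_K has no per-group minimum,
     * the quants having already been centered to [-32, 31] by subtracting 32 in load_tiles_q6_K. Each int8
     * mma result C is therefore simply scaled by the signed 8-bit group scale sc[k01/4] (from x_sc), the
     * per-super-block float d (x_df[i*MMQ_MMA_TILE_X_K_Q6_K]) and the q8_1 block scale dB. Unlike the MFMA
     * path above, the WMMA fragments are 16x4, so A and B are loaded directly without the 64x2 tile_load
     * reinterpretation.
     */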
y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * x_sc = (const int *) x_df + MMQ_TILE_NE_K/QI6_K; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const int i0 = (threadIdx.y / ntx) * rows_per_warp; for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; tile_A A[ntx]; #pragma unroll for (int n = 0; n < ntx; ++n) { load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q6_K + k0, MMQ_MMA_TILE_X_K_Q6_K); } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { tile_B B; load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); const int j = j0 + tile_C::get_j(0); const float dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C; mma(C, A[n], B); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(l); const int8_t * sc = (const int8_t *) (x_sc + i*MMQ_MMA_TILE_X_K_Q6_K + k00/16); sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l] * sc[k01/4] * x_df[i*MMQ_MMA_TILE_X_K_Q6_K] * dB; } } } } #elif defined(TURING_MMA_AVAILABLE) typedef tile<16, 4, int> tile_A; typedef tile< 8, 4, int> tile_B; typedef tile<16, 8, int> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * x_sc = (const int *) x_df + MMQ_TILE_NE_K/QI6_K; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const int i0 = (threadIdx.y / ntx) * (ntx*tile_A::I); tile_A A[ntx][8]; int scA[ntx][tile_C::ne/2][8]; float dA[ntx][tile_C::ne/2]; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 8) { const int k0 = k00 + k01; load_ldmatrix(A[n][k01/4 + 0], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + 0), MMQ_MMA_TILE_X_K_Q6_K); load_ldmatrix(A[n][k01/4 + 1], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + tile_A::J), MMQ_MMA_TILE_X_K_Q6_K); } #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 16) { const int k0 = k00 + k01; #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(2*l); const int sc_packed = x_sc[i*MMQ_MMA_TILE_X_K_Q6_K + k0/16]; const int8_t * sc = (const int8_t *) &sc_packed; #pragma unroll for (int ksc = 0; ksc < sizeof(int); ++ksc) { scA[n][l][k01/4 + ksc] = sc[ksc]; } } } #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int i = i0 + n*tile_C::I + tile_C::get_i(2*l); dA[n][l] = x_df[i*MMQ_MMA_TILE_X_K_Q6_K]; } } #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { float tmp[ntx][tile_C::ne] = {{0.0f}}; #pragma unroll for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 8) { tile_B B[2]; float dB[tile_C::ne/2]; // Here load_generic is faster than load_ldmatrix. 
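            /*
             * Background for the note above (hedged): load_ldmatrix wraps the PTX ldmatrix instruction,
             * which fetches a fragment from shared memory in warp-wide transactions but requires the strict
             * fragment layout, while load_generic issues plain per-thread loads. For these q8_1 B tiles the
             * generic loads turned out to be the faster option on this path.
             */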
load_generic(B[0], y_qs + j0*MMQ_TILE_Y_K + 0 + k01, MMQ_TILE_Y_K); load_generic(B[1], y_qs + j0*MMQ_TILE_Y_K + tile_B::J + k01, MMQ_TILE_Y_K); #pragma unroll for (int l = 0; l < tile_C::ne/2; ++l) { const int j = j0 + tile_C::get_j(l); dB[l] = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; } #pragma unroll for (int n = 0; n < ntx; ++n) { tile_C C[2]; mma(C[0], A[n][k01/4 + 0], B[0]); mma(C[1], A[n][k01/4 + 1], B[1]); #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { tmp[n][l] += (C[0].x[l]*scA[n][l/2][k01/4 + 0] + C[1].x[l]*scA[n][l/2][k01/4 + 1])*dB[l%2]; } } } #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp[n][l]*dA[n][l/2]; } } } #else GGML_UNUSED_VARS(x, y, sum, k00); NO_DEVICE_CODE; #endif // AMD_MFMA_AVAILABLE || AMD_WMMA_AVAILABLE } template static __device__ __forceinline__ void load_tiles_iq4_nl( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_NL, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_NL); constexpr int nrows = warp_size / threads_per_row; const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; const int kbx = txi / QI4_NL; const int kqsx = txi % QI4_NL; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbx; const int aux_q4 = get_int_b2(bxi->qs, kqsx); const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl); const int k0 = kbx * (2 * QI4_NL) + kqsx; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + QI4_NL] = v.y; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 0] = v.x; x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + QI4_NL] = v.y; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI4_NL; constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbxd; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = __half2float(bxi->d); #else x_df[i*(MMQ_TILE_NE_K/QI4_NL) + i/QI4_NL + kbxd] = __half2float(bxi->d); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq2_xxs( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ2_XXS, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR2_XXS)) / 2; constexpr int nrows = warp_size / threads_per_row; const int kqsx = warp_size > threads_per_row ? 
threadIdx.x % threads_per_row : threadIdx.x; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_iq2_xxs * bxi = (const block_iq2_xxs *) x + kbx0 + i*stride; const int q2 = get_int_b2(bxi->qs, 2*kqsx+0); const uint8_t * aux8 = (const uint8_t *) &q2; const uint32_t aux32 = get_int_b2(bxi->qs, 2*kqsx+1); #pragma unroll for (int l = 0; l < QR2_XXS; ++l) { const int * grid_pos = (const int *) (iq2xxs_grid + aux8[l]); const int signs_packed = ksigns_iq2xs[(aux32 >> (7*l)) & 0x7F]; const int signs0 = __vcmpne4(((signs_packed & 0x03) << 7) | ((signs_packed & 0x0C) << 21), 0x00000000); const int grid0 = __vsub4(grid_pos[0] ^ signs0, signs0); const int signs1 = __vcmpne4(((signs_packed & 0x30) << 3) | ((signs_packed & 0xC0) << 17), 0x00000000); const int grid1 = __vsub4(grid_pos[1] ^ signs1, signs1); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 0)] = grid0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 1)] = grid1; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid0; x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid1; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const int ls = aux32 >> 28; const float d = bxi->d; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = (ls*d + d/2)/4; #else x_df[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = (ls*d + d/2)/4; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq2_xs( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16; int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR2_XS)) / 2; constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_iq2_xs * bxi = (const block_iq2_xs *) x + kbx0 + i*stride; const int2 q2_packed = make_int2(get_int_b2(bxi->qs, 2*kqsx+0), get_int_b2(bxi->qs, 2*kqsx+1)); const uint16_t * q2 = (const uint16_t *) &q2_packed; #pragma unroll for (int l = 0; l < QR2_XS; ++l) { const uint32_t * grid_pos = (const uint32_t *)(iq2xs_grid + (q2[l] & 0x000001FF)); const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9)); const int grid_l = __vsub4(grid_pos[0] ^ signs[0], signs[0]); const int grid_h = __vsub4(grid_pos[1] ^ signs[1], signs[1]); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 1)] = grid_h; #else 
x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid_h; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const int ls = bxi->scales[kqsx]; const float d = bxi->d; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; #else x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq2_s( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ2_S, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR2_S)) / 2; constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_iq2_s * bxi = (const block_iq2_s *) x + kbx0 + i*stride; const int qs_packed = get_int_b2(bxi->qs, kqsx); const uint8_t * qs = (const uint8_t *) &qs_packed; const int qh = bxi->qh[kqsx]; const int signs_packed_32 = get_int_b2(bxi->qs, QK_K/32 + kqsx); const uint8_t * signs_packed_8 = (const uint8_t *) &signs_packed_32; #pragma unroll for (int l = 0; l < QR2_S; ++l) { const int * grid_pos = (const int *)(iq2s_grid + (qs[l] | ((qh << (8-2*l)) & 0x300))); const int signs0 = __vcmpne4(((signs_packed_8[l] & 0x03) << 7) | ((signs_packed_8[l] & 0x0C) << 21), 0x00000000); const int signs1 = __vcmpne4(((signs_packed_8[l] & 0x30) << 3) | ((signs_packed_8[l] & 0xC0) << 17), 0x00000000); const int grid_l = __vsub4(grid_pos[0] ^ signs0, signs0); const int grid_h = __vsub4(grid_pos[1] ^ signs1, signs1); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 1)] = grid_h; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid_h; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const int ls = bxi->scales[kqsx]; const float d = bxi->d; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; #else x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = 
((ls >> 4)*d + d/2)/4; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq3_xxs( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_XXS, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR3_XXS)) / 2; constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_iq3_xxs * bxi = (const block_iq3_xxs *) x + kbx0 + i*stride; const int2 q3_packed = make_int2(get_int_b2(bxi->qs, 2*kqsx+0), get_int_b2(bxi->qs, 2*kqsx+1)); const uint8_t * q3 = (const uint8_t *) &q3_packed; const uint32_t aux32 = get_int_b2(bxi->qs, QK_K/16 + kqsx); #pragma unroll for (int l = 0; l < QR3_XXS; ++l) { const int2 grid_pos = make_int2(iq3xxs_grid[q3[2*l+0]], iq3xxs_grid[q3[2*l+1]]); const int * signs = (const int *)(ksigns64 + ((aux32 >> (7*l)) & 0x7F)); const int grid_l = __vsub4(grid_pos.x ^ signs[0], signs[0]); const int grid_h = __vsub4(grid_pos.y ^ signs[1], signs[1]); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 1)] = grid_h; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid_h; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const int ls = aux32 >> 28; const float d = bxi->d; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = (ls*d + d/2)/2; #else x_df[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = (ls*d + d/2)/2; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq3_s( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_S, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR3_S)) / 2; constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += 
nwarps * nrows) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_iq3_s * bxi = (const block_iq3_s *) x + kbx0 + i*stride; const int2 qs_packed = make_int2(get_int_b2(bxi->qs, 2*kqsx+0), get_int_b2(bxi->qs, 2*kqsx+1)); const uint8_t * qs = (const uint8_t *) &qs_packed; const int qh = bxi->qh[kqsx]; const int signs_packed_32 = get_int_b2(bxi->signs, kqsx); const uint8_t * signs_packed_8 = (const uint8_t *) &signs_packed_32; #pragma unroll for (int l = 0; l < QR3_S; ++l) { const int2 grid_pos = make_int2( iq3s_grid[qs[2*l+0] | ((qh << (8 - 2*l)) & 0x100)], iq3s_grid[qs[2*l+1] | ((qh << (7 - 2*l)) & 0x100)]); const int signs0 = __vcmpne4(((signs_packed_8[l] & 0x03) << 7) | ((signs_packed_8[l] & 0x0C) << 21), 0x00000000); const int signs1 = __vcmpne4(((signs_packed_8[l] & 0x30) << 3) | ((signs_packed_8[l] & 0xC0) << 17), 0x00000000); const int grid_l = __vsub4(grid_pos.x ^ signs0, signs0); const int grid_h = __vsub4(grid_pos.y ^ signs1, signs1); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l+0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l+1)] = grid_h; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+0)] = grid_l; x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+1)] = grid_h; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const int ls = 1 + 2*((bxi->scales[kqsx/2] >> (((2*kqsx) << 1) & 0x04)) & 0x0F); const float d = bxi->d; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = ls*d; #else x_df[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = ls*d; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq1_s( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; half2 * x_ds = (half2 *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_S, mmq_y); int * x_qs = (int *) x_tile; half2 * x_ds = (half2 *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR1_S); constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) { int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); } const block_iq1_s * bxi = (const block_iq1_s *) x + kbx0 + i*stride; const int qs_packed = get_int_b2(bxi->qs, kqsx); const uint8_t * qs = (const uint8_t *) &qs_packed; const int qh = bxi->qh[kqsx]; #pragma unroll for (int l = 0; l < QR1_S/2; ++l) { const int grid = iq1s_grid_gpu[qs[l] | (((qh >> (3*l)) & 0x07) << 8)]; const int grid0 = (grid >> 0) & 0x0F0F0F0F; const int grid1 = (grid >> 4) & 0x0F0F0F0F; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 8*kqsx + (2*l+0)] = grid0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 8*kqsx + (2*l+1)] = grid1; #else 
x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+0)] = grid0; x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+1)] = grid1; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } const float d1q = __half2float(bxi->d) * (((qh >> 11) & 0x0E) + 1); const float delta = -1.0f + IQ1S_DELTA - (qh & 0x8000) * (2.0f*IQ1S_DELTA/0x8000); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_ds[i*MMQ_MMA_TILE_X_K_Q8_1 + kqsx] = make_half2(d1q, d1q*delta); #else x_ds[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = make_half2(d1q, d1q*delta); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void load_tiles_iq4_xs( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_XS); constexpr int nrows = warp_size / threads_per_row; const int kqsx = threadIdx.x % threads_per_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_iq4_xs * bxi = (const block_iq4_xs *) x + kbx0 + i*stride; const int aux_q4 = get_int_b4(bxi->qs, kqsx); const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl); const int k0 = 8 * (kqsx / 4) + kqsx % 4; #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 4] = v.y; #else x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 0] = v.x; x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 4] = v.y; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } constexpr int rows_per_warp = warp_size / 8; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / (MMQ_TILE_NE_K/4); if (need_check) { i = min(i, i_max); } const block_iq4_xs * bxi = (const block_iq4_xs *) x + kbx0 + i*stride; const float d = __half2float(bxi->d); const int ls = ((bxi->scales_l[(threadIdx.x % 8)/2] >> (4*(threadIdx.x % 2))) & 0x0F) | (((bxi->scales_h >> (2*(threadIdx.x % 8))) & 0x03) << 4); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = d * (ls - 32); #else x_df[i*(MMQ_TILE_NE_K/4) + i/4 + threadIdx.x % 8] = d * (ls - 32); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) } } template static __device__ __forceinline__ void mmq_write_back_dp4a( const float * __restrict__ sum, const int32_t * __restrict__ ids_dst, float * __restrict__ dst, const int stride, const int i_max, const int j_max) { constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = 
ggml_cuda_get_physical_warp_size(); #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j > j_max) { return; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; if (need_check && i > i_max) { continue; } dst[ids_dst[j]*stride + i] = sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size]; } } } template static __device__ __forceinline__ void mmq_write_back_mma( const float * __restrict__ sum, const int * __restrict__ ids_dst, float * __restrict__ dst, const int stride, const int i_max, const int j_max) { constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int nwarps = mmq_get_nwarps_device(); #if defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr int tileC_IJ = mmq_get_granularity_device(0); typedef tile tile_C; constexpr int rows_per_warp = granularity; #else typedef tile<16, 8, int> tile_C; constexpr int rows_per_warp = 2 * granularity; #endif // defined(AMD_MFMA_AVAILABLE) constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. const int i0 = (threadIdx.y / ntx) * (ntx*tile_C::I); #if defined(TURING_MMA_AVAILABLE) || defined(AMD_MFMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) static_assert(nwarps*tile_C::I == mmq_y, "nwarps*tile_C::I != mmq_y"); #else GGML_UNUSED(nwarps); #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { const int j = j0 + (threadIdx.y % ntx) * tile_C::J + tile_C::get_j(l); if (j > j_max) { continue; } const int i = i0 + n*tile_C::I + tile_C::get_i(l); if (need_check && i > i_max) { continue; } dst[ids_dst[j]*stride + i] = sum[(j0/tile_C::J + n)*tile_C::ne + l]; } } } } // ------------------------------------------------------------------------------------------------------------------------------------- template struct mmq_type_traits; template struct mmq_type_traits { static constexpr int vdr = VDR_Q4_0_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_0; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q4_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q4_1_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_1; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_1_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q4_1_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q5_0_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_0; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q5_1_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_1; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_1_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_1_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q8_0_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q8_0; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = 
VDR_MXFP4_Q8_1_MMQ; #ifdef BLACKWELL_MMA_AVAILABLE static constexpr load_tiles_mmq_t load_tiles = load_tiles_mxfp4_fp4; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_mxfp4_mxfp4_mma; #else static constexpr load_tiles_mmq_t load_tiles = load_tiles_mxfp4; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; #endif // BLACKWELL_MMA_AVAILABLE static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q2_K_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q2_K; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q2_K_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q2_K_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q3_K_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q3_K; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_16_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q3_K_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q4_K_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_K; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_1_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q4_K_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q5_K_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_K; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_1_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q5_K_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_Q6_K_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_q6_K; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q6_K_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q6_K_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ2_XXS_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq2_xxs; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ2_XS_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq2_xs; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_16_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ2_S_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq2_s; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_16_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ3_XXS_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq3_xxs; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ3_S_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq3_s; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ1_S_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq1_s; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_1_q8_1_mma; static constexpr 
vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_1_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ4_NL_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq4_nl; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template struct mmq_type_traits { static constexpr int vdr = VDR_IQ4_XS_Q8_1_MMQ; static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq4_xs; static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma; static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a; }; template static __device__ __forceinline__ void mul_mat_q_process_tile( const char * __restrict__ x, const int offset_x, const int * __restrict__ y, const int * __restrict__ ids_dst, float * __restrict__ dst, float * __restrict__ tmp_fixup, const int stride_row_x, const int ncols_y, const int stride_col_dst, const int tile_x_max_i, const int tile_y_max_j, const int kb0_start, const int kb0_stop) { constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr int nwarps = mmq_get_nwarps_device(); constexpr int qk = ggml_cuda_type_traits::qk; constexpr int mmq_y = get_mmq_y_device(); constexpr load_tiles_mmq_t load_tiles = mmq_type_traits::load_tiles; extern __shared__ int data_mul_mat_q[]; int * tile_y = data_mul_mat_q + mmq_x; int * tile_x = tile_y + GGML_PAD(mmq_x*MMQ_TILE_Y_K, nwarps*warp_size); #if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) constexpr vec_dot_mmq_t vec_dot = mmq_type_traits::vec_dot_mma; constexpr mmq_write_back_t write_back = mmq_write_back_mma; #else constexpr vec_dot_mmq_t vec_dot = mmq_type_traits::vec_dot_dp4a; constexpr mmq_write_back_t write_back = mmq_write_back_dp4a; #endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) || defined(AMD_WMMA_AVAILABLE) #if defined(BLACKWELL_MMA_AVAILABLE) // FP4 tile stores 8 blocks constexpr int ne_block = (type == GGML_TYPE_MXFP4) ? 
8 * QK_MXFP4 : 4 * QK8_1; #else constexpr int ne_block = 4 * QK8_1; #endif // defined(BLACKWELL_MMA_AVAILABLE) constexpr int ITER_K = get_iter_k(type); constexpr int blocks_per_iter = ITER_K / qk; float sum[mmq_x*mmq_y / (nwarps*warp_size)] = {0.0f}; constexpr int sz = sizeof(block_q8_1_mmq) / sizeof(int); for (int kb0 = kb0_start; kb0 < kb0_stop; kb0 += blocks_per_iter) { load_tiles(x, tile_x, offset_x + kb0, tile_x_max_i, stride_row_x); { const int * by0 = y + ncols_y * (kb0 * qk / ne_block) * sz; #pragma unroll for (int l0 = 0; l0 < mmq_x * MMQ_TILE_Y_K; l0 += nwarps * warp_size) { int l = l0 + threadIdx.y*warp_size + threadIdx.x; tile_y[l] = by0[l]; } } __syncthreads(); vec_dot(tile_x, tile_y, sum, 0); __syncthreads(); { const int * by0 = y + ncols_y * ((kb0 * qk / ne_block) * sz + sz); #pragma unroll for (int l0 = 0; l0 < mmq_x * MMQ_TILE_Y_K; l0 += nwarps * warp_size) { int l = l0 + threadIdx.y*warp_size + threadIdx.x; tile_y[l] = by0[l]; } } __syncthreads(); vec_dot(tile_x, tile_y, sum, MMQ_TILE_NE_K); __syncthreads(); } if (fixup) { write_back(sum, ids_dst, tmp_fixup + blockIdx.x*(mmq_x*mmq_y), mmq_y, mmq_y, mmq_x); } else { write_back(sum, ids_dst, dst, stride_col_dst, tile_x_max_i, tile_y_max_j); } } // The mul_mat_q kernel implements "stream-k" work partitioning as described in https://arxiv.org/abs/2301.03598 template #if defined(GGML_USE_HIP) #if defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) __launch_bounds__(ggml_cuda_get_physical_warp_size()*mmq_get_nwarps_device(), 2) #endif // defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) #else #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA __launch_bounds__(ggml_cuda_get_physical_warp_size()*mmq_get_nwarps_device(), 1) #else __launch_bounds__(ggml_cuda_get_physical_warp_size()*mmq_get_nwarps_device(), 2) #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #endif // defined(GGML_USE_HIP) static __global__ void mul_mat_q( const char * __restrict__ x, const int * __restrict__ y, const int32_t * __restrict__ ids_dst, const int32_t * __restrict__ expert_bounds, float * __restrict__ dst, float * __restrict__ tmp_fixup, const int ncols_x, const int nrows_x, const int ncols_dst, const int stride_row_x, const int ncols_y, const int stride_col_dst, const int channel_ratio, const int nchannels_y, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const int sample_ratio, const int nsamples_y, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst, const int ncols_max) { // Skip unused template specializations for faster compilation: if (mmq_x > get_mmq_x_max_device() || mmq_x % mmq_get_granularity_device(mmq_x) != 0) { NO_DEVICE_CODE; return; } constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr int qk = ggml_cuda_type_traits::qk; constexpr int mmq_y = get_mmq_y_device(); const int ntx = (ncols_max + mmq_x - 1) / mmq_x; // Number of tiles x const int nty = (nrows_x + mmq_y - 1) / mmq_y; // Number of tiles y // Initialize the ids for writing back data with just the index. // For regular matrix multiplications this is never changed. // For MoE the correct indices are loaded from ids_dst. extern __shared__ int ids_dst_shared[]; // Stored at beginning of shared memory. 
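    // Sketch of the dynamic shared memory layout used here and in mul_mat_q_process_tile
    // (the sizes have to stay in sync with mmq_get_nbytes_shared below):
    //
    //   ids_dst_shared : mmq_x ints
    //   tile_y         : mmq_x*MMQ_TILE_Y_K ints, padded to a multiple of nwarps*warp_size
    //   tile_x         : mmq_y*mmq_tile_x_k ints on the mma path, otherwise the qs (int),
    //                    dm (half2) and sc (int) arrays of the dp4a tile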
#pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) { const int j = j0 + threadIdx.y*warp_size + threadIdx.x; if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) { break; } ids_dst_shared[j] = j; } __syncthreads(); // On non-CDNA AMD or old CUDA the performance with stream-k was worse, use conventional tiling instead: #if (defined(GGML_USE_HIP) && !defined(CDNA)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA { const int wt = blockIdx.z / nchannels_y; const int zt = blockIdx.z - wt*nchannels_y; const int jt = blockIdx.y; const int it = blockIdx.x; // Defaults for regular matrix multiplication: int col_low = 0; int col_high = ncols_dst; int col_diff = ncols_dst; int offset_y = wt*stride_sample_y + zt*stride_channel_y; int offset_dst = wt*stride_sample_dst + zt*stride_channel_dst + jt*mmq_x*stride_col_dst; if (ids_dst) { col_low = expert_bounds[zt + 0]; col_high = expert_bounds[zt + 1]; col_diff = col_high - col_low; offset_y = 0; offset_dst = 0; if (jt*mmq_x >= col_diff) { return; } // __syncthreads(); // There is no previous tile that could cause a race condition. #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) { const int j = j0 + threadIdx.y*warp_size + threadIdx.x; if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) { break; } ids_dst_shared[j] = ids_dst[col_low + jt*mmq_x + j]; } __syncthreads(); } offset_y += (col_low + jt*mmq_x)*(sizeof(block_q8_1_mmq)/sizeof(int)); offset_dst += it*mmq_y; const int tile_x_max_i = nrows_x - it*mmq_y - 1; const int tile_y_max_j = col_diff - jt*mmq_x - 1; const int offset_x = (wt/sample_ratio)*stride_sample_x + (zt/channel_ratio)*stride_channel_x + it*mmq_y*stride_row_x; constexpr bool fixup = false; mul_mat_q_process_tile (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst, tile_x_max_i, tile_y_max_j, 0, ncols_x/qk); return; } #endif // (defined(GGML_USE_HIP) && !defined(CDNA3)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA constexpr int ITER_K = get_iter_k(type); const int64_t blocks_per_ne00 = ncols_x / qk; constexpr int blocks_per_iter = ITER_K / qk; // kbc == k block continuous, current index in continuous ijk space. int64_t kbc = (int64_t) blockIdx.x *nsamples_y*nchannels_y*ntx*nty*blocks_per_ne00 / gridDim.x; int64_t kbc_stop = (int64_t)(blockIdx.x + 1)*nsamples_y*nchannels_y*ntx*nty*blocks_per_ne00 / gridDim.x; kbc -= (kbc % blocks_per_ne00) % blocks_per_iter; kbc_stop -= (kbc_stop % blocks_per_ne00) % blocks_per_iter; // kb0 == k index when doing the matrix multiplication for an output tile. 
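    // Illustrative example of the stream-k split (numbers chosen for illustration only):
    // with nsamples_y = nchannels_y = 1, nty = ntx = 2, blocks_per_ne00 = 8, blocks_per_iter = 2
    // and gridDim.x = 3 there are 2*2*8 = 32 k blocks of work in total:
    //   block 0: kbc = 0,  kbc_stop = 10 -> all of tile (it=0, jt=0) is written to dst,
    //            k blocks 0..1 of tile (0, 1) go to tmp_fixup.
    //   block 1: kbc = 10, kbc_stop = 20 -> finishes tile (0, 1) (k blocks 2..7, written to dst),
    //            k blocks 0..3 of tile (1, 0) go to tmp_fixup.
    //   block 2: kbc = 20, kbc_stop = 32 -> finishes tile (1, 0) and does all of tile (1, 1),
    //            both written to dst.
    // mul_mat_q_stream_k_fixup afterwards adds block 0's partial sums into tile (0, 1) and
    // block 1's partial sums into tile (1, 0).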
int kb0_start = kbc % blocks_per_ne00; int kb0_stop = min(blocks_per_ne00, kb0_start + kbc_stop - kbc); while (kbc < kbc_stop && kb0_stop == blocks_per_ne00) { int tmp = kbc; const int it = tmp / (nsamples_y*nchannels_y*ntx*blocks_per_ne00); tmp -= it * (nsamples_y*nchannels_y*ntx*blocks_per_ne00); const int wt = tmp / (nchannels_y*ntx*blocks_per_ne00); tmp -= wt * (nchannels_y*ntx*blocks_per_ne00); const int zt = tmp / (ntx*blocks_per_ne00); tmp -= zt * (ntx*blocks_per_ne00); const int jt = tmp / blocks_per_ne00; // Defaults for regular matrix multiplication: int col_low = 0; int col_high = ncols_dst; int col_diff = ncols_dst; int offset_y = wt*stride_sample_y + zt*stride_channel_y; int offset_dst = wt*stride_sample_dst + zt*stride_channel_dst + jt*mmq_x*stride_col_dst; if (ids_dst) { col_low = expert_bounds[zt + 0]; col_high = expert_bounds[zt + 1]; col_diff = col_high - col_low; offset_y = 0; offset_dst = 0; if (jt*mmq_x >= col_diff) { kbc += blocks_per_ne00; kbc -= kbc % blocks_per_ne00; kb0_start = 0; kb0_stop = min(blocks_per_ne00, kbc_stop - kbc); continue; } __syncthreads(); #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) { const int j = j0 + threadIdx.y*warp_size + threadIdx.x; if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) { break; } ids_dst_shared[j] = ids_dst[col_low + jt*mmq_x + j]; } __syncthreads(); } offset_y += (col_low + jt * mmq_x) * (sizeof(block_q8_1_mmq) / sizeof(int)); offset_dst += it*mmq_y; const int tile_x_max_i = nrows_x - it*mmq_y - 1; const int tile_y_max_j = col_diff - jt*mmq_x - 1; const int offset_x = (wt/sample_ratio)*stride_sample_x + (zt/channel_ratio)*stride_channel_x + it*mmq_y*stride_row_x; constexpr bool fixup = false; // All but (potentially) the last iterations write their data to dst rather than the fixup buffer. 
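        // Note on the MUL_MAT_ID handling above: when ids_dst is set, the quantized y data is
        // expected to be compacted per expert, with expert_bounds[zt] and expert_bounds[zt + 1]
        // delimiting the columns that belong to expert zt; ids_dst_shared maps each tile column
        // back to its row in dst. That is why offset_y and offset_dst are rebased to 0 and the
        // write-back goes through ids_dst_shared instead of a plain column index. offset_y is
        // counted in ints because y points at block_q8_1_mmq data reinterpreted as int.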
mul_mat_q_process_tile (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst, tile_x_max_i, tile_y_max_j, kb0_start, kb0_stop); kbc += blocks_per_ne00; kbc -= kbc % blocks_per_ne00; kb0_start = 0; kb0_stop = min(blocks_per_ne00, kbc_stop - kbc); } if (kbc >= kbc_stop) { return; } int tmp = kbc; const int it = tmp / (nsamples_y*nchannels_y*ntx*blocks_per_ne00); tmp -= it * (nsamples_y*nchannels_y*ntx*blocks_per_ne00); const int wt = tmp / (nchannels_y*ntx*blocks_per_ne00); tmp -= wt * (nchannels_y*ntx*blocks_per_ne00); const int zt = tmp / (ntx*blocks_per_ne00); tmp -= zt * (ntx*blocks_per_ne00); const int jt = tmp / blocks_per_ne00; // Defaults for regular matrix multiplication: int col_low = 0; int col_high = ncols_dst; int col_diff = ncols_dst; int offset_y = wt*stride_sample_y + zt*stride_channel_y; int offset_dst = wt*stride_sample_dst + zt*stride_channel_dst + jt*mmq_x*stride_col_dst; if (ids_dst) { col_low = expert_bounds[zt + 0]; col_high = expert_bounds[zt + 1]; col_diff = col_high - col_low; offset_y = 0; offset_dst = 0; if (jt*mmq_x >= col_diff) { return; } // The memory layout for the fixup buffer is always contiguous, therefore reset ids: __syncthreads(); #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) { const int j = j0 + threadIdx.y*warp_size + threadIdx.x; if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) { break; } ids_dst_shared[j] = j; } __syncthreads(); } offset_y += (col_low + jt * mmq_x) * (sizeof(block_q8_1_mmq) / sizeof(int)); offset_dst += it*mmq_y; const int tile_x_max_i = nrows_x - it*mmq_y - 1; const int tile_y_max_j = col_diff - jt*mmq_x - 1; const int offset_x = (wt/sample_ratio)*stride_sample_x + (zt/channel_ratio)*stride_channel_x + it*mmq_y*stride_row_x; constexpr bool fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks. mul_mat_q_process_tile (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst, tile_x_max_i, tile_y_max_j, kb0_start, kb0_stop); } template static __global__ void mul_mat_q_stream_k_fixup( const int32_t * ids_dst, const int32_t * expert_bounds, float * __restrict__ dst, const float * __restrict__ tmp_last_tile, const int ncols_x, const int nrows_x, const int ncols_dst, const int stride_col_dst, const int nchannels_y, const int stride_channel_dst, const int nsamples_y, const int stride_sample_dst, const int ncols_max) { constexpr int mmq_y = get_mmq_y_device(); constexpr int qk = ggml_cuda_type_traits::qk; constexpr int ITER_K = get_iter_k(type); constexpr int blocks_per_iter = ITER_K / qk; const int64_t blocks_per_ne00 = ncols_x / qk; constexpr int nwarps = mmq_get_nwarps_device(); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); float sum[mmq_x*mmq_y / (nwarps*warp_size)] = {0.0f}; const int ntx = (ncols_max + mmq_x - 1) / mmq_x; const int nty = (nrows_x + mmq_y - 1) / mmq_y; const int bidx0 = blockIdx.x; // kbc == k block continuous, current index in continuous ijk space. 
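    // This kernel replays the same work split as mul_mat_q. A CUDA block only has fixup work to
    // do if its k range started in the middle of an output tile: in that case it was the block
    // that wrote this tile's dst values, but it only accumulated a suffix of the tile's k blocks,
    // and the missing prefix contributions were stored in tmp_last_tile by the preceding blocks.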
int64_t kbc0 = (int64_t) bidx0 *nsamples_y*nchannels_y*ntx*nty*blocks_per_ne00 / gridDim.x; int64_t kbc0_stop = (int64_t)(bidx0 + 1)*nsamples_y*nchannels_y*ntx*nty*blocks_per_ne00 / gridDim.x; kbc0 -= (kbc0 % blocks_per_ne00) % blocks_per_iter; kbc0_stop -= (kbc0_stop % blocks_per_ne00) % blocks_per_iter; const bool did_not_have_any_data = kbc0 == kbc0_stop; const bool wrote_beginning_of_tile = kbc0 % blocks_per_ne00 == 0; const bool did_not_write_last = kbc0/blocks_per_ne00 == kbc0_stop/blocks_per_ne00 && kbc0_stop % blocks_per_ne00 != 0; if (did_not_have_any_data || wrote_beginning_of_tile || did_not_write_last) { return; } bool any_fixup = false; // Iterate over previous blocks and sum up partial sums written to fixup buffer. // All CUDA blocks that get here must have a previous block that needs a fixup. int64_t bidx = bidx0 - 1; int64_t kbc_stop = kbc0; while(true) { int64_t kbc = bidx*nsamples_y*nchannels_y*ntx*nty*blocks_per_ne00 / gridDim.x; kbc -= (kbc % blocks_per_ne00) % blocks_per_iter; if (kbc == kbc_stop) { // Did not have any data. bidx--; kbc_stop = kbc; continue; } any_fixup = true; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size] += tmp_last_tile[bidx*(mmq_x*mmq_y) + j*mmq_y + i]; } } // If this block started in a previous tile we are done and don't need to combine additional partial results. if (kbc % blocks_per_ne00 == 0 || kbc/blocks_per_ne00 < kbc0/blocks_per_ne00) { break; } bidx--; kbc_stop = kbc; } if (!any_fixup) { return; } int tmp = kbc0; const int it = tmp / (nsamples_y*nchannels_y*ntx*blocks_per_ne00); tmp -= it * (nsamples_y*nchannels_y*ntx*blocks_per_ne00); const int wt = tmp / (nchannels_y*ntx*blocks_per_ne00); tmp -= wt * (nchannels_y*ntx*blocks_per_ne00); const int zt = tmp / (ntx*blocks_per_ne00); tmp -= zt * (ntx*blocks_per_ne00); const int jt = tmp / blocks_per_ne00; if (!ids_dst) { const int offset_dst = wt*stride_sample_dst + zt*stride_channel_dst + jt*mmq_x*stride_col_dst + it*mmq_y; dst += offset_dst; const int i_max = nrows_x - it*mmq_y - 1; const int j_max = ncols_dst - jt*mmq_x - 1; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j > j_max) { return; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; if (need_check && i > i_max) { continue; } dst[j*stride_col_dst + i] += sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size]; } } return; } __shared__ int ids_dst_shared[mmq_x]; const int col_low = expert_bounds[zt + 0]; const int col_high = expert_bounds[zt + 1]; const int col_diff = col_high - col_low; for (int j = threadIdx.y*warp_size + threadIdx.x; j < mmq_x; j += nwarps*warp_size) { ids_dst_shared[j] = ids_dst[col_low + jt*mmq_x + j]; } __syncthreads(); const int offset_dst = it*mmq_y; dst += offset_dst; const int i_max = nrows_x - it*mmq_y - 1; const int j_max = col_diff - jt*mmq_x - 1; #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += nwarps) { const int j = j0 + threadIdx.y; if (j > j_max) { return; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; if (need_check && i > i_max) { continue; } dst[ids_dst_shared[j]*stride_col_dst + i] += sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size]; } } } struct mmq_args { const char * x; ggml_type type_x; const int * y; const int32_t * ids_dst; const int32_t * expert_bounds; 
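    // ids_dst/expert_bounds are only used on the MUL_MAT_ID (MoE) path; for plain matrix
    // multiplication they can be left as null pointers since the kernels never dereference them.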
float * dst; int64_t ncols_x; int64_t nrows_x; int64_t ncols_dst; int64_t stride_row_x; int64_t ncols_y; int64_t nrows_dst; int64_t nchannels_x; int64_t nchannels_y; int64_t stride_channel_x; int64_t stride_channel_y; int64_t stride_channel_dst; int64_t nsamples_x; int64_t nsamples_y; int64_t stride_sample_x; int64_t stride_sample_y; int64_t stride_sample_dst; bool use_stream_k; int64_t ncols_max; }; template static size_t mmq_get_nbytes_shared(const int mmq_x, const int mmq_y, const int cc, const int warp_size, const int nwarps) { const tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(type, mmq_y); const int mmq_tile_x_k = mmq_get_mma_tile_x_k(type); const size_t nbs_ids = mmq_x*sizeof(int); const size_t nbs_x = (turing_mma_available(cc) || amd_mfma_available(cc) || amd_wmma_available(cc)) ? mmq_y*mmq_tile_x_k*sizeof(int) : txs.qs*sizeof(int) + txs.dm*sizeof(half2) + txs.sc*sizeof(int); const size_t nbs_y = mmq_x * (sizeof(block_q8_1_mmq)); return nbs_ids + nbs_x + GGML_PAD(nbs_y, nwarps*warp_size*sizeof(int)); } template static void launch_mul_mat_q(ggml_backend_cuda_context & ctx, const mmq_args & args, cudaStream_t stream) { const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; const int nsm = ggml_cuda_info().devices[id].nsm; const int warp_size = ggml_cuda_info().devices[id].warp_size; const int nwarps = mmq_get_nwarps_host(cc, warp_size); const int mmq_y = get_mmq_y_host(cc); const dim3 block_dims(warp_size, nwarps, 1); const int nbytes_shared = mmq_get_nbytes_shared(mmq_x, mmq_y, cc, warp_size, nwarps); CUDA_SET_SHARED_MEMORY_LIMIT((mul_mat_q), nbytes_shared); CUDA_SET_SHARED_MEMORY_LIMIT((mul_mat_q), nbytes_shared); const int nty = (args.nrows_x + mmq_y - 1) / mmq_y; const int ntx = (args.ncols_max + mmq_x - 1) / mmq_x; const int ntzw = args.nchannels_y * args.nsamples_y; const dim3 block_nums_xy_tiling(nty, ntx, ntzw); GGML_ASSERT(args.nchannels_y % args.nchannels_x == 0); GGML_ASSERT(args.nsamples_y % args.nsamples_x == 0); const int channel_ratio = args.nchannels_y / args.nchannels_x; const int sample_ratio = args.nsamples_y / args.nsamples_x; if (!args.use_stream_k) { if (args.nrows_x % mmq_y == 0) { constexpr bool need_check = false; mul_mat_q<<>> (args.x, args.y, args.ids_dst, args.expert_bounds, args.dst, nullptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.stride_row_x, args.ncols_y, args.nrows_dst, channel_ratio, args.nchannels_y, args.stride_channel_x, args.stride_channel_y, args.stride_channel_dst, sample_ratio, args.nsamples_y, args.stride_sample_x, args.stride_sample_y, args.stride_sample_dst, args.ncols_max); } else { constexpr bool need_check = true; mul_mat_q<<>> (args.x, args.y, args.ids_dst, args.expert_bounds, args.dst, nullptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.stride_row_x, args.ncols_y, args.nrows_dst, channel_ratio, args.nchannels_y, args.stride_channel_x, args.stride_channel_y, args.stride_channel_dst, sample_ratio, args.nsamples_y, args.stride_sample_x, args.stride_sample_y, args.stride_sample_dst, args.ncols_max); } return; } const dim3 block_nums_stream_k(nsm, 1, 1); const bool fixup_needed = ntx*nty*ntzw % nsm != 0; ggml_cuda_pool & pool = ctx.pool(id); ggml_cuda_pool_alloc tmp_fixup(pool); if (fixup_needed) { tmp_fixup.alloc(block_nums_stream_k.x * mmq_x*mmq_y); } if (args.nrows_x % mmq_y == 0) { constexpr bool need_check = false; mul_mat_q<<>> (args.x, args.y, args.ids_dst, args.expert_bounds, args.dst, tmp_fixup.ptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.stride_row_x, args.ncols_y, 
args.nrows_dst, channel_ratio, args.nchannels_y, args.stride_channel_x, args.stride_channel_y, args.stride_channel_dst, sample_ratio, args.nsamples_y, args.stride_sample_x, args.stride_sample_y, args.stride_sample_dst, args.ncols_max); if (!fixup_needed) { return; } mul_mat_q_stream_k_fixup<<>> (args.ids_dst, args.expert_bounds, args.dst, tmp_fixup.ptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.nrows_dst, args.nchannels_y, args.stride_channel_dst, args.nsamples_y, args.stride_sample_dst, args.ncols_max); } else { constexpr bool need_check = true; mul_mat_q<<>> (args.x, args.y, args.ids_dst, args.expert_bounds, args.dst, tmp_fixup.ptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.stride_row_x, args.ncols_y, args.nrows_dst, channel_ratio, args.nchannels_y, args.stride_channel_x, args.stride_channel_y, args.stride_channel_dst, sample_ratio, args.nsamples_y, args.stride_sample_x, args.stride_sample_y, args.stride_sample_dst, args.ncols_max); if (!fixup_needed) { return; } mul_mat_q_stream_k_fixup<<>> (args.ids_dst, args.expert_bounds, args.dst, tmp_fixup.ptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.nrows_dst, args.nchannels_y, args.stride_channel_dst, args.nsamples_y, args.stride_sample_dst, args.ncols_max); } } template void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cudaStream_t stream) { const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; const size_t smpbo = ggml_cuda_info().devices[id].smpbo; const int warp_size = ggml_cuda_info().devices[id].warp_size; const int nwarps = mmq_get_nwarps_host(cc, warp_size); const int mmq_x_max = get_mmq_x_max_host(cc); const int mmq_y = get_mmq_y_host(cc); int mmq_x_best = 0; int ntiles_x_best = INT_MAX; for (int mmq_x = 8; mmq_x <= mmq_x_max && ntiles_x_best > 1; mmq_x += 8) { const int granularity = mmq_get_granularity_host(mmq_x, cc); if (mmq_x % granularity != 0 || mmq_get_nbytes_shared(mmq_x, mmq_y, cc, warp_size, nwarps) > smpbo) { continue; } const int ntiles_x = (args.ncols_max + mmq_x - 1) / mmq_x; if (ntiles_x < ntiles_x_best) { mmq_x_best = mmq_x; ntiles_x_best = ntiles_x; } } switch (mmq_x_best) { case 8: launch_mul_mat_q(ctx, args, stream); break; case 16: launch_mul_mat_q(ctx, args, stream); break; case 24: launch_mul_mat_q(ctx, args, stream); break; case 32: launch_mul_mat_q(ctx, args, stream); break; case 40: launch_mul_mat_q(ctx, args, stream); break; case 48: launch_mul_mat_q(ctx, args, stream); break; case 56: launch_mul_mat_q(ctx, args, stream); break; case 64: launch_mul_mat_q(ctx, args, stream); break; case 72: launch_mul_mat_q(ctx, args, stream); break; case 80: launch_mul_mat_q(ctx, args, stream); break; case 88: launch_mul_mat_q(ctx, args, stream); break; case 96: launch_mul_mat_q(ctx, args, stream); break; case 104: launch_mul_mat_q(ctx, args, stream); break; case 112: launch_mul_mat_q(ctx, args, stream); break; case 120: launch_mul_mat_q(ctx, args, stream); break; case 128: launch_mul_mat_q(ctx, args, stream); break; default: fprintf(stderr, "mmq_x_best=%d\n", mmq_x_best); GGML_ABORT("fatal error"); break; } } #define DECL_MMQ_CASE(type) \ template void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cudaStream_t stream) \ extern DECL_MMQ_CASE(GGML_TYPE_Q4_0); extern DECL_MMQ_CASE(GGML_TYPE_Q4_1); extern DECL_MMQ_CASE(GGML_TYPE_Q5_0); extern DECL_MMQ_CASE(GGML_TYPE_Q5_1); extern DECL_MMQ_CASE(GGML_TYPE_Q8_0); extern DECL_MMQ_CASE(GGML_TYPE_MXFP4); extern DECL_MMQ_CASE(GGML_TYPE_Q2_K); extern DECL_MMQ_CASE(GGML_TYPE_Q3_K); 
extern DECL_MMQ_CASE(GGML_TYPE_Q4_K); extern DECL_MMQ_CASE(GGML_TYPE_Q5_K); extern DECL_MMQ_CASE(GGML_TYPE_Q6_K); extern DECL_MMQ_CASE(GGML_TYPE_IQ2_XXS); extern DECL_MMQ_CASE(GGML_TYPE_IQ2_XS); extern DECL_MMQ_CASE(GGML_TYPE_IQ2_S); extern DECL_MMQ_CASE(GGML_TYPE_IQ3_XXS); extern DECL_MMQ_CASE(GGML_TYPE_IQ3_S); extern DECL_MMQ_CASE(GGML_TYPE_IQ1_S); extern DECL_MMQ_CASE(GGML_TYPE_IQ4_NL); extern DECL_MMQ_CASE(GGML_TYPE_IQ4_XS); // ------------------------------------------------------------------------------------------------------------------------- void ggml_cuda_mul_mat_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst); void ggml_cuda_op_mul_mat_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream); bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11, int64_t n_experts); ggml-org-ggml-3678254/src/ggml-cuda/mmvf.cu000066400000000000000000001075571512524704700203300ustar00rootroot00000000000000#include "ggml.h" #include "common.cuh" #include "unary.cuh" #include "mmvf.cuh" #include "convert.cuh" template static __global__ void mul_mat_vec_f( const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, const ggml_cuda_mm_fusion_args_device fusion, float * __restrict__ dst, const int ncols2, const int nchannels_y, const int stride_row, const int stride_col_y2, const int stride_col_dst, const uint3 channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const uint3 sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst) { const int row = blockIdx.x; const int channel_dst = blockIdx.y; const int channel_x = ids ? ids[channel_dst] : fastdiv((uint32_t) channel_dst, channel_ratio); const int channel_y = ids ? channel_dst % nchannels_y : channel_dst; const int sample_dst = blockIdx.z; const int sample_x = fastdiv((uint32_t) sample_dst, sample_ratio); const int sample_y = sample_dst; const int tid = threadIdx.x; constexpr int warp_size = ggml_cuda_get_physical_warp_size(); x += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row*stride_row; y += int64_t(sample_y) *stride_sample_y + channel_y *stride_channel_y; dst += int64_t(sample_dst)*stride_sample_dst + channel_dst*stride_channel_dst; bool use_gate = false; bool use_bias = false; bool use_gate_bias = false; ggml_glu_op glu_op = ggml_glu_op::GGML_GLU_OP_SWIGLU; const T * gate_x = nullptr; const float * x_bias = nullptr; const float * gate_bias = nullptr; if constexpr (has_fusion) { use_gate = fusion.gate != nullptr; use_bias = fusion.x_bias != nullptr; use_gate_bias = fusion.gate_bias != nullptr; glu_op = fusion.glu_op; if (use_gate) { gate_x = static_cast(fusion.gate); } if (use_bias) { x_bias = static_cast(fusion.x_bias); } if (use_gate_bias) { gate_bias = static_cast(fusion.gate_bias); use_gate_bias = use_gate; } else { use_gate_bias = false; } } if (use_gate) { gate_x += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row*stride_row; } if constexpr (has_fusion) { const int channel_bias = ids ? 
channel_x : channel_dst; if (use_bias) { x_bias += int64_t(sample_dst)*stride_sample_dst + channel_bias*stride_channel_dst; } if (use_gate_bias) { gate_bias += int64_t(sample_dst)*stride_sample_dst + channel_bias*stride_channel_dst; } } const float2 * y2 = (const float2 *) y; extern __shared__ char data_mmv[]; float * buf_iw = (float *) data_mmv; float * buf_iw_gate = nullptr; if constexpr (has_fusion) { buf_iw_gate = (float *) (data_mmv + warp_size*sizeof(float)); } if (block_size > warp_size) { if (tid < warp_size) { buf_iw[tid] = 0.0f; if constexpr (has_fusion) { if (use_gate) { buf_iw_gate[tid] = 0.0f; } } } __syncthreads(); } float sumf[ncols_dst] = {0.0f}; float sumf_gate[ncols_dst]; if constexpr (has_fusion) { #pragma unroll for (int j = 0; j < ncols_dst; ++j) { sumf_gate[j] = 0.0f; } } if constexpr (std::is_same_v) { const float2 * x2 = (const float2 *) x; const float2 * gate_x2 = nullptr; if constexpr (has_fusion) { if (use_gate) { gate_x2 = (const float2 *) gate_x; } } for (int col2 = tid; col2 < ncols2; col2 += block_size) { const float2 tmpx = x2[col2]; float2 tmpx_gate = make_float2(0.0f, 0.0f); if constexpr (has_fusion) { if (use_gate) { tmpx_gate = gate_x2[col2]; } } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { const float2 tmpy = y2[j*stride_col_y2 + col2]; ggml_cuda_mad(sumf[j], tmpx.x, tmpy.x); ggml_cuda_mad(sumf[j], tmpx.y, tmpy.y); if constexpr (has_fusion) { if (use_gate) { ggml_cuda_mad(sumf_gate[j], tmpx_gate.x, tmpy.x); ggml_cuda_mad(sumf_gate[j], tmpx_gate.y, tmpy.y); } } } } } else if constexpr (std::is_same_v) { const half2 * x2 = (const half2 *) x; const half2 * gate_x2 = nullptr; if constexpr (has_fusion) { if (use_gate) { gate_x2 = (const half2 *) gate_x; } } if (std::is_same_v) { for (int col2 = tid; col2 < ncols2; col2 += block_size) { const float2 tmpx = __half22float2(x2[col2]); float2 tmpx_gate = make_float2(0.0f, 0.0f); if constexpr (has_fusion) { if (use_gate) { tmpx_gate = __half22float2(gate_x2[col2]); } } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { const float2 tmpy = y2[j*stride_col_y2 + col2]; ggml_cuda_mad(sumf[j], tmpx.x, tmpy.x); ggml_cuda_mad(sumf[j], tmpx.y, tmpy.y); if constexpr (has_fusion) { if (use_gate) { ggml_cuda_mad(sumf_gate[j], tmpx_gate.x, tmpy.x); ggml_cuda_mad(sumf_gate[j], tmpx_gate.y, tmpy.y); } } } } } else { #ifdef FP16_AVAILABLE half2 sumh2[ncols_dst] = {{0.0f, 0.0f}}; half2 sumh2_gate[ncols_dst] = {{0.0f, 0.0f}}; for (int col2 = tid; col2 < ncols2; col2 += block_size) { const half2 tmpx = x2[col2]; half2 tmpx_gate = make_half2(0.0f, 0.0f); if constexpr (has_fusion) { if (use_gate) { tmpx_gate = gate_x2[col2]; } } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { const float2 tmpy = y2[j*stride_col_y2 + col2]; sumh2[j] += tmpx * make_half2(tmpy.x, tmpy.y); if constexpr (has_fusion) { if (use_gate) { sumh2_gate[j] += tmpx_gate * make_half2(tmpy.x, tmpy.y); } } } } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { sumf[j] = __low2float(sumh2[j]) + __high2float(sumh2[j]); } if constexpr (has_fusion) { if (use_gate) { #pragma unroll for (int j = 0; j < ncols_dst; ++j) { sumf_gate[j] = __low2float(sumh2_gate[j]) + __high2float(sumh2_gate[j]); } } } #else NO_DEVICE_CODE; #endif // FP16_AVAILABLE } } else if constexpr (std::is_same_v) { //TODO: add support for ggml_cuda_mad for hip_bfloat162 #if defined(GGML_USE_HIP) const int * x2 = (const int *) x; const int * gate_x2 = nullptr; if constexpr (has_fusion) { if (use_gate) { gate_x2 = (const int *) gate_x; } } for (int col2 = tid; col2 < ncols2; col2 += 
block_size) { const int tmpx = x2[col2]; int tmpx_gate = 0; if constexpr (has_fusion) { if (use_gate) { tmpx_gate = gate_x2[col2]; } } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { const float2 tmpy = y2[j*stride_col_y2 + col2]; const float tmpx0 = ggml_cuda_cast(reinterpret_cast(&tmpx)[0]); const float tmpx1 = ggml_cuda_cast(reinterpret_cast(&tmpx)[1]); ggml_cuda_mad(sumf[j], tmpx0, tmpy.x); ggml_cuda_mad(sumf[j], tmpx1, tmpy.y); if constexpr (has_fusion) { if (use_gate) { const float tmpx0_gate = ggml_cuda_cast(reinterpret_cast(&tmpx_gate)[0]); const float tmpx1_gate = ggml_cuda_cast(reinterpret_cast(&tmpx_gate)[1]); ggml_cuda_mad(sumf_gate[j], tmpx0_gate, tmpy.x); ggml_cuda_mad(sumf_gate[j], tmpx1_gate, tmpy.y); } } } } #else const nv_bfloat162 * x2 = (const nv_bfloat162 *) x; const nv_bfloat162 * gate_x2 = nullptr; if constexpr (has_fusion) { if (use_gate) { gate_x2 = (const nv_bfloat162 *) gate_x; } } for (int col2 = tid; col2 < ncols2; col2 += block_size) { const nv_bfloat162 tmpx = x2[col2]; nv_bfloat162 tmpx_gate; if constexpr (has_fusion) { if (use_gate) { tmpx_gate = gate_x2[col2]; } } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { const float2 tmpy = y2[j*stride_col_y2 + col2]; ggml_cuda_mad(sumf[j], tmpx.x, tmpy.x); ggml_cuda_mad(sumf[j], tmpx.y, tmpy.y); if constexpr (has_fusion) { if (use_gate) { ggml_cuda_mad(sumf_gate[j], tmpx_gate.x, tmpy.x); ggml_cuda_mad(sumf_gate[j], tmpx_gate.y, tmpy.y); } } } } #endif } else { static_assert(std::is_same_v, "unsupported type"); } #pragma unroll for (int j = 0; j < ncols_dst; ++j) { sumf[j] = warp_reduce_sum(sumf[j]); if constexpr (has_fusion) { if (use_gate) { sumf_gate[j] = warp_reduce_sum(sumf_gate[j]); } } if (block_size > warp_size) { buf_iw[tid/warp_size] = sumf[j]; if constexpr (has_fusion) { if (use_gate) { buf_iw_gate[tid/warp_size] = sumf_gate[j]; } } __syncthreads(); if (tid < warp_size) { sumf[j] = buf_iw[tid]; sumf[j] = warp_reduce_sum(sumf[j]); if constexpr (has_fusion) { if (use_gate) { sumf_gate[j] = buf_iw_gate[tid]; sumf_gate[j] = warp_reduce_sum(sumf_gate[j]); } } } if (j < ncols_dst) { __syncthreads(); } } } if (tid >= ncols_dst) { return; } float value = sumf[tid]; if constexpr (has_fusion) { if (use_bias) { value += x_bias[tid*stride_col_dst + row]; } if (use_gate) { float gate_value = sumf_gate[tid]; if (use_gate_bias) { gate_value += gate_bias[tid*stride_col_dst + row]; } switch (glu_op) { case GGML_GLU_OP_SWIGLU: value *= ggml_cuda_op_silu_single(gate_value); break; case GGML_GLU_OP_GEGLU: value *= ggml_cuda_op_gelu_single(gate_value); break; case GGML_GLU_OP_SWIGLU_OAI: { value = ggml_cuda_op_swiglu_oai_single(gate_value, value); break; } default: break; } } } dst[tid*stride_col_dst + row] = value; if constexpr (!has_fusion) { GGML_UNUSED_VARS(use_gate, use_bias, use_gate_bias, glu_op, gate_x, x_bias, gate_bias, sumf_gate); } } template static void mul_mat_vec_f_switch_fusion( const T * x, const float * y, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, const uint3 channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const uint3 sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst, const dim3 & block_dims, const dim3 & block_nums, const int nbytes_shared, const cudaStream_t stream) { const bool has_fusion = fusion.gate != nullptr || fusion.x_bias != 
nullptr || fusion.gate_bias != nullptr; if constexpr (ncols_dst == 1) { if (has_fusion) { mul_mat_vec_f<<>> (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); return; } } GGML_ASSERT(!has_fusion && "fusion only supported for ncols_dst=1"); mul_mat_vec_f<<>> (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } template void launch_mul_mat_vec_f_cuda( const T * x, const float * y, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, cudaStream_t stream) { GGML_ASSERT(ncols % 2 == 0); GGML_ASSERT(stride_row % 2 == 0); GGML_ASSERT(stride_col_y % 2 == 0); GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0); GGML_ASSERT( nsamples_dst % nsamples_x == 0); const uint3 channel_ratio_fd = ids ? make_uint3(0, 0, 0) : init_fastdiv_values(nchannels_dst / nchannels_x); const uint3 sample_ratio_fd = init_fastdiv_values(nsamples_dst / nsamples_x); const int device = ggml_cuda_get_device(); const int warp_size = ggml_cuda_info().devices[device].warp_size; int64_t block_size_best = warp_size; int64_t niter_best = (ncols + 2*warp_size - 1) / (2*warp_size); int64_t max_block_size = 256; if(ggml_cuda_info().devices[device].cc > GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_info().devices[device].cc < GGML_CUDA_CC_RDNA1) { max_block_size = 128; } for (int64_t block_size = 2*warp_size; block_size <= max_block_size; block_size += warp_size) { const int64_t niter = (ncols + 2*block_size - 1) / (2*block_size); if (niter < niter_best) { niter_best = niter; block_size_best = block_size; } } const bool has_fusion = fusion.gate != nullptr || fusion.x_bias != nullptr || fusion.gate_bias != nullptr; const int nbytes_shared = warp_size*sizeof(float) + (has_fusion ? 
warp_size*sizeof(float) : 0); const dim3 block_nums(nrows, nchannels_dst, nsamples_dst); const dim3 block_dims(block_size_best, 1, 1); switch (block_size_best) { case 32: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 64: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 96: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 128: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 160: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 192: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 224: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; case 256: { mul_mat_vec_f_switch_fusion (x, y, ids, fusion, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, block_dims, block_nums, nbytes_shared, stream); } break; default: { GGML_ABORT("fatal error"); } break; } } template static void mul_mat_vec_f_cuda_switch_ncols_dst( const T * x, const float * y, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const int64_t ncols, const int64_t nrows, const int64_t ncols_dst, const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, cudaStream_t stream) { switch 
(ncols_dst) { case 1: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 2: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 3: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 4: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 5: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 6: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 7: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 8: launch_mul_mat_vec_f_cuda (x, y, ids, fusion, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; default: GGML_ABORT("fatal error"); break; } } template static void mul_mat_vec_f_cuda( const T * x, const float * y, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const int64_t ncols, const int64_t nrows, const int64_t ncols_dst, const int64_t stride_row, const int64_t stride_col_y, const int stride_col_dst, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, enum ggml_prec prec, cudaStream_t stream) { if constexpr(std::is_same_v) { if (prec == GGML_PREC_DEFAULT) { mul_mat_vec_f_cuda_switch_ncols_dst (x, y, ids, fusion, dst, ncols, nrows, ncols_dst, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, 
stride_sample_dst, stream); return; } } mul_mat_vec_f_cuda_switch_ncols_dst (x, y, ids, fusion, dst, ncols, nrows, ncols_dst, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); } void ggml_cuda_mul_mat_vec_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, const ggml_cuda_mm_fusion_args_host * fusion) { GGML_ASSERT( src1->type == GGML_TYPE_F32); GGML_ASSERT(!ids || ids->type == GGML_TYPE_I32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_BINARY_OP_LOCALS; const size_t ts_src0 = ggml_type_size(src0->type); const size_t ts_src1 = ggml_type_size(src1->type); const size_t ts_dst = ggml_type_size(dst->type); GGML_ASSERT(!ids || ne12 == 1); // Implementation is only correct for batch size 1. GGML_ASSERT(ne13 == ne3); GGML_ASSERT( nb00 == ts_src0); GGML_ASSERT( nb10 == ts_src1); GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type)); GGML_ASSERT( nb0 == ts_dst); const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32; const float * src1_d = (const float *) src1->data; const int32_t * ids_d = ids ? (const int32_t *) ids->data : nullptr; float * dst_d = (float *) dst->data; ggml_cuda_mm_fusion_args_device fusion_local{}; if (fusion) { GGML_ASSERT( !ids || dst->ne[2] == 1); GGML_ASSERT( ids || dst->ne[1] == 1); if (fusion->x_bias) { GGML_ASSERT(fusion->x_bias->type == GGML_TYPE_F32); GGML_ASSERT(fusion->x_bias->ne[0] == dst->ne[0]); GGML_ASSERT(!ids || fusion->x_bias->ne[1] == src0->ne[2]); fusion_local.x_bias = fusion->x_bias->data; } if (fusion->gate) { GGML_ASSERT(fusion->gate->type == src0->type && ggml_are_same_stride(fusion->gate, src0)); fusion_local.gate = fusion->gate->data; } if (fusion->gate_bias) { GGML_ASSERT(fusion->gate_bias->type == GGML_TYPE_F32); GGML_ASSERT(fusion->gate_bias->ne[0] == dst->ne[0]); GGML_ASSERT(!ids || fusion->gate_bias->ne[1] == src0->ne[2]); fusion_local.gate_bias = fusion->gate_bias->data; } fusion_local.glu_op = fusion->glu_op; } const int64_t s01 = src0->nb[1] / ts_src0; const int64_t s11 = src1->nb[1] / ts_src1; const int64_t s1 = dst->nb[1] / ts_dst; const int64_t s02 = src0->nb[2] / ts_src0; const int64_t s12 = src1->nb[2] / ts_src1; const int64_t s2 = dst->nb[2] / ts_dst; const int64_t s03 = src0->nb[3] / ts_src0; const int64_t s13 = src1->nb[3] / ts_src1; const int64_t s3 = dst->nb[3] / ts_dst; // For MUL_MAT_ID the memory layout is different than for MUL_MAT: const int64_t ncols_dst = ids ? ne2 : ne1; const int64_t nchannels_y = ids ? ne11 : ne12; const int64_t nchannels_dst = ids ? ne1 : ne2; const int64_t stride_channel_dst = ids ? s1 : s2; const int64_t stride_channel_y = ids ? 
s11 : s12; GGML_ASSERT(!ids || ncols_dst == 1); switch (src0->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0->data; mul_mat_vec_f_cuda(src0_d, src1_d, ids_d, fusion_local, dst_d, ne00, ne01, ncols_dst, s01, s11, s1, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, prec, ctx.stream()); } break; case GGML_TYPE_F16: { const half * src0_d = (const half *) src0->data; mul_mat_vec_f_cuda(src0_d, src1_d, ids_d, fusion_local, dst_d, ne00, ne01, ncols_dst, s01, s11, s1, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, prec, ctx.stream()); } break; case GGML_TYPE_BF16: { const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0->data; mul_mat_vec_f_cuda(src0_d, src1_d, ids_d, fusion_local, dst_d, ne00, ne01, ncols_dst, s01, s11, s1, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, prec, ctx.stream()); } break; default: GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type)); } } void ggml_cuda_op_mul_mat_vec_f( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream) { GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); const int64_t ne00 = src0->ne[0]; const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; const int64_t row_diff = row_high - row_low; const int id = ggml_cuda_get_device(); const int cc = ggml_cuda_info().devices[id].cc; const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32; // ggml_cuda_op provides single, contiguous matrices const int64_t stride_row = ne00; const int64_t stride_col_y = ne10; const int64_t stride_col_dst = id == ctx.device ? 
ne0 : row_diff; // main device has larger memory buffer const int64_t nchannels_x = 1; const int64_t nchannels_y = 1; const int64_t nchannels_dst = 1; const int64_t stride_channel_x = 0; const int64_t stride_channel_y = 0; const int64_t stride_channel_dst = 0; const int64_t nsamples_x = 1; const int64_t nsamples_dst = 1; const int64_t stride_sample_x = 0; const int64_t stride_sample_y = 0; const int64_t stride_sample_dst = 0; ggml_cuda_mm_fusion_args_device empty{}; switch (src0->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0_dd_i; mul_mat_vec_f_cuda(src0_d, src1_ddf_i, nullptr, empty, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); } break; case GGML_TYPE_F16: { const half * src0_d = (const half *) src0_dd_i; mul_mat_vec_f_cuda(src0_d, src1_ddf_i, nullptr, empty, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); } break; case GGML_TYPE_BF16: { const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0_dd_i; mul_mat_vec_f_cuda(src0_d, src1_ddf_i, nullptr, empty, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); } break; default: GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type)); } GGML_UNUSED_VARS(ctx, src1, dst, src1_ddq_i, src1_ncols, src1_padded_row_size); } bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, const size_t * src0_nb, int64_t ne11) { if (src0_ne[0] % 2 != 0) { return false; } const size_t ts = ggml_type_size(type); if (src0_nb[0] != ts) { return false; } // Pointers not aligned to the size of half2/nv_bfloat162/float2 would result in a crash: for (size_t i = 1; i < GGML_MAX_DIMS; ++i) { if (src0_nb[i] % (2*ts) != 0) { return false; } } switch (type) { case GGML_TYPE_F32: if (GGML_CUDA_CC_IS_NVIDIA(cc)) { if (ampere_mma_available(cc)) { return ne11 <= 3; } if (cc >= GGML_CUDA_CC_TURING) { return ne11 <= 4; } return ne11 <= 3; } else if (GGML_CUDA_CC_IS_AMD(cc)) { if (fp32_mma_hardware_available(cc)) { return ne11 <= 3; } return ne11 <= 8; } return ne11 <= 8; case GGML_TYPE_F16: if (GGML_CUDA_CC_IS_NVIDIA(cc)) { const bool src0_small = (src0_ne[1] <= 512 || src0_ne[2]*src0_ne[3] == 1); if (ampere_mma_available(cc)) { return src0_small && ne11 == 1; } if (cc >= GGML_CUDA_CC_ADA_LOVELACE) { return src0_small && ne11 <= 4; } if (fp16_mma_hardware_available(cc)) { return src0_small && ne11 <= 3; } return ne11 <= 8; } else if (GGML_CUDA_CC_IS_AMD(cc)) { if (fp16_mma_hardware_available(cc)) { if (GGML_CUDA_CC_IS_RDNA3(cc)) { return ne11 <= 3; } if (GGML_CUDA_CC_IS_RDNA4(cc)) { return ne11 <= 5; } return ne11 <= 2; } return ne11 <= 8; } return ne11 <= 8; case GGML_TYPE_BF16: if (GGML_CUDA_CC_IS_NVIDIA(cc)) { const bool src0_small = (src0_ne[1] <= 512 || src0_ne[2]*src0_ne[3] == 1); if (ampere_mma_available(cc)) { return src0_small && ne11 == 1; } if (cc >= GGML_CUDA_CC_ADA_LOVELACE) { return src0_small && ne11 <= 4; } if (bf16_mma_hardware_available(cc)) { 
return src0_small && ne11 <= 3; } return ne11 <= 8; } else if (GGML_CUDA_CC_IS_AMD(cc)) { if (bf16_mma_hardware_available(cc)) { return ne11 <= 3; } return ne11 <= 8; } return ne11 <= 8; default: return false; } } ggml-org-ggml-3678254/src/ggml-cuda/mmvf.cuh000066400000000000000000000013511512524704700204610ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_mul_mat_vec_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, const ggml_cuda_mm_fusion_args_host * fusion = nullptr); void ggml_cuda_op_mul_mat_vec_f( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream); bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, const size_t * src0_nb, int64_t ne11); ggml-org-ggml-3678254/src/ggml-cuda/mmvq.cu000066400000000000000000001113151512524704700203260ustar00rootroot00000000000000#include "mmvq.cuh" #include "quantize.cuh" #include "unary.cuh" #include "vecdotq.cuh" #include typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs); static constexpr __device__ vec_dot_q_cuda_t get_vec_dot_q_cuda(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: return vec_dot_q4_0_q8_1; case GGML_TYPE_Q4_1: return vec_dot_q4_1_q8_1; case GGML_TYPE_Q5_0: return vec_dot_q5_0_q8_1; case GGML_TYPE_Q5_1: return vec_dot_q5_1_q8_1; case GGML_TYPE_Q8_0: return vec_dot_q8_0_q8_1; case GGML_TYPE_MXFP4: return vec_dot_mxfp4_q8_1; case GGML_TYPE_Q2_K: return vec_dot_q2_K_q8_1; case GGML_TYPE_Q3_K: return vec_dot_q3_K_q8_1; case GGML_TYPE_Q4_K: return vec_dot_q4_K_q8_1; case GGML_TYPE_Q5_K: return vec_dot_q5_K_q8_1; case GGML_TYPE_Q6_K: return vec_dot_q6_K_q8_1; case GGML_TYPE_IQ2_XXS: return vec_dot_iq2_xxs_q8_1; case GGML_TYPE_IQ2_XS: return vec_dot_iq2_xs_q8_1; case GGML_TYPE_IQ2_S: return vec_dot_iq2_s_q8_1; case GGML_TYPE_IQ3_XXS: return vec_dot_iq3_xxs_q8_1; case GGML_TYPE_IQ1_S: return vec_dot_iq1_s_q8_1; case GGML_TYPE_IQ1_M: return vec_dot_iq1_m_q8_1; case GGML_TYPE_IQ4_NL: return vec_dot_iq4_nl_q8_1; case GGML_TYPE_IQ4_XS: return vec_dot_iq4_xs_q8_1; case GGML_TYPE_IQ3_S: return vec_dot_iq3_s_q8_1; default: return nullptr; } } static constexpr __device__ int get_vdr_mmvq(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: return VDR_Q4_0_Q8_1_MMVQ; case GGML_TYPE_Q4_1: return VDR_Q4_1_Q8_1_MMVQ; case GGML_TYPE_Q5_0: return VDR_Q5_0_Q8_1_MMVQ; case GGML_TYPE_Q5_1: return VDR_Q5_1_Q8_1_MMVQ; case GGML_TYPE_Q8_0: return VDR_Q8_0_Q8_1_MMVQ; case GGML_TYPE_MXFP4: return VDR_MXFP4_Q8_1_MMVQ; case GGML_TYPE_Q2_K: return VDR_Q2_K_Q8_1_MMVQ; case GGML_TYPE_Q3_K: return VDR_Q3_K_Q8_1_MMVQ; case GGML_TYPE_Q4_K: return VDR_Q4_K_Q8_1_MMVQ; case GGML_TYPE_Q5_K: return VDR_Q5_K_Q8_1_MMVQ; case GGML_TYPE_Q6_K: return VDR_Q6_K_Q8_1_MMVQ; case GGML_TYPE_IQ2_XXS: return VDR_IQ2_XXS_Q8_1_MMVQ; case GGML_TYPE_IQ2_XS: return VDR_IQ2_XS_Q8_1_MMVQ; case GGML_TYPE_IQ2_S: return VDR_IQ2_S_Q8_1_MMVQ; case GGML_TYPE_IQ3_XXS: return VDR_IQ3_XXS_Q8_1_MMVQ; case GGML_TYPE_IQ3_S: return VDR_IQ3_S_Q8_1_MMVQ; case GGML_TYPE_IQ4_NL: return VDR_IQ4_NL_Q8_1_MMVQ; case GGML_TYPE_IQ4_XS: return VDR_IQ4_XS_Q8_1_MMVQ; default: return 1; } } enum mmvq_parameter_table_id { 
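    // Selects which nwarps/rows_per_block table calc_nwarps() and calc_rows_per_block() use,
    // based on the GPU family (see get_device_table_id() below): RDNA2/RDNA3/RDNA4 map to
    // MMVQ_PARAMETERS_RDNA2, GCN/CDNA to MMVQ_PARAMETERS_GCN, everything else (including NVIDIA)
    // to MMVQ_PARAMETERS_GENERIC.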
MMVQ_PARAMETERS_GENERIC = 0, MMVQ_PARAMETERS_GCN, MMVQ_PARAMETERS_RDNA2 }; static constexpr __device__ mmvq_parameter_table_id get_device_table_id() { #if defined(RDNA2) || defined(RDNA3) || defined(RDNA4) return MMVQ_PARAMETERS_RDNA2; #elif defined(GCN) || defined(CDNA) return MMVQ_PARAMETERS_GCN; #else return MMVQ_PARAMETERS_GENERIC; #endif } static __host__ mmvq_parameter_table_id get_device_table_id(int cc) { if (GGML_CUDA_CC_IS_RDNA2(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { return MMVQ_PARAMETERS_RDNA2; } if (GGML_CUDA_CC_IS_GCN(cc) || GGML_CUDA_CC_IS_CDNA(cc)) { return MMVQ_PARAMETERS_GCN; } return MMVQ_PARAMETERS_GENERIC; } static constexpr __host__ __device__ int calc_nwarps(int ncols_dst, mmvq_parameter_table_id table_id) { if (table_id == MMVQ_PARAMETERS_GENERIC) { switch (ncols_dst) { case 1: case 2: case 3: case 4: return 4; case 5: case 6: case 7: case 8: return 2; default: return 1; } } else if (table_id == MMVQ_PARAMETERS_GCN) { switch (ncols_dst) { case 1: case 2: case 3: case 4: return 2; case 5: case 6: case 7: case 8: default: return 1; } } return 1; } static constexpr __host__ __device__ int calc_rows_per_block(int ncols_dst, int table_id) { if (table_id == MMVQ_PARAMETERS_GENERIC || table_id == MMVQ_PARAMETERS_GCN) { switch (ncols_dst) { case 1: return 1; case 2: case 3: case 4: case 5: case 6: case 7: case 8: return 2; default: return 1; } } return 1; } // tell the compiler to use as many registers as it wants, see nwarps definition below template __launch_bounds__(calc_nwarps(ncols_dst, get_device_table_id())*ggml_cuda_get_physical_warp_size(), 1) static __global__ void mul_mat_vec_q( const void * __restrict__ vx, const void * __restrict__ vy, const int32_t * __restrict__ ids, const ggml_cuda_mm_fusion_args_device fusion, float * __restrict__ dst, const uint32_t ncols_x, const uint3 nchannels_y, const uint32_t stride_row_x, const uint32_t stride_col_y, const uint32_t stride_col_dst, const uint3 channel_ratio, const uint32_t stride_channel_x, const uint32_t stride_channel_y, const uint32_t stride_channel_dst, const uint3 sample_ratio, const uint32_t stride_sample_x, const uint32_t stride_sample_y, const uint32_t stride_sample_dst) { constexpr int qk = ggml_cuda_type_traits::qk; constexpr int qi = ggml_cuda_type_traits::qi; constexpr int vdr = get_vdr_mmvq(type); constexpr mmvq_parameter_table_id table_id = get_device_table_id(); constexpr int nwarps = calc_nwarps(ncols_dst, table_id); constexpr int rows_per_cuda_block = calc_rows_per_block(ncols_dst, table_id); constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr vec_dot_q_cuda_t vec_dot_q_cuda = get_vec_dot_q_cuda(type); const int tid = warp_size*threadIdx.y + threadIdx.x; const int row0 = rows_per_cuda_block*blockIdx.x; const int blocks_per_row_x = ncols_x / qk; constexpr int blocks_per_iter = vdr * nwarps*warp_size / qi; // The MUL_MAT_ID code path with ids != nullptr is only implemented for ncols_dst == 1. const uint32_t channel_dst = blockIdx.y; const uint32_t channel_x = ncols_dst == 1 && ids ? ids[channel_dst] : fastdiv(channel_dst, channel_ratio); const uint32_t channel_y = ncols_dst == 1 && ids ? 
fastmodulo(channel_dst, nchannels_y) : channel_dst; const uint32_t sample_dst = blockIdx.z; const uint32_t sample_x = fastdiv(sample_dst, sample_ratio); const uint32_t sample_y = sample_dst; bool use_gate = false; bool use_bias = false; bool use_gate_bias = false; const void * vgate = nullptr; const float * x_bias = nullptr; const float * gate_bias = nullptr; ggml_glu_op active_glu; if constexpr (has_fusion) { use_gate = fusion.gate != nullptr; use_bias = fusion.x_bias != nullptr; use_gate_bias = fusion.gate_bias != nullptr && use_gate; vgate = fusion.gate; x_bias = (const float *) fusion.x_bias; gate_bias = (const float *) fusion.gate_bias; active_glu = fusion.glu_op; } const uint32_t channel_bias = ids ? channel_x : channel_dst; float x_biases[ncols_dst] = { 0.0f }; float gate_biases[ncols_dst] = { 0.0f }; if constexpr (has_fusion) { if (use_bias) { x_bias = x_bias + sample_dst*stride_sample_dst + channel_bias*stride_channel_dst + row0; // 1. Hide latency by prefetching bias and gate here // 2. load only on threads that won't die after partial sum calculation if (threadIdx.x < rows_per_cuda_block && threadIdx.y == 0 && (rows_per_cuda_block == 1 || uint32_t(row0 + threadIdx.x) < stride_col_dst)) { #pragma unroll for (int j = 0; j < ncols_dst; ++j) { x_biases[j] = x_bias[j * stride_col_dst + threadIdx.x]; } } } if (use_gate_bias) { gate_bias = gate_bias + sample_dst*stride_sample_dst + channel_bias*stride_channel_dst + row0; if (threadIdx.x < rows_per_cuda_block && threadIdx.y == 0 && (rows_per_cuda_block == 1 || uint32_t(row0 + threadIdx.x) < stride_col_dst)) { #pragma unroll for (int j = 0; j < ncols_dst; ++j) { gate_biases[j] = gate_bias[j * stride_col_dst + threadIdx.x]; } } } } // partial sum for each thread float tmp[ncols_dst][rows_per_cuda_block] = {{0.0f}}; float tmp_gate[ncols_dst][rows_per_cuda_block] = {{0.0f}}; const block_q8_1 * y = ((const block_q8_1 *) vy) + sample_y*stride_sample_y + channel_y*stride_channel_y; const int kbx_offset = sample_x*stride_sample_x + channel_x*stride_channel_x + row0*stride_row_x; for (int kbx = tid / (qi/vdr); kbx < blocks_per_row_x; kbx += blocks_per_iter) { const int kby = kbx * (qk/QK8_1); // y block index that aligns with kbx // x block quant index when casting the quants to int const int kqs = vdr * (tid % (qi/vdr)); #pragma unroll for (int j = 0; j < ncols_dst; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { tmp[j][i] += vec_dot_q_cuda( vx, &y[j*stride_col_y + kby], kbx_offset + i*stride_row_x + kbx, kqs); if constexpr (has_fusion) { if (use_gate) { tmp_gate[j][i] += vec_dot_q_cuda( vgate, &y[j*stride_col_y + kby], kbx_offset + i*stride_row_x + kbx, kqs); } } } } } __shared__ float tmp_shared[nwarps-1 > 0 ? nwarps-1 : 1][ncols_dst][rows_per_cuda_block][warp_size]; __shared__ float tmp_shared_gate[(has_fusion && (nwarps-1 > 0)) ? 
nwarps-1 : 1][ncols_dst][rows_per_cuda_block][warp_size]; if constexpr (!has_fusion) { (void) tmp_shared_gate; } else if (!use_gate) { (void) tmp_shared_gate; } if (threadIdx.y > 0) { #pragma unroll for (int j = 0; j < ncols_dst; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { tmp_shared[threadIdx.y-1][j][i][threadIdx.x] = tmp[j][i]; if constexpr (has_fusion) { if (use_gate) { tmp_shared_gate[threadIdx.y-1][j][i][threadIdx.x] = tmp_gate[j][i]; } } } } } __syncthreads(); if (threadIdx.y > 0) { return; } dst += sample_dst*stride_sample_dst + channel_dst*stride_channel_dst + row0; // sum up partial sums and write back result #pragma unroll for (int j = 0; j < ncols_dst; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { #pragma unroll for (int l = 0; l < nwarps-1; ++l) { tmp[j][i] += tmp_shared[l][j][i][threadIdx.x]; if constexpr (has_fusion) { if (use_gate) { tmp_gate[j][i] += tmp_shared_gate[l][j][i][threadIdx.x]; } } } tmp[j][i] = warp_reduce_sum(tmp[j][i]); if constexpr (has_fusion) { if (use_gate) { tmp_gate[j][i] = warp_reduce_sum(tmp_gate[j][i]); } } } if (threadIdx.x < rows_per_cuda_block && (rows_per_cuda_block == 1 || uint32_t(row0 + threadIdx.x) < stride_col_dst)) { float result = tmp[j][threadIdx.x]; if constexpr (has_fusion) { if (use_bias) { result += x_biases[j]; } if (use_gate) { float gate_value = tmp_gate[j][threadIdx.x]; if (use_gate_bias) { gate_value += gate_biases[j]; } switch (active_glu) { case GGML_GLU_OP_SWIGLU: result *= ggml_cuda_op_silu_single(gate_value); break; case GGML_GLU_OP_GEGLU: result *= ggml_cuda_op_gelu_single(gate_value); break; case GGML_GLU_OP_SWIGLU_OAI: { result = ggml_cuda_op_swiglu_oai_single(gate_value, result); break; } default: result = result * gate_value; break; } } } dst[j*stride_col_dst + threadIdx.x] = result; } } if constexpr (!has_fusion) { GGML_UNUSED_VARS(use_gate, use_bias, use_gate_bias, active_glu, gate_bias, x_bias, tmp_gate); } } static std::pair calc_launch_params( const int ncols_dst, const int nrows_x, const int nchannels_y, const int nsamples_y, const int warp_size, const mmvq_parameter_table_id table_id) { const int64_t nblocks = (nrows_x + calc_rows_per_block(ncols_dst, table_id) - 1) / calc_rows_per_block(ncols_dst, table_id); const dim3 block_nums(nblocks, nchannels_y, nsamples_y); const dim3 block_dims(warp_size, calc_nwarps(ncols_dst, table_id), 1); return {block_nums, block_dims}; } template static void mul_mat_vec_q_switch_fusion( const void * vx, const void * vy, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const uint32_t ncols_x, const uint3 nchannels_y, const uint32_t stride_row_x, const uint32_t stride_col_y, const uint32_t stride_col_dst, const uint3 channel_ratio, const uint32_t stride_channel_x, const uint32_t stride_channel_y, const uint32_t stride_channel_dst, const uint3 sample_ratio, const uint32_t stride_sample_x, const uint32_t stride_sample_y, const uint32_t stride_sample_dst, const dim3 & block_nums, const dim3 & block_dims, const int nbytes_shared, cudaStream_t stream) { const bool has_fusion = fusion.gate != nullptr || fusion.x_bias != nullptr || fusion.gate_bias != nullptr; if constexpr (c_ncols_dst == 1) { if (has_fusion) { mul_mat_vec_q<<>> (vx, vy, ids, fusion, dst, ncols_x, nchannels_y, stride_row_x, stride_col_y, stride_col_dst, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); return; } } GGML_ASSERT(!has_fusion && "fusion only 
supported for ncols_dst=1"); mul_mat_vec_q<<>> (vx, vy, ids, fusion, dst, ncols_x, nchannels_y, stride_row_x, stride_col_y, stride_col_dst, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } template static void mul_mat_vec_q_switch_ncols_dst( const void * vx, const void * vy, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const int ncols_x, const int nrows_x, const int ncols_dst, const int stride_row_x, const int stride_col_y, const int stride_col_dst, const int nchannels_x, const int nchannels_y, const int nchannels_dst, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const int nsamples_x, const int nsamples_dst, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst, cudaStream_t stream) { GGML_ASSERT(ncols_x % ggml_blck_size(type) == 0); GGML_ASSERT(ncols_dst <= MMVQ_MAX_BATCH_SIZE); const uint3 nchannels_y_fd = ids ? init_fastdiv_values(nchannels_y) : make_uint3(0, 0, 0); const uint3 channel_ratio_fd = ids ? make_uint3(0, 0, 0) : init_fastdiv_values(nchannels_dst / nchannels_x); const uint3 sample_ratio_fd = init_fastdiv_values(nsamples_dst / nsamples_x); const int device = ggml_cuda_get_device(); const int warp_size = ggml_cuda_info().devices[device].warp_size; const mmvq_parameter_table_id table_id = get_device_table_id(ggml_cuda_info().devices[device].cc); const bool has_fusion = fusion.gate != nullptr || fusion.x_bias != nullptr || fusion.gate_bias != nullptr; GGML_ASSERT(!ids || ncols_dst == 1); switch (ncols_dst) { case 1: { constexpr int c_ncols_dst = 1; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 2: { constexpr int c_ncols_dst = 2; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 3: { constexpr int c_ncols_dst = 3; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 4: { constexpr int c_ncols_dst = 4; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 5: { constexpr int c_ncols_dst = 5; std::pair dims = 
calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 6: { constexpr int c_ncols_dst = 6; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 7: { constexpr int c_ncols_dst = 7; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; case 8: { constexpr int c_ncols_dst = 8; std::pair dims = calc_launch_params(c_ncols_dst, nrows_x, nchannels_dst, nsamples_dst, warp_size, table_id); mul_mat_vec_q_switch_fusion(vx, vy, ids, fusion, dst, ncols_x, nchannels_y_fd, stride_row_x, stride_col_y, stride_col_dst, channel_ratio_fd, stride_channel_x, stride_channel_y, stride_channel_dst, sample_ratio_fd, stride_sample_x, stride_sample_y, stride_sample_dst, dims.first, dims.second, 0, stream); } break; default: GGML_ABORT("fatal error"); break; } GGML_UNUSED(has_fusion); } static void mul_mat_vec_q_switch_type( const void * vx, const ggml_type type_x, const void * vy, const int32_t * ids, const ggml_cuda_mm_fusion_args_device fusion, float * dst, const int ncols_x, const int nrows_x, const int ncols_dst, const int stride_row_x, const int stride_col_y, const int stride_col_dst, const int nchannels_x, const int nchannels_y, const int nchannels_dst, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, const int nsamples_x, const int nsamples_dst, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst, cudaStream_t stream) { switch (type_x) { case GGML_TYPE_Q4_0: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q4_1: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q5_0: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case 
GGML_TYPE_Q5_1: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q8_0: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_MXFP4: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q2_K: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q3_K: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q4_K: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q5_K: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_Q6_K: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ2_XXS: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ2_XS: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ2_S: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, 
stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ3_XXS: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ1_S: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ1_M: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ4_NL: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ4_XS: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; case GGML_TYPE_IQ3_S: mul_mat_vec_q_switch_ncols_dst (vx, vy, ids, fusion, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); break; default: GGML_ABORT("fatal error"); break; } } void ggml_cuda_mul_mat_vec_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, const ggml_cuda_mm_fusion_args_host * fusion) { GGML_ASSERT( src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(!ids || ids->type == GGML_TYPE_I32); // Optional, used for batched GGML_MUL_MAT_ID. GGML_TENSOR_BINARY_OP_LOCALS; cudaStream_t stream = ctx.stream(); const size_t ts_src0 = ggml_type_size(src0->type); const size_t ts_src1 = ggml_type_size(src1->type); const size_t ts_dst = ggml_type_size(dst->type); GGML_ASSERT( nb00 == ts_src0); GGML_ASSERT( nb10 == ts_src1); GGML_ASSERT( nb0 == ts_dst); GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type)); GGML_ASSERT(!ids || ne12 == 1); // Implementation is only correct for batch size 1. const float * src1_d = (const float *) src1->data; const int32_t * ids_d = ids ? 
(const int32_t *) ids->data : nullptr; float * dst_d = (float *) dst->data; ggml_cuda_mm_fusion_args_device fusion_local{}; if (fusion) { GGML_ASSERT( !ids || dst->ne[2] == 1); GGML_ASSERT( ids || dst->ne[1] == 1); if (fusion->x_bias) { GGML_ASSERT(fusion->x_bias->type == GGML_TYPE_F32); GGML_ASSERT(fusion->x_bias->ne[0] == dst->ne[0]); GGML_ASSERT(!ids || fusion->x_bias->ne[1] == src0->ne[2]); fusion_local.x_bias = fusion->x_bias->data; } if (fusion->gate) { GGML_ASSERT(fusion->gate->type == src0->type && ggml_are_same_stride(fusion->gate, src0)); fusion_local.gate = fusion->gate->data; } if (fusion->gate_bias) { GGML_ASSERT(fusion->gate_bias->type == GGML_TYPE_F32); GGML_ASSERT(fusion->gate_bias->ne[0] == dst->ne[0]); GGML_ASSERT(!ids || fusion->gate_bias->ne[1] == src0->ne[2]); fusion_local.gate_bias = fusion->gate_bias->data; } fusion_local.glu_op = fusion->glu_op; } // If src0 is a temporary compute buffer, clear any potential padding. if (ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE) { const size_t size_data = ggml_nbytes(src0); const size_t size_alloc = ggml_backend_buffer_get_alloc_size(src0->buffer, src0); if (size_alloc > size_data) { GGML_ASSERT(ggml_is_contiguously_allocated(src0)); GGML_ASSERT(!src0->view_src); CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream)); } } const int64_t ne10_padded = GGML_PAD(ne10, MATRIX_ROW_PADDING); ggml_cuda_pool_alloc src1_q8_1(ctx.pool(), ne13*ne12 * ne11*ne10_padded * sizeof(block_q8_1)/QK8_1); { const int64_t s11 = src1->nb[1] / ts_src1; const int64_t s12 = src1->nb[2] / ts_src1; const int64_t s13 = src1->nb[3] / ts_src1; quantize_row_q8_1_cuda(src1_d, nullptr, src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11, ne12, ne13, stream); } const int64_t s01 = src0->nb[1] / ts_src0; const int64_t s11 = ne10_padded / QK8_1; const int64_t s1 = dst->nb[1] / ts_dst; const int64_t s02 = src0->nb[2] / ts_src0; const int64_t s2 = dst->nb[2] / ts_dst; const int64_t s03 = src0->nb[3] / ts_src0; const int64_t s3 = dst->nb[3] / ts_dst; const int64_t s12 = ne11*s11; const int64_t s13 = ne12*s12; // For MUL_MAT_ID the memory layout is different than for MUL_MAT: const int64_t ncols_dst = ids ? ne2 : ne1; const int64_t nchannels_y = ids ? ne11 : ne12; const int64_t nchannels_dst = ids ? ne1 : ne2; const int64_t stride_col_dst = ids ? s2 : s1; const int64_t stride_col_y = ids ? s12 : s11; const int64_t stride_channel_dst = ids ? s1 : s2; const int64_t stride_channel_y = ids ? 
s11 : s12; mul_mat_vec_q_switch_type( src0->data, src0->type, src1_q8_1.get(), ids_d, fusion_local, dst_d, ne00, ne01, ncols_dst, s01, stride_col_y, stride_col_dst, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, stream); } void ggml_cuda_op_mul_mat_vec_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream) { const int64_t ne00 = src0->ne[0]; const int64_t row_diff = row_high - row_low; const int64_t ne10 = src1->ne[0]; GGML_ASSERT(ne10 % QK8_1 == 0); const int64_t ne0 = dst->ne[0]; int id = ggml_cuda_get_device(); // the main device has a larger memory buffer to hold the results from all GPUs // nrows_dst == nrows of the matrix that the kernel writes into const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff; const int stride_row_x = ne00 / ggml_blck_size(src0->type); const int stride_col_y = src1_padded_row_size / QK8_1; ggml_cuda_mm_fusion_args_device fusion_local{}; mul_mat_vec_q_switch_type( src0_dd_i, src0->type, src1_ddq_i, nullptr, fusion_local, dst_dd_i, ne00, row_diff, src1_ncols, stride_row_x, stride_col_y, nrows_dst, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, stream); GGML_UNUSED_VARS(src1, dst, src1_ddf_i, src1_ncols, src1_padded_row_size); } ggml-org-ggml-3678254/src/ggml-cuda/mmvq.cuh000066400000000000000000000012751512524704700205010ustar00rootroot00000000000000#include "common.cuh" #define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels. void ggml_cuda_mul_mat_vec_q(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, const ggml_cuda_mm_fusion_args_host * fusion = nullptr); void ggml_cuda_op_mul_mat_vec_q( ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream); ggml-org-ggml-3678254/src/ggml-cuda/norm.cu000066400000000000000000000721271512524704700203300ustar00rootroot00000000000000#include "norm.cuh" #include template static __global__ void norm_f32( const float * x, float * dst, const int ncols, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps) { const int nrows = gridDim.x; const int nchannels = gridDim.y; const int row = blockIdx.x; const int channel = blockIdx.y; const int sample = blockIdx.z; const int tid = threadIdx.x; x += sample*stride_sample + channel*stride_channel + row*stride_row; dst += ((sample*nchannels + channel)*nrows + row)*ncols; float2 mean_var = make_float2(0.0f, 0.0f); for (int col = tid; col < ncols; col += block_size) { const float xi = x[col]; mean_var.x += xi; mean_var.y += xi * xi; } // sum up partial sums mean_var = warp_reduce_sum(mean_var); if constexpr (block_size > WARP_SIZE) { static_assert(block_size == 1024, "unexpected block_size"); __shared__ float2 s_sum[32]; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = mean_var; } __syncthreads(); mean_var = s_sum[lane_id]; mean_var = warp_reduce_sum(mean_var); } const float mean = 
mean_var.x / ncols; const float var = mean_var.y / ncols - mean * mean; const float inv_std = rsqrtf(var + eps); for (int col = tid; col < ncols; col += block_size) { dst[col] = (x[col] - mean) * inv_std; } } template static __global__ void group_norm_f32(const float * x, float * dst, const int group_size, const int ne_elements, const float eps) { // blockIdx.x: num_groups idx // threadIdx.x: block_size idx const int start = blockIdx.x*group_size + threadIdx.x; const int end = min(blockIdx.x*group_size + group_size, ne_elements); float tmp = 0.0f; // partial sum for thread in warp for (int j = start; j < end; j += block_size) { tmp += x[j]; } tmp = warp_reduce_sum(tmp); if constexpr (block_size > WARP_SIZE) { static_assert(block_size == 1024, "unexpected block_size"); __shared__ float s_sum[32]; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = tmp; } __syncthreads(); tmp = s_sum[lane_id]; tmp = warp_reduce_sum(tmp); } const float mean = tmp / group_size; tmp = 0.0f; for (int j = start; j < end; j += block_size) { const float xi = x[j] - mean; dst[j] = xi; tmp += xi * xi; } tmp = warp_reduce_sum(tmp); if (block_size > WARP_SIZE) { __shared__ float s_sum[32]; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = tmp; } __syncthreads(); tmp = s_sum[lane_id]; tmp = warp_reduce_sum(tmp); } const float variance = tmp / group_size; const float scale = rsqrtf(variance + eps); for (int j = start; j < end; j += block_size) { dst[j] *= scale; } } template static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, const float * mul = nullptr, const int64_t mul_stride_row = 0, const int64_t mul_stride_channel = 0, const int64_t mul_stride_sample = 0, const uint3 mul_ncols_packed = make_uint3(0, 0, 0), const uint3 mul_nrows_packed = make_uint3(0, 0, 0), const uint3 mul_nchannels_packed = make_uint3(0, 0, 0), const uint3 mul_nsamples_packed = make_uint3(0, 0, 0), const float * add = nullptr, const int64_t add_stride_row = 0, const int64_t add_stride_channel = 0, const int64_t add_stride_sample = 0, const uint3 add_ncols_packed = make_uint3(0, 0, 0), const uint3 add_nrows_packed = make_uint3(0, 0, 0), const uint3 add_nchannels_packed = make_uint3(0, 0, 0), const uint3 add_nsamples_packed = make_uint3(0, 0, 0)) { const int nrows = gridDim.x; const int nchannels = gridDim.y; const int row = blockIdx.x; const int channel = blockIdx.y; const int sample = blockIdx.z; const int tid = threadIdx.x; static_assert(!do_add || do_multiply, "fusing add is not supported without multiplying"); x += sample*stride_sample + channel*stride_channel + row*stride_row; dst += ((sample*nchannels + channel)*nrows + row)*ncols; if constexpr (do_multiply) { const uint32_t mul_row = fastmodulo(row, mul_nrows_packed); const uint32_t mul_channel = fastmodulo(channel, mul_nchannels_packed); const uint32_t mul_sample = fastmodulo(sample, mul_nsamples_packed); mul += mul_sample * mul_stride_sample + mul_channel * mul_stride_channel + mul_row * mul_stride_row; } if constexpr (do_add) { const int add_row = fastmodulo(row, add_nrows_packed); const int add_channel = fastmodulo(channel, add_nchannels_packed); const int add_sample = fastmodulo(sample, add_nsamples_packed); add += add_sample * add_stride_sample + add_channel * add_stride_channel + add_row * add_stride_row; 
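        // at this point the fused mul/add pointers are already offset to the matching broadcast
        // row/channel/sample, so the column loop below only needs fastmodulo over ncols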
} float tmp = 0.0f; // partial sum for thread in warp for (int col = tid; col < ncols; col += block_size) { const float xi = x[col]; tmp += xi * xi; } // sum up partial sums tmp = warp_reduce_sum(tmp); if constexpr (block_size > WARP_SIZE) { static_assert((block_size <= 1024) && (block_size % 32 == 0), "unexpected block_size"); __shared__ float s_sum[32]; const int warp_id = tid / WARP_SIZE; const int lane_id = tid % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = tmp; } __syncthreads(); tmp = 0.0f; if (lane_id < (block_size / WARP_SIZE)) { tmp = s_sum[lane_id]; } tmp = warp_reduce_sum(tmp); } const float mean = tmp / ncols; const float scale = rsqrtf(mean + eps); for (int col = tid; col < ncols; col += block_size) { if constexpr (do_multiply && do_add) { const int mul_col = fastmodulo(col, mul_ncols_packed); const int add_col = fastmodulo(col, add_ncols_packed); dst[col] = scale * x[col] * mul[mul_col] + add[add_col]; } else if constexpr (do_multiply) { const int mul_col = fastmodulo(col, mul_ncols_packed); dst[col] = scale * x[col] * mul[mul_col]; } else { dst[col] = scale * x[col]; } } } template static __global__ void rms_norm_back_f32( const float * grad, const float * xf, float * dst, const int ncols, const float eps) { const int row = blockIdx.x*blockDim.y + threadIdx.y; const int tid = threadIdx.x; grad += int64_t(row)*ncols; xf += int64_t(row)*ncols; dst += int64_t(row)*ncols; float sum_xx = 0.0f; // sum for squares of x, equivalent to forward pass float sum_xg = 0.0f; // sum for x * gradient, needed because RMS norm mixes inputs for (int col = tid; col < ncols; col += block_size) { const float xfi = xf[col]; sum_xx += xfi * xfi; sum_xg += xfi * grad[col]; } // sum up partial sums sum_xx = warp_reduce_sum(sum_xx); sum_xg = warp_reduce_sum(sum_xg); if constexpr (block_size > WARP_SIZE) { static_assert(block_size == 1024, "unexpected block_size"); __shared__ float s_sum_xx[32]; __shared__ float s_sum_xg[32]; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum_xx[warp_id] = sum_xx; s_sum_xg[warp_id] = sum_xg; } __syncthreads(); sum_xx = s_sum_xx[lane_id]; sum_xx = warp_reduce_sum(sum_xx); sum_xg = s_sum_xg[lane_id]; sum_xg = warp_reduce_sum(sum_xg); } const float mean_eps = sum_xx / ncols + eps; const float sum_eps = sum_xx + ncols*eps; const float scale_grad = rsqrtf(mean_eps); const float scale_x = -scale_grad * sum_xg/sum_eps; for (int col = tid; col < ncols; col += block_size) { dst[col] = scale_grad*grad[col] + scale_x*xf[col]; } } // template // static __global__ void l2_norm_f32(const float * x, float * dst, const int ncols, const float eps) { // const int row = blockIdx.x*blockDim.y + threadIdx.y; // const int tid = threadIdx.x; // float tmp = 0.0f; // partial sum for thread in warp // for (int col = tid; col < ncols; col += block_size) { // const float xi = x[row*ncols + col]; // tmp += xi * xi; // } // // sum up partial sums // tmp = warp_reduce_sum(tmp); // if (block_size > WARP_SIZE) { // __shared__ float s_sum[32]; // int warp_id = threadIdx.x / WARP_SIZE; // int lane_id = threadIdx.x % WARP_SIZE; // if (lane_id == 0) { // s_sum[warp_id] = tmp; // } // __syncthreads(); // tmp = s_sum[lane_id]; // tmp = warp_reduce_sum(tmp); // } // // from https://pytorch.org/docs/stable/generated/torch.nn.functional.normalize.html // const float scale = rsqrtf(fmaxf(tmp, eps * eps)); // for (int col = tid; col < ncols; col += block_size) { // dst[row*ncols + col] = scale * x[row*ncols + col]; // } // } template static 
__global__ void l2_norm_f32( const float * x, float * dst, const int ncols, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps) { const int nrows = gridDim.x; const int nchannels = gridDim.y; const int row = blockIdx.x; const int channel = blockIdx.y; const int sample = blockIdx.z; const int tid = threadIdx.x; x += sample*stride_sample + channel*stride_channel + row*stride_row; dst += ((sample*nchannels + channel)*nrows + row)*ncols; float tmp = 0.0f; // partial sum for thread in warp for (int col = tid; col < ncols; col += block_size) { const float xi = x[col]; tmp += xi * xi; } // sum up partial sums tmp = warp_reduce_sum(tmp); if constexpr (block_size > WARP_SIZE) { static_assert(block_size == 1024, "unexpected block_size"); __shared__ float s_sum[32]; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = tmp; } __syncthreads(); tmp = s_sum[lane_id]; tmp = warp_reduce_sum(tmp); } // from https://pytorch.org/docs/stable/generated/torch.nn.functional.normalize.html const float scale = rsqrtf(fmaxf(tmp, eps * eps)); for (int col = tid; col < ncols; col += block_size) { dst[col] = scale * x[col]; } } static void norm_f32_cuda( const float * x, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, cudaStream_t stream) { const dim3 blocks_num(nrows, nchannels, nsamples); if (ncols < 1024) { const dim3 block_dims(WARP_SIZE, 1, 1); norm_f32<<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); } else { const dim3 block_dims(1024, 1, 1); norm_f32<1024><<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); } } static void group_norm_f32_cuda( const float * x, float * dst, const int num_groups, const float eps, const int group_size, const int ne_elements, cudaStream_t stream) { if (group_size < 1024) { const dim3 block_dims(WARP_SIZE, 1, 1); group_norm_f32<<>>(x, dst, group_size, ne_elements, eps); } else { const dim3 block_dims(1024, 1, 1); group_norm_f32<1024><<>>(x, dst, group_size, ne_elements, eps); } } static void rms_norm_f32_cuda( const float * x, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, cudaStream_t stream) { const dim3 blocks_num(nrows, nchannels, nsamples); if (ncols < 1024) { const dim3 block_dims(256, 1, 1); rms_norm_f32<256, false><<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); } else { const dim3 block_dims(1024, 1, 1); rms_norm_f32<1024, false><<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); } } static void rms_norm_mul_f32_cuda(const float * x, const float * mul, const float * add, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const int64_t mul_stride_row, const int64_t mul_stride_channel, const int64_t mul_stride_sample, const uint32_t mul_ncols, const uint32_t mul_nrows, const uint32_t mul_nchannels, const uint32_t mul_nsamples, const int64_t add_stride_row, const int64_t add_stride_channel, const int64_t add_stride_sample, const uint32_t add_ncols, const uint32_t add_nrows, const uint32_t add_nchannels, const uint32_t add_nsamples, const float eps, cudaStream_t stream) { const dim3 
blocks_num(nrows, nchannels, nsamples); if (mul == nullptr) { rms_norm_f32_cuda(x, dst, ncols, nrows, nchannels, nsamples, stride_row, stride_channel, stride_sample, eps, stream); return; } if (add == nullptr) { const uint3 mul_ncols_packed = init_fastdiv_values(mul_ncols); const uint3 mul_nrows_packed = init_fastdiv_values(mul_nrows); const uint3 mul_nchannels_packed = init_fastdiv_values(mul_nchannels); const uint3 mul_nsamples_packed = init_fastdiv_values(mul_nsamples); if (ncols < 1024) { const dim3 block_dims(256, 1, 1); rms_norm_f32<256, true><<>>( x, dst, ncols, stride_row, stride_channel, stride_sample, eps, mul, mul_stride_row, mul_stride_channel, mul_stride_sample, mul_ncols_packed, mul_nrows_packed, mul_nchannels_packed, mul_nsamples_packed); } else { const dim3 block_dims(1024, 1, 1); rms_norm_f32<1024, true><<>>( x, dst, ncols, stride_row, stride_channel, stride_sample, eps, mul, mul_stride_row, mul_stride_channel, mul_stride_sample, mul_ncols_packed, mul_nrows_packed, mul_nchannels_packed, mul_nsamples_packed); } } else { const uint3 mul_ncols_packed = init_fastdiv_values(mul_ncols); const uint3 mul_nrows_packed = init_fastdiv_values(mul_nrows); const uint3 mul_nchannels_packed = init_fastdiv_values(mul_nchannels); const uint3 mul_nsamples_packed = init_fastdiv_values(mul_nsamples); const uint3 add_ncols_packed = init_fastdiv_values(add_ncols); const uint3 add_nrows_packed = init_fastdiv_values(add_nrows); const uint3 add_nchannels_packed = init_fastdiv_values(add_nchannels); const uint3 add_nsamples_packed = init_fastdiv_values(add_nsamples); if (ncols < 1024) { const dim3 block_dims(256, 1, 1); rms_norm_f32<256, true, true><<>>( x, dst, ncols, stride_row, stride_channel, stride_sample, eps, mul, mul_stride_row, mul_stride_channel, mul_stride_sample, mul_ncols_packed, mul_nrows_packed, mul_nchannels_packed, mul_nsamples_packed, add, add_stride_row, add_stride_channel, add_stride_sample, add_ncols_packed, add_nrows_packed, add_nchannels_packed, add_nsamples_packed); } else { const dim3 block_dims(1024, 1, 1); rms_norm_f32<1024, true, true><<>>( x, dst, ncols, stride_row, stride_channel, stride_sample, eps, mul, mul_stride_row, mul_stride_channel, mul_stride_sample, mul_ncols_packed, mul_nrows_packed, mul_nchannels_packed, mul_nsamples_packed, add, add_stride_row, add_stride_channel, add_stride_sample, add_ncols_packed, add_nrows_packed, add_nchannels_packed, add_nsamples_packed); } } } static void rms_norm_back_f32_cuda(const float * grad, const float * xf, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) { if (ncols < 1024) { const dim3 block_dims(WARP_SIZE, 1, 1); rms_norm_back_f32<<>>(grad, xf, dst, ncols, eps); } else { const dim3 block_dims(1024, 1, 1); rms_norm_back_f32<1024><<>>(grad, xf, dst, ncols, eps); } } static void l2_norm_f32_cuda( const float * x, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples, const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, cudaStream_t stream) { const dim3 blocks_num(nrows, nchannels, nsamples); if (ncols < 1024) { const dim3 block_dims(WARP_SIZE, 1, 1); l2_norm_f32<<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); } else { const dim3 block_dims(1024, 1, 1); l2_norm_f32<1024><<>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps); } } void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float 
*) src0->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_UNARY_OP_LOCALS; float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); const size_t ts0 = ggml_type_size(src0->type); GGML_ASSERT(nb00 == ts0); const int64_t s01 = nb01 / ts0; const int64_t s02 = nb02 / ts0; const int64_t s03 = nb03 / ts0; norm_f32_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s01, s02, s03, eps, stream); } void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); int num_groups = dst->op_params[0]; float eps; memcpy(&eps, dst->op_params + 1, sizeof(float)); GGML_ASSERT(eps >= 0.0f); int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups); group_norm_f32_cuda(src0_d, dst_d, num_groups * src0->ne[3], eps, group_size, ggml_nelements(src0), stream); } void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *) src0->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_UNARY_OP_LOCALS; float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); const size_t ts0 = ggml_type_size(src0->type); GGML_ASSERT(nb00 == ts0); const int64_t s01 = nb01 / ts0; const int64_t s02 = nb02 / ts0; const int64_t s03 = nb03 / ts0; rms_norm_f32_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s01, s02, s03, eps, stream); } void ggml_cuda_op_rms_norm_fused(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * mul_tensor) { const ggml_tensor * rms_norm_src = (ggml_tensor *) dst->src[0]; float eps = 0.0f; memcpy(&eps, dst->op_params, sizeof(float)); const float * src0_d = (const float *) rms_norm_src->data; const float * mul_d = nullptr; const ggml_tensor * mul_src = nullptr; if (mul_tensor->src[0] == dst) { mul_d = (float *) mul_tensor->src[1]->data; mul_src = mul_tensor->src[1]; } else if(mul_tensor->src[1] == dst) { mul_d = (float *) mul_tensor->src[0]->data; mul_src = mul_tensor->src[0]; } else { GGML_ASSERT(false); } float * dst_d = (float *) mul_tensor->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(rms_norm_src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(mul_tensor->type == GGML_TYPE_F32); GGML_ASSERT(eps >= 0.0f); const int64_t ne00 = rms_norm_src->ne[0]; const int64_t ne01 = rms_norm_src->ne[1]; const int64_t ne02 = rms_norm_src->ne[2]; const int64_t ne03 = rms_norm_src->ne[3]; const size_t ts0 = ggml_type_size(rms_norm_src->type); GGML_ASSERT(rms_norm_src->nb[0] == ts0); const int64_t s01 = rms_norm_src->nb[1] / ts0; const int64_t s02 = rms_norm_src->nb[2] / ts0; const int64_t s03 = rms_norm_src->nb[3] / ts0; const size_t ts_mul = ggml_type_size(mul_src->type); GGML_ASSERT(mul_src->nb[0] == ts_mul); const int64_t mul_s01 = mul_src->nb[1] / ts_mul; const int64_t mul_s02 = mul_src->nb[2] / ts_mul; const int64_t mul_s03 = mul_src->nb[3] / ts_mul; const int mul_ncols = mul_src->ne[0]; const int mul_nrows = mul_src->ne[1]; const int mul_nchannels = mul_src->ne[2]; const int mul_nsamples = mul_src->ne[3]; 
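    // reference semantics of the fused launch below (a sketch, not the optimized kernel):
    // for each row x of rms_norm_src:
    //   inv_rms  = rsqrtf(mean(x[col]*x[col] over ne00) + eps);
    //   dst[col] = x[col] * inv_rms * mul[col % mul_ncols];  // mul broadcast over rows/channels/samples as needed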
rms_norm_mul_f32_cuda(src0_d, mul_d, nullptr, dst_d, ne00, ne01, ne02, ne03, /*s00*/ s01, s02, s03, /*mul_s00*/ mul_s01, mul_s02, mul_s03, mul_ncols, mul_nrows, mul_nchannels, mul_nsamples, /*add_s00*/ 0, 0, 0, 0, 0, 0, 0, eps, stream); } void ggml_cuda_op_rms_norm_fused_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * mul_tensor, ggml_tensor * add_tensor) { const ggml_tensor * rms_norm_src = (ggml_tensor *) dst->src[0]; float eps = 0.0f; memcpy(&eps, dst->op_params, sizeof(float)); const float * src0_d = (const float *) rms_norm_src->data; const float * mul_d = nullptr; const ggml_tensor * mul_src = nullptr; if (mul_tensor->src[0] == dst) { mul_d = (float *) mul_tensor->src[1]->data; mul_src = mul_tensor->src[1]; } else if (mul_tensor->src[1] == dst) { mul_d = (float *) mul_tensor->src[0]->data; mul_src = mul_tensor->src[0]; } else { GGML_ASSERT(false); } const float * add_d = nullptr; const ggml_tensor * add_src = nullptr; if (add_tensor->src[0] == mul_tensor) { add_d = (float *) add_tensor->src[1]->data; add_src = add_tensor->src[1]; } else if (add_tensor->src[1] == mul_tensor) { add_d = (float *) add_tensor->src[0]->data; add_src = add_tensor->src[0]; } else { GGML_ASSERT(false); } float * dst_d = (float *) add_tensor->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(rms_norm_src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(mul_tensor->type == GGML_TYPE_F32); GGML_ASSERT(add_tensor->type == GGML_TYPE_F32); GGML_ASSERT(eps >= 0.0f); const int64_t ne00 = rms_norm_src->ne[0]; const int64_t ne01 = rms_norm_src->ne[1]; const int64_t ne02 = rms_norm_src->ne[2]; const int64_t ne03 = rms_norm_src->ne[3]; const size_t ts0 = ggml_type_size(rms_norm_src->type); GGML_ASSERT(rms_norm_src->nb[0] == ts0); const int64_t s01 = rms_norm_src->nb[1] / ts0; const int64_t s02 = rms_norm_src->nb[2] / ts0; const int64_t s03 = rms_norm_src->nb[3] / ts0; const size_t ts_mul = ggml_type_size(mul_src->type); GGML_ASSERT(mul_src->nb[0] == ts_mul); const int64_t mul_s01 = mul_src->nb[1] / ts_mul; const int64_t mul_s02 = mul_src->nb[2] / ts_mul; const int64_t mul_s03 = mul_src->nb[3] / ts_mul; const int mul_ncols = mul_src->ne[0]; const int mul_nrows = mul_src->ne[1]; const int mul_nchannels = mul_src->ne[2]; const int mul_nsamples = mul_src->ne[3]; const size_t ts_add = ggml_type_size(add_src->type); GGML_ASSERT(add_src->nb[0] == ts_add); const int64_t add_s01 = add_src->nb[1] / ts_add; const int64_t add_s02 = add_src->nb[2] / ts_add; const int64_t add_s03 = add_src->nb[3] / ts_add; const int add_ncols = add_src->ne[0]; const int add_nrows = add_src->ne[1]; const int add_nchannels = add_src->ne[2]; const int add_nsamples = add_src->ne[3]; rms_norm_mul_f32_cuda(src0_d, mul_d,add_d,dst_d, ne00,ne01, ne02, ne03, /*s00*/ s01, s02, s03, /*mul_s00*/ mul_s01, mul_s02, mul_s03, mul_ncols, mul_nrows, mul_nchannels, mul_nsamples, /*add_s00*/ add_s01, add_s02, add_s03, add_ncols, add_nrows, add_nchannels, add_nsamples, eps, stream); } void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * grad = dst->src[0]; // gradients const ggml_tensor * src0f = dst->src[1]; // src0 from forward pass const float * grad_d = (const float *) grad->data; const float * src0f_d = (const float *) src0f->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(grad)); GGML_ASSERT( grad->type == GGML_TYPE_F32); GGML_ASSERT(src0f->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == 
GGML_TYPE_F32); const int64_t ne00 = src0f->ne[0]; const int64_t nrows = ggml_nrows(src0f); float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); rms_norm_back_f32_cuda(grad_d, src0f_d, dst_d, ne00, nrows, eps, stream); } void ggml_cuda_op_l2_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *) src0->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_TENSOR_UNARY_OP_LOCALS; float eps; memcpy(&eps, dst->op_params, sizeof(float)); GGML_ASSERT(eps >= 0.0f); const size_t ts0 = ggml_type_size(src0->type); GGML_ASSERT(nb00 == ts0); const int64_t s01 = nb01 / ts0; const int64_t s02 = nb02 / ts0; const int64_t s03 = nb03 / ts0; l2_norm_f32_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s01, s02, s03, eps, stream); } ggml-org-ggml-3678254/src/ggml-cuda/norm.cuh000066400000000000000000000015061512524704700204710ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_rms_norm_fused(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * mul_tensor); void ggml_cuda_op_rms_norm_fused_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * mul_tensor, ggml_tensor * add_tensor); void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_l2_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/opt-step-adamw.cu000066400000000000000000000056621512524704700222170ustar00rootroot00000000000000#include "ggml-impl.h" #include "opt-step-adamw.cuh" #include static __global__ void opt_step_adamw_f32( float * __restrict__ x, const float * __restrict__ g, float * __restrict__ g_m, float * __restrict__ g_v, const float * __restrict__ pars, const int64_t k) { const int64_t i = (int64_t) blockIdx.x*blockDim.x + threadIdx.x; if (i >= k) { return; } const float alpha = pars[0]; const float beta1 = pars[1]; const float beta2 = pars[2]; const float eps = pars[3]; const float wd = pars[4]; const float beta1h = pars[5]; const float beta2h = pars[6]; const float gi = g[i]; const float gmi = g_m[i]*beta1 + gi*(1.0f - beta1); const float gvi = g_v[i]*beta2 + gi*gi*(1.0f - beta2); g_m[i] = gmi; g_v[i] = gvi; const float mh = gmi*beta1h; const float vh = sqrtf(gvi*beta2h) + eps; x[i] = x[i]*(1.0f - alpha*wd) - alpha*mh/vh; } static void opt_step_adamw_f32_cuda( float * x, const float * g, float * g_m, float * g_v, const float * pars, const int64_t k, cudaStream_t stream) { const dim3 block_dims(CUDA_OPT_STEP_ADAMW_BLOCK_SIZE, 1, 1); const dim3 block_nums((k + CUDA_OPT_STEP_ADAMW_BLOCK_SIZE - 1) / CUDA_OPT_STEP_ADAMW_BLOCK_SIZE, 1, 1); opt_step_adamw_f32<<>>(x, g, g_m, g_v, pars, k); } void ggml_cuda_opt_step_adamw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src0_grad = dst->src[1]; const ggml_tensor * src0_grad_m = dst->src[2]; const ggml_tensor * src0_grad_v = dst->src[3]; const ggml_tensor * adamw_params = dst->src[4]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src0_grad->type == GGML_TYPE_F32); GGML_ASSERT(src0_grad_m->type == GGML_TYPE_F32); GGML_ASSERT(src0_grad_v->type 
== GGML_TYPE_F32); GGML_ASSERT(adamw_params->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src0_grad)); GGML_ASSERT(ggml_is_contiguous(src0_grad_m)); GGML_ASSERT(ggml_is_contiguous(src0_grad_v)); GGML_ASSERT(ggml_is_contiguous(adamw_params)); GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); GGML_ASSERT(ggml_nelements(adamw_params) == 7); float * src0_d = (float *) src0->data; const float * src0_grad_d = (const float *) src0_grad->data; float * src0_grad_m_d = (float *) src0_grad_m->data; float * src0_grad_v_d = (float *) src0_grad_v->data; const float * adamw_params_d = (const float *) adamw_params->data; cudaStream_t stream = ctx.stream(); const int64_t ne = ggml_nelements(src0); opt_step_adamw_f32_cuda(src0_d, src0_grad_d, src0_grad_m_d, src0_grad_v_d, adamw_params_d, ne, stream); } ggml-org-ggml-3678254/src/ggml-cuda/opt-step-adamw.cuh000066400000000000000000000002261512524704700223560ustar00rootroot00000000000000#include "common.cuh" #define CUDA_OPT_STEP_ADAMW_BLOCK_SIZE 256 void ggml_cuda_opt_step_adamw(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/opt-step-sgd.cu000066400000000000000000000033371512524704700217000ustar00rootroot00000000000000#include "ggml-impl.h" #include "opt-step-sgd.cuh" #include static __global__ void opt_step_sgd_f32( float * __restrict__ x, const float * __restrict__ g, const float * __restrict__ pars, const int64_t k) { const int64_t i = (int64_t) blockIdx.x*blockDim.x + threadIdx.x; if (i >= k) { return; } x[i] = x[i] * (1.0f - pars[0] * pars[1]) - pars[0] * g[i]; } static void opt_step_sgd_f32_cuda( float * x, const float * g, const float * __restrict__ pars, const int64_t k, cudaStream_t stream) { const dim3 block_dims(CUDA_OPT_STEP_SGD_BLOCK_SIZE, 1, 1); const dim3 block_nums((k + CUDA_OPT_STEP_SGD_BLOCK_SIZE - 1) / CUDA_OPT_STEP_SGD_BLOCK_SIZE, 1, 1); opt_step_sgd_f32<<>>(x, g, pars, k); } void ggml_cuda_opt_step_sgd(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src0_grad = dst->src[1]; const ggml_tensor * params = dst->src[2]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src0_grad->type == GGML_TYPE_F32); GGML_ASSERT(params->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src0_grad)); GGML_ASSERT(ggml_is_contiguous(params)); GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); GGML_ASSERT(ggml_nelements(params) == 2); float * src0_d = (float *) src0->data; const float * src0_grad_d = (const float *) src0_grad->data; const float * params_d = (const float *) params->data; cudaStream_t stream = ctx.stream(); const int64_t ne = ggml_nelements(src0); opt_step_sgd_f32_cuda(src0_d, src0_grad_d, params_d, ne, stream); } ggml-org-ggml-3678254/src/ggml-cuda/opt-step-sgd.cuh000066400000000000000000000002221512524704700220360ustar00rootroot00000000000000#include "common.cuh" #define CUDA_OPT_STEP_SGD_BLOCK_SIZE 256 void ggml_cuda_opt_step_sgd(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/out-prod.cu000066400000000000000000000045251512524704700211230ustar00rootroot00000000000000#include "out-prod.cuh" #include void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS 
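    // OUT_PROD semantics (sketch): each (i2, i3) slice of dst is src0_slice * src1_slice^T,
    // with src0 slices broadcast across dims 2/3 for grouped (e.g. GQA) cases;
    // evaluated below as one cublasSgemm call per slice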
GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(ne01 == ne11); GGML_ASSERT(ne0 == ne00); GGML_ASSERT(ne1 == ne10); GGML_ASSERT(ne2 % src0->ne[2] == 0); GGML_ASSERT(ne3 % src0->ne[3] == 0); GGML_ASSERT(ne2 == src1->ne[2]); GGML_ASSERT(ne3 == src1->ne[3]); const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); cublasHandle_t handle = ctx.cublas_handle(); const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK(cublasSetStream(handle, stream)); const int64_t lda = nb01 / sizeof(float); const int64_t ldc = nb1 / sizeof(float); const bool src1_T = ggml_is_transposed(src1); const cublasOperation_t src1_cublas_op = src1_T ? CUBLAS_OP_N : CUBLAS_OP_T; const int64_t ldb = (src1_T ? nb10 : nb11) / sizeof(float); GGML_ASSERT( (src1_T ? nb11 : nb10) == sizeof(float)); // data strides in dimensions 2/3 const size_t s02 = nb02 / sizeof(float); const size_t s03 = nb03 / sizeof(float); const size_t s12 = nb12 / sizeof(float); const size_t s13 = nb13 / sizeof(float); const size_t s2 = nb2 / sizeof(float); const size_t s3 = nb3 / sizeof(float); // dps == dst per src0, used for group query attention const int64_t dps2 = ne2 / ne02; const int64_t dps3 = ne3 / ne03; // TODO batched matrix multiplication for (int64_t i3 = 0; i3 < ne3; ++i3) { for (int64_t i2 = 0; i2 < ne2; ++i2) { CUBLAS_CHECK( cublasSgemm(handle, CUBLAS_OP_N, src1_cublas_op, ne0, ne1, ne01, &alpha, src0_d + (i3/dps3)*s03 + (i2/dps2)*s02, lda, src1_d + i3 *s13 + i2 *s12, ldb, &beta, dst_d + i3 *s3 + i2 *s2, ldc)); } } } ggml-org-ggml-3678254/src/ggml-cuda/out-prod.cuh000066400000000000000000000001441512524704700212640ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/pad.cu000066400000000000000000000103111512524704700201040ustar00rootroot00000000000000#include "pad.cuh" #include __device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { // + size ensures negatives are handled properly return (coord + size) % size; } static __global__ void pad_f32(const float * src, float * dst, const int lp0, const int rp0, const int lp1, const int rp1, const int lp2, const int rp2, const int lp3, const int rp3, const int ne0, const int ne1, const int ne2, const int ne3, const bool circular) { // blockIdx.z: i3*ne2+i2 // blockIdx.y: i1 // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE // gridDim.y: ne1 int i0 = threadIdx.x + blockIdx.x * blockDim.x; int i1 = blockIdx.y; int i2 = blockIdx.z % ne2; int i3 = blockIdx.z / ne2; if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { return; } const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; if (!circular) { if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { const int64_t i00 = i0 - lp0; const int64_t i01 = i1 - lp1; const int64_t i02 = i2 - lp2; const int64_t i03 = i3 - lp3; const int64_t ne02 = ne2 - lp2 - rp2; const int64_t ne01 = ne1 - lp1 - rp1; const int64_t ne00 = ne0 - lp0 - rp0; const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; dst[dst_idx] = src[src_idx]; } else { dst[dst_idx] = 0.0f; } } // circular means on a torus, so x and y wrap around else { const int64_t ne00 = ne0 - lp0 - rp0; const int64_t ne01 = ne1 - lp1 - 
rp1; const int64_t ne02 = ne2 - lp2 - rp2; const int64_t ne03 = ne3 - lp3 - rp3; const int64_t i00 = wrap_around(i0 - lp0, ne00); const int64_t i01 = wrap_around(i1 - lp1, ne01); const int64_t i02 = wrap_around(i2 - lp2, ne02); const int64_t i03 = wrap_around(i3 - lp3, ne03); const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; dst[dst_idx] = src[src_idx]; } } static void pad_f32_cuda(const float * src, float * dst, const int lp0, const int rp0, const int lp1, const int rp1, const int lp2, const int rp2, const int lp3, const int rp3, const int ne0, const int ne1, const int ne2, const int ne3, const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2 * ne3); pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); } void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *) src0->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); const int32_t lp0 = ((const int32_t *) (dst->op_params))[0]; const int32_t rp0 = ((const int32_t *) (dst->op_params))[1]; const int32_t lp1 = ((const int32_t *) (dst->op_params))[2]; const int32_t rp1 = ((const int32_t *) (dst->op_params))[3]; const int32_t lp2 = ((const int32_t *) (dst->op_params))[4]; const int32_t rp2 = ((const int32_t *) (dst->op_params))[5]; const int32_t lp3 = ((const int32_t *) (dst->op_params))[6]; const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; const int32_t circular = ((const int32_t *) (dst->op_params))[8]; pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool) circular, stream); } ggml-org-ggml-3678254/src/ggml-cuda/pad.cuh000066400000000000000000000002031512524704700202530ustar00rootroot00000000000000#include "common.cuh" #define CUDA_PAD_BLOCK_SIZE 256 void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/pad_reflect_1d.cu000066400000000000000000000066551512524704700222140ustar00rootroot00000000000000#include "pad_reflect_1d.cuh" static __global__ __launch_bounds__(CUDA_PAD_REFLECT_1D_BLOCK_SIZE, 1) void pad_reflect_1d_kernel_f32( const void * __restrict__ src0, void * __restrict__ dst, const int64_t ne0, const int64_t ne00, const uint3 ne01, const int64_t ne02, const int64_t ne03, const int64_t nb00, const int64_t nb01, const int64_t nb02, const int64_t nb03, const int64_t nb0, const int64_t nb1, const int64_t nb2, const int64_t nb3, const int p0, const int p1) { const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; const uint2 div_mod_packed = fast_div_modulo(blockIdx.x, ne01); const int64_t tile1 = div_mod_packed.y; // i1 const int64_t tile0 = div_mod_packed.x; // nth i0 tile const int64_t i1 = tile1; const int64_t i0 = threadIdx.x + tile0 * blockDim.x; // ne01.z is original value of unpacked ne01 (see init_fastdiv_values in common.cuh) if (i0 >= ne0 || i1 >= ne01.z || i2 >= ne02 || i3 >= ne03) { return; } const char * src0_ptr = (const char *) src0 + i3 * nb03 + i2 * nb02 + i1 * nb01; char * dst_ptr = (char *) dst + i3 * nb3 + i2 * nb2 + i1 * nb1; const int64_t rel_i0 = i0 - p0; // relative i0 in src0 int64_t src_idx; if (rel_i0 < 0) { // Left padding - reflect src_idx = -rel_i0; } 
else if (rel_i0 < ne00) { // Middle - copy src_idx = rel_i0; } else { // Right padding - reflect src_idx = 2 * ne00 - 2 - rel_i0; } const float value = *(const float *) (src0_ptr + src_idx * nb00); *(float *) (dst_ptr + i0 * nb0) = value; GGML_UNUSED(p1); } void ggml_cuda_op_pad_reflect_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); const int32_t * opts = (const int32_t *) dst->op_params; const int p0 = opts[0]; const int p1 = opts[1]; const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const uint3 ne01_packed = init_fastdiv_values(ne01); const int64_t ne02 = src0->ne[2]; const int64_t ne03 = src0->ne[3]; const int64_t ne0 = dst->ne[0]; // sanity: padded length matches GGML_ASSERT(ne0 == ne00 + p0 + p1); constexpr int64_t bx = CUDA_PAD_REFLECT_1D_BLOCK_SIZE; // threads per block (x) const int64_t tiles0 = (ne0 + bx - 1) / bx; // number of tiles along i0 // grid.x covers i1 and all tiles of i0: [ne01 * tiles0] // grid.y covers i2: [ne02] // grid.z covers i3: [ne03] const dim3 grid_dims((unsigned) (ne01 * tiles0), (unsigned) ne02, (unsigned) ne03); const dim3 block_dims((unsigned) bx, 1, 1); pad_reflect_1d_kernel_f32<<>>( src0->data, dst->data, ne0, ne00, ne01_packed, ne02, ne03, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], p0, p1); } ggml-org-ggml-3678254/src/ggml-cuda/pad_reflect_1d.cuh000066400000000000000000000002311512524704700223440ustar00rootroot00000000000000#include "common.cuh" #define CUDA_PAD_REFLECT_1D_BLOCK_SIZE 256 void ggml_cuda_op_pad_reflect_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/pool2d.cu000066400000000000000000000063521512524704700205510ustar00rootroot00000000000000#include "pool2d.cuh" template static __global__ void pool2d_nchw_kernel( const int ih, const int iw, const int oh, const int ow, const int kh, const int kw, const int sh, const int sw, const int ph, const int pw, const int parallel_elements, const Ti* src, To* dst, const enum ggml_op_pool op) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= parallel_elements) { return; } const int I_HW = ih * iw; const int O_HW = oh * ow; const int nc = idx / O_HW; const int cur_oh = idx % O_HW / ow; const int cur_ow = idx % O_HW % ow; const Ti* i_ptr = src + nc * I_HW; To* o_ptr = dst + nc * O_HW; const int start_h = cur_oh * sh - ph; const int bh = max(0, start_h); const int eh = min(ih, start_h + kh); const int start_w = cur_ow * sw - pw; const int bw = max(0, start_w); const int ew = min(iw, start_w + kw); const To scale = 1. 
/ (kh * kw); To res = 0; switch (op) { case GGML_OP_POOL_AVG: res = 0; break; case GGML_OP_POOL_MAX: res = -FLT_MAX; break; default: assert(false); } for (int i = bh; i < eh; i += 1) { for (int j = bw; j < ew; j += 1) { #if __CUDA_ARCH__ >= 350 Ti cur = __ldg(i_ptr + i * iw + j); #else Ti cur = i_ptr[i * iw + j]; #endif switch (op) { case GGML_OP_POOL_AVG: res += cur * scale; break; case GGML_OP_POOL_MAX: res = max(res, (To)cur); break; default: assert(false); } } } o_ptr[cur_oh * ow + cur_ow] = res; } static void pool2d_nchw_kernel_f32_f32_cuda( const int ih, const int iw, const int oh, const int ow, const int kh, const int kw, const int sh, const int sw, const int ph, const int pw, const int parallel_elements, const float * src, float * dst, const enum ggml_op_pool op, cudaStream_t stream) { const int num_blocks = (parallel_elements + CUDA_POOL2D_BLOCK_SIZE - 1) / CUDA_POOL2D_BLOCK_SIZE; dim3 block_nums(num_blocks); pool2d_nchw_kernel<<>>(ih, iw, oh, ow, kh, kw, sh, sw, ph, pw, parallel_elements, src, dst, op); } void ggml_cuda_op_pool2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); const int32_t * opts = (const int32_t *)dst->op_params; enum ggml_op_pool op = static_cast(opts[0]); const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; const int64_t IH = src0->ne[1]; const int64_t IW = src0->ne[0]; const int64_t N = dst->ne[3]; const int64_t OC = dst->ne[2]; const int64_t OH = dst->ne[1]; const int64_t OW = dst->ne[0]; const int parallel_elements = N * OC * OH * OW; pool2d_nchw_kernel_f32_f32_cuda(IH, IW, OH, OW, k1, k0, s1, s0, p1, p0, parallel_elements, src0_d, dst_d, op, stream); } ggml-org-ggml-3678254/src/ggml-cuda/pool2d.cuh000066400000000000000000000002111512524704700207050ustar00rootroot00000000000000#include "common.cuh" #define CUDA_POOL2D_BLOCK_SIZE 256 void ggml_cuda_op_pool2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/quantize.cu000066400000000000000000000305521512524704700212110ustar00rootroot00000000000000#include "quantize.cuh" #include __launch_bounds__(CUDA_QUANTIZE_BLOCK_SIZE, 1) static __global__ void quantize_q8_1( const float * __restrict__ x, void * __restrict__ vy, const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t ne0, const uint32_t ne1, const uint3 ne2) { const int64_t i0 = (int64_t)blockDim.x*blockIdx.x + threadIdx.x; if (i0 >= ne0) { return; } const int64_t i3 = fastdiv(blockIdx.z, ne2); const int64_t i2 = blockIdx.z - i3*ne2.z; const int64_t i1 = blockIdx.y; const int64_t & i00 = i0; const int64_t & i01 = i1; const int64_t & i02 = i2; const int64_t & i03 = i3; const int64_t i_cont = ((i3*ne2.z + i2) * ne1 + i1) * ne0 + i0; block_q8_1 * y = (block_q8_1 *) vy; const int64_t ib = i_cont / QK8_1; // block index const int64_t iqs = i_cont % QK8_1; // quant index const float xi = i0 < ne00 ? x[i03*s03 + i02*s02 + i01*s01 + i00] : 0.0f; float amax = fabsf(xi); float sum = xi; amax = warp_reduce_max(amax); sum = warp_reduce_sum(sum); const float d = amax / 127.0f; const int8_t q = amax == 0.0f ? 
0 : roundf(xi / d); y[ib].qs[iqs] = q; if (iqs > 0) { return; } y[ib].ds = make_half2(d, sum); } __device__ __forceinline__ uint8_t compute_e8m0_scale(float amax) { if (!(amax > 0.0f)) { return 0; } // FP4 E2M1: max exponent (unbiased) is 2. constexpr int FP4_E2M1_EMAX = 2; const float e = log2f(amax); // "even" -> round-to-nearest integer, ties-to-even const int e_int = __float2int_rn(e); const int shared_exp = e_int - FP4_E2M1_EMAX; int biased = shared_exp + 127; biased = max(biased, 0); biased = min(biased, 254); return static_cast(biased); } // quantize values in the format mxfp4 is stored which is interleaved nibbles // i.e. a block a0-a31 is represented as a0a16,a1a17 ...a15a31 static __global__ void quantize_mmq_mxfp4(const float * __restrict__ x, const int32_t * __restrict__ ids, void * __restrict__ vy, const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t ne0, const int ne1, const int ne2) { constexpr int vals_per_scale = 32; constexpr int vals_per_warp = 2 * vals_per_scale; // Each warp processes 2 blocks of 32 = 64 values const int warp_id = threadIdx.y; const int lane_id_32 = threadIdx.x; const int nwarps = blockDim.y; const int64_t warp_start_offset = (blockIdx.y * nwarps + warp_id) * vals_per_warp; if (warp_start_offset >= ne0) { return; } const int64_t i1 = blockIdx.x; const int64_t i2 = blockIdx.z % ne2; const int64_t i3 = blockIdx.z / ne2; const int64_t i01 = ids ? ids[i1] : i1; const int64_t i02 = i2; const int64_t i03 = i3; block_fp4_mmq * y = (block_fp4_mmq *) vy; const int64_t block_fp4_mmq_size = 8 * QK_MXFP4; // 256 values const int64_t ib0 = blockIdx.z * ((int64_t) ne1 * (ne0 / block_fp4_mmq_size)); const int64_t ib = ib0 + (warp_start_offset / block_fp4_mmq_size) * ne1 + blockIdx.x; const int64_t quad_idx_in_block = (warp_start_offset % block_fp4_mmq_size) / vals_per_warp; const int group_id = lane_id_32 / 4; const int lane_in_group = lane_id_32 % 4; const int base = group_id * 2; char2 * yqs2 = (char2 *) y[ib].qs; const int64_t base_pos = i03 * s03 + i02 * s02 + i01 * s01; uint8_t scales[2]; #pragma unroll for (int b = 0; b < 2; ++b) { const int64_t i0 = warp_start_offset + b * vals_per_scale + lane_id_32; const float xi = (i0 < ne00) ? x[base_pos + i0] : 0.0f; float amax = fabsf(xi); #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, mask, WARP_SIZE)); } const uint8_t e = compute_e8m0_scale(amax); scales[b] = e; const float inv_s = (amax == 0.0f) ? 
0.0f : __frcp_rn(ggml_cuda_e8m0_to_fp32(e)); #if CUDART_VERSION >= 12080 const float scaled_val = xi * inv_s; const float val0 = __shfl_sync(0xFFFFFFFF, scaled_val, base, WARP_SIZE); const float val1 = __shfl_sync(0xFFFFFFFF, scaled_val, base + 16, WARP_SIZE); const float val2 = __shfl_sync(0xFFFFFFFF, scaled_val, base + 1, WARP_SIZE); const float val3 = __shfl_sync(0xFFFFFFFF, scaled_val, base + 17, WARP_SIZE); if (lane_in_group == 0) { __nv_fp4x4_e2m1 fp4_packed(make_float4(val0, val1, val2, val3)); yqs2[quad_idx_in_block * 16 + b * 8 + group_id] = *(char2 *) &fp4_packed; } #else // Fallback: manual FP4 conversion using LUT const uint8_t q_val = ggml_cuda_float_to_fp4_e2m1(xi, inv_s); const uint8_t q_lo_0 = __shfl_sync(0xFFFFFFFF, q_val, base, WARP_SIZE); const uint8_t q_lo_1 = __shfl_sync(0xFFFFFFFF, q_val, base + 1, WARP_SIZE); const uint8_t q_hi_0 = __shfl_sync(0xFFFFFFFF, q_val, base + 16, WARP_SIZE); const uint8_t q_hi_1 = __shfl_sync(0xFFFFFFFF, q_val, base + 17, WARP_SIZE); if (lane_in_group == 0) { char2 q; q.x = (q_hi_0 << 4) | q_lo_0; q.y = (q_hi_1 << 4) | q_lo_1; yqs2[quad_idx_in_block * 16 + b * 8 + group_id] = q; } #endif // CUDART_VERSION >= 12080 } if (lane_id_32 == 0) { // Store 2 scales packed into 1 uint32 y[ib].d4[quad_idx_in_block] = (scales[1] << 8) | scales[0]; } } template static __global__ void quantize_mmq_q8_1( const float * __restrict__ x, const int32_t * __restrict__ ids, void * __restrict__ vy, const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t ne0, const int ne1, const int ne2) { constexpr int vals_per_scale = ds_layout == MMQ_Q8_1_DS_LAYOUT_D2S6 ? 64 : 32; constexpr int vals_per_sum = ds_layout == MMQ_Q8_1_DS_LAYOUT_D2S6 ? 16 : 32; const int64_t i0 = ((int64_t)blockDim.x*blockIdx.y + threadIdx.x)*4; if (i0 >= ne0) { return; } const int64_t i1 = blockIdx.x; const int64_t i2 = blockIdx.z % ne2; const int64_t i3 = blockIdx.z / ne2; const int64_t i00 = i0; const int64_t i01 = ids ? ids[i1] : i1; const int64_t i02 = i2; const int64_t i03 = i3; const float4 * x4 = (const float4 *) x; block_q8_1_mmq * y = (block_q8_1_mmq *) vy; const int64_t ib0 = blockIdx.z*((int64_t)gridDim.x*gridDim.y*blockDim.x/QK8_1); // first block of channel const int64_t ib = ib0 + (i0 / (4*QK8_1))*ne1 + blockIdx.x; // block index in channel const int64_t iqs = i0 % (4*QK8_1); // quant index in block // Load 4 floats per thread and calculate max. abs. value between them: const float4 xi = i0 < ne00 ? x4[(i03*s03 + i02*s02 + i01*s01 + i00)/4] : make_float4(0.0f, 0.0f, 0.0f, 0.0f); float amax = fabsf(xi.x); amax = fmaxf(amax, fabsf(xi.y)); amax = fmaxf(amax, fabsf(xi.z)); amax = fmaxf(amax, fabsf(xi.w)); // Exchange max. abs. value between vals_per_scale/4 threads. #pragma unroll for (int offset = vals_per_scale/8; offset > 0; offset >>= 1) { amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, offset, WARP_SIZE)); } float sum; if (ds_layout != MMQ_Q8_1_DS_LAYOUT_D4) { sum = xi.x + xi.y + xi.z + xi.w; // Calculate sums across vals_per_sum/4 threads. 
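        // (Editor note) The XOR shuffle below is a butterfly reduction: starting at offset vals_per_sum/8
        // and halving it each step, partial sums are exchanged between lanes so that every lane in a group
        // of vals_per_sum/4 consecutive lanes (each lane holding 4 values) ends up with the sum of all
        // vals_per_sum values. E.g. with vals_per_sum == 32 the offsets are 4, 2, 1, reducing across
        // groups of 8 lanes.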
#pragma unroll for (int offset = vals_per_sum/8; offset > 0; offset >>= 1) { sum += __shfl_xor_sync(0xFFFFFFFF, sum, offset, WARP_SIZE); } } const float d_inv = 127.0f / amax; char4 q; q.x = roundf(xi.x*d_inv); q.y = roundf(xi.y*d_inv); q.z = roundf(xi.z*d_inv); q.w = roundf(xi.w*d_inv); // Write back 4 int8 values as a single 32 bit value for better memroy bandwidth: char4 * yqs4 = (char4 *) y[ib].qs; yqs4[iqs/4] = q; if (ds_layout == MMQ_Q8_1_DS_LAYOUT_D2S6) { if (iqs % 16 != 0 || iqs >= 96) { return; } y[ib].d2s6[2 + iqs/16] = sum; if (iqs % 64 != 0) { return; } const float d = 1.0f / d_inv; y[ib].d2s6[iqs/64] = d; return; } if (iqs % 32 != 0) { return; } const float d = 1.0f / d_inv; if (ds_layout == MMQ_Q8_1_DS_LAYOUT_DS4) { y[ib].ds4[iqs/32] = make_half2(d, sum); } else { y[ib].d4[iqs/32] = d; } } void quantize_row_q8_1_cuda( const float * x, const int32_t * ids, void * vy, const ggml_type type_src0, const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) { GGML_ASSERT(!ids); GGML_ASSERT(ne0 % QK8_1 == 0); const uint3 ne2_fastdiv = init_fastdiv_values(ne2); const int64_t block_num_x = (ne0 + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE; const dim3 num_blocks(block_num_x, ne1, ne2*ne3); const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1); quantize_q8_1<<>>(x, vy, ne00, s01, s02, s03, ne0, ne1, ne2_fastdiv); GGML_UNUSED(type_src0); } void quantize_mmq_q8_1_cuda( const float * x, const int32_t * ids, void * vy, const ggml_type type_src0, const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) { GGML_ASSERT(ne00 % 4 == 0); GGML_ASSERT(ne0 % (4*QK8_1) == 0); // ne1 tends to assume the highest values, therefore use it as the "x" dimension of the CUDA grid: const int64_t block_num_y = (ne0 + 4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ - 1) / (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ); const dim3 num_blocks(ne1, block_num_y, ne2*ne3); const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE_MMQ, 1, 1); switch (mmq_get_q8_1_ds_layout(type_src0)) { case MMQ_Q8_1_DS_LAYOUT_D4: quantize_mmq_q8_1 <<>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2); break; case MMQ_Q8_1_DS_LAYOUT_DS4: quantize_mmq_q8_1 <<>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2); break; case MMQ_Q8_1_DS_LAYOUT_D2S6: quantize_mmq_q8_1 <<>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2); break; default: GGML_ABORT("fatal error"); break; } } void quantize_mmq_mxfp4_cuda(const float * x, const int32_t * ids, void * vy, [[maybe_unused]] const ggml_type type_src0, const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) { GGML_ASSERT(ne0 % (2 * QK_MXFP4) == 0); constexpr int nwarps = 8; constexpr int vals_per_warp = 2 * QK_MXFP4; constexpr int vals_per_block = nwarps * vals_per_warp; const int64_t block_num_y = (ne0 + vals_per_block - 1) / vals_per_block; const dim3 num_blocks(ne1, block_num_y, ne2 * ne3); const dim3 block_size(WARP_SIZE, nwarps, 1); quantize_mmq_mxfp4<<>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2); } ggml-org-ggml-3678254/src/ggml-cuda/quantize.cuh000066400000000000000000000033771512524704700213660ustar00rootroot00000000000000#pragma once #include "common.cuh" #include "mmq.cuh" #include #define CUDA_QUANTIZE_BLOCK_SIZE 256 #define CUDA_QUANTIZE_BLOCK_SIZE_MMQ 128 
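// Editor-added note (inferred from the assertion messages below): quantize_q8_1 covers
// CUDA_QUANTIZE_BLOCK_SIZE elements of a row per block and the MMQ variant covers
// 4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ, so MATRIX_ROW_PADDING is required to be a whole multiple of each
// granularity; otherwise the last block of a padded row could read past the end of the buffer.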
static_assert(MATRIX_ROW_PADDING % CUDA_QUANTIZE_BLOCK_SIZE == 0, "Risk of out-of-bounds access."); static_assert(MATRIX_ROW_PADDING % (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ) == 0, "Risk of out-of-bounds access."); typedef void (*quantize_cuda_t)( const float * x, const int32_t * ids, void * vy, ggml_type type_src0, int64_t ne00, int64_t s01, int64_t s02, int64_t s03, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, cudaStream_t stream); void quantize_row_q8_1_cuda( const float * x, const int32_t * ids, void * vy, ggml_type type_src0, int64_t ne00, int64_t s01, int64_t s02, int64_t s03, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, cudaStream_t stream); void quantize_mmq_q8_1_cuda( const float * x, const int32_t * ids, void * vy, ggml_type type_src0, int64_t ne00, int64_t s01, int64_t s02, int64_t s03, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, cudaStream_t stream); void quantize_mmq_mxfp4_cuda(const float * x, const int32_t * ids, void * vy, ggml_type type_src0, int64_t ne00, int64_t s01, int64_t s02, int64_t s03, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, cudaStream_t stream); ggml-org-ggml-3678254/src/ggml-cuda/reduce_rows.cuh000066400000000000000000000030321512524704700220330ustar00rootroot00000000000000#include "common.cuh" // Row reduction kernel template - compute sum (norm=false) or mean (norm=true) template static __global__ void reduce_rows_f32(const float * __restrict__ x, float * __restrict__ dst, const int ncols) { const int row = blockIdx.x; const int col = threadIdx.x; float sum = 0.0f; const int num_unroll = 8; float temp[num_unroll]; float sum_temp[num_unroll] = { 0.0f }; for (int i = col; i < ncols;) { for (int j = 0; j < num_unroll; ++j) { if (i < ncols) { temp[j] = x[row * ncols + i]; } else { temp[j] = 0; } i += blockDim.x; } for (int j = 0; j < num_unroll; ++j) { sum_temp[j] += temp[j]; } } for (int j = 0; j < num_unroll; ++j) { sum += sum_temp[j]; } // sum up partial sums sum = warp_reduce_sum(sum); if (blockDim.x > WARP_SIZE) { assert((blockDim.x <= 1024) && (blockDim.x % WARP_SIZE) == 0); __shared__ float s_sum[32]; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = sum; } __syncthreads(); sum = 0.0f; if (lane_id < (static_cast(blockDim.x) / WARP_SIZE)) { sum = s_sum[lane_id]; } sum = warp_reduce_sum(sum); } if (col != 0) { return; } dst[row] = norm ? 
sum / ncols : sum; } ggml-org-ggml-3678254/src/ggml-cuda/roll.cu000066400000000000000000000045621512524704700203230ustar00rootroot00000000000000#include "ggml-cuda/common.cuh" #include "roll.cuh" static __forceinline__ __device__ int64_t wrap_index(const int64_t idx, const int64_t ne) { if (idx < 0) { return idx + ne; } if (idx >= ne) { return idx - ne; } return idx; } static __global__ void roll_f32_cuda(const float * __restrict__ src, float * __restrict__ dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int s0, const int s1, const int s2, const int s3) { const int64_t idx = int64_t(blockDim.x) * blockIdx.x + threadIdx.x; const int64_t n_elements = ne00 * ne01 * ne02 * ne03; if (idx >= n_elements) { return; } const int64_t i0 = idx % ne00; const int64_t i1 = (idx / ne00) % ne01; const int64_t i2 = (idx / (ne00 * ne01)) % ne02; const int64_t i3 = (idx / (ne00 * ne01 * ne02)) % ne03; const int64_t d0 = wrap_index(i0 - s0, ne00); const int64_t d1 = wrap_index(i1 - s1, ne01); const int64_t d2 = wrap_index(i2 - s2, ne02); const int64_t d3 = wrap_index(i3 - s3, ne03); dst[i3 * (ne00 * ne01 * ne02) + i2 * (ne01 * ne00) + i1 * ne00 + i0] = src[d3 * (ne00 * ne01 * ne02) + d2 * (ne01 * ne00) + d1 * ne00 + d0]; } void ggml_cuda_op_roll(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { int s0 = dst->op_params[0]; int s1 = dst->op_params[1]; int s2 = dst->op_params[2]; int s3 = dst->op_params[3]; const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *) dst->src[0]->data; float * dst_d = (float *) dst->data; GGML_TENSOR_UNARY_OP_LOCALS; GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(ggml_are_same_shape(dst->src[0], dst)); cudaStream_t stream = ctx.stream(); int64_t sz = (ne00 * ne01 * ne02 * ne03); int64_t num_blocks = (sz + CUDA_ROLL_BLOCK_SIZE - 1) / CUDA_ROLL_BLOCK_SIZE; roll_f32_cuda<<>>( src0_d, dst_d, ne00, ne01, ne02, ne03, s0, s1, s2, s3); } ggml-org-ggml-3678254/src/ggml-cuda/roll.cuh000066400000000000000000000002051512524704700204610ustar00rootroot00000000000000#include "common.cuh" #define CUDA_ROLL_BLOCK_SIZE 256 void ggml_cuda_op_roll(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/rope.cu000066400000000000000000000612761512524704700203250ustar00rootroot00000000000000#include "convert.cuh" #include "ggml-cuda/common.cuh" #include "ggml.h" #include "rope.cuh" struct rope_corr_dims { float v[2]; }; struct mrope_sections { int v[4]; }; static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / max(0.001f, high - low); return 1.0f - min(1.0f, max(0.0f, y)); } // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
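// Summary of the implementation below: the extrapolated angle theta_extrap is blended with the
// interpolated angle theta_interp = freq_scale * theta_extrap using a per-dimension ramp in [0, 1]
// derived from corr_dims and scaled by ext_factor:
//   theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix
// and, when ext_factor != 0, the magnitude correction mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale)
// is applied. For the backward pass (forward == false) only the sign of sin_theta is flipped.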
template static __device__ void rope_yarn( const float theta_extrap, const float freq_scale, const rope_corr_dims corr_dims, const int64_t i0, const float ext_factor, float mscale, float & cos_theta, float & sin_theta) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); } cos_theta = cosf(theta) * mscale; sin_theta = sinf(theta) * mscale; if (!forward) { sin_theta *= -1.0f; } } template static __global__ void rope_norm(const T * x, D * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors, const int64_t * row_indices, const int set_rows_stride) { const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (i0 >= ne0) { return; } const int row_dst = blockDim.x*blockIdx.x + threadIdx.x; const int row_x = row_dst % ne1; const int channel_x = row_dst / ne1; int idst = row_dst * ne0 + i0; const int ix = channel_x*s2 + row_x*s1 + i0; // Fusion optimization: ROPE + VIEW + SET_ROWS. // The rope output is viewed as a 1D tensor and offset based on a row index in row_indices. if (set_rows_stride != 0) { idst = row_x * ne0 + i0; idst += row_indices[channel_x] * set_rows_stride; } const auto & store_coaelsced = [&](float x0, float x1) { if constexpr (std::is_same_v) { float2 v = make_float2(x0, x1); ggml_cuda_memcpy_1<8>(dst + idst, &v); } else if constexpr (std::is_same_v) { half2 v = make_half2(x0, x1); ggml_cuda_memcpy_1<4>(dst + idst, &v); } }; if (i0 >= n_dims) { store_coaelsced(x[ix + 0], x[ix + 1]); return; } const float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f); const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f; float cos_theta; float sin_theta; rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, cos_theta, sin_theta); const float x0 = x[ix + 0]; const float x1 = x[ix + 1]; store_coaelsced(x0 * cos_theta - x1 * sin_theta, x0 * sin_theta + x1 * cos_theta); } template static __global__ void rope_neox(const T * x, D * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors, const int64_t * row_indices, const int set_rows_stride) { const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (i0 >= ne0) { return; } const int row_dst = blockDim.x*blockIdx.x + threadIdx.x; const int row_x = row_dst % ne1; const int channel_x = row_dst / ne1; int idst = row_dst * ne0 + i0 / 2; const int ix = channel_x*s2 + row_x*s1 + i0/2; // Fusion optimization: ROPE + VIEW + SET_ROWS. // The rope output is viewed as a 1D tensor and offset based on a row index in row_indices. 
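    // (Editor note) Unlike rope_norm above, which rotates adjacent element pairs (x[i0], x[i0 + 1]),
    // the NEOX layout pairs element i0/2 with element i0/2 + n_dims/2, i.e. the first and second
    // halves of the rotated dimensions; this is why both idst and ix are offset by i0/2 here.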
if (set_rows_stride != 0) { idst = row_x * ne0 + i0 / 2; idst += row_indices[channel_x] * set_rows_stride; } if (i0 >= n_dims) { dst[idst + i0 / 2 + 0] = ggml_cuda_cast(x[ix + i0 / 2 + 0]); dst[idst + i0 / 2 + 1] = ggml_cuda_cast(x[ix + i0 / 2 + 1]); return; } const float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f); const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f; float cos_theta; float sin_theta; rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, cos_theta, sin_theta); const float x0 = x[ix + 0]; const float x1 = x[ix + n_dims/2]; dst[idst + 0] = ggml_cuda_cast(x0 * cos_theta - x1 * sin_theta); dst[idst + n_dims / 2] = ggml_cuda_cast(x0 * sin_theta + x1 * cos_theta); } template static __global__ void rope_multi( const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors, const mrope_sections sections, const bool is_imrope) { const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (i0 >= ne0) { return; } const int row_dst = blockDim.x*blockIdx.x + threadIdx.x; const int row_x = row_dst % ne1; const int channel_x = row_dst / ne1; const int idst = row_dst*ne0 + i0/2; const int ix = channel_x*s2 + row_x*s1 + i0/2; if (i0 >= n_dims) { dst[idst + i0/2 + 0] = x[ix + i0/2 + 0]; dst[idst + i0/2 + 1] = x[ix + i0/2 + 1]; return; } const int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3]; const int sec_w = sections.v[1] + sections.v[0]; const int sector = (i0 / 2) % sect_dims; float theta_base = 0.0; if (is_imrope) { if (sector % 3 == 1 && sector < 3 * sections.v[1]) { // h theta_base = pos[channel_x + ne2 * 1]*powf(theta_scale, i0/2.0f); } else if (sector % 3 == 2 && sector < 3 * sections.v[2]) { // w theta_base = pos[channel_x + ne2 * 2]*powf(theta_scale, i0/2.0f); } else if (sector % 3 == 0 && sector < 3 * sections.v[0]) { // t theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f); } else { theta_base = pos[channel_x + ne2 * 3]*powf(theta_scale, i0/2.0f); } } else { if (sector < sections.v[0]) { theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f); } else if (sector >= sections.v[0] && sector < sec_w) { theta_base = pos[channel_x + ne2 * 1]*powf(theta_scale, i0/2.0f); } else if (sector >= sec_w && sector < sec_w + sections.v[2]) { theta_base = pos[channel_x + ne2 * 2]*powf(theta_scale, i0/2.0f); } else if (sector >= sec_w + sections.v[2]) { theta_base = pos[channel_x + ne2 * 3]*powf(theta_scale, i0/2.0f); } } const float freq_factor = has_ff ? 
freq_factors[i0/2] : 1.0f; float cos_theta; float sin_theta; rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, cos_theta, sin_theta); const float x0 = x[ix + 0]; const float x1 = x[ix + n_dims/2]; dst[idst + 0] = x0*cos_theta - x1*sin_theta; dst[idst + n_dims/2] = x0*sin_theta + x1*cos_theta; } template static __global__ void rope_vision( const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors, const mrope_sections sections) { const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (i0 >= ne0) { return; } const int row_dst = blockDim.x*blockIdx.x + threadIdx.x; const int row_x = row_dst % ne1; const int channel_x = row_dst / ne1; const int idst = row_dst*ne0 + i0/2; const int ix = channel_x*s2 + row_x*s1 + i0/2; const int sect_dims = sections.v[0] + sections.v[1]; const int sec_w = sections.v[1] + sections.v[0]; const int sector = (i0 / 2) % sect_dims; float theta_base = 0.0; if (sector < sections.v[0]) { const int p = sector; theta_base = pos[channel_x]*powf(theta_scale, p); } else if (sector >= sections.v[0] && sector < sec_w) { const int p = sector - sections.v[0]; theta_base = pos[channel_x + ne2]*powf(theta_scale, p); } const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f; float cos_theta; float sin_theta; rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, cos_theta, sin_theta); const float x0 = x[ix + 0]; const float x1 = x[ix + n_dims]; dst[idst + 0] = x0*cos_theta - x1*sin_theta; dst[idst + n_dims] = x0*sin_theta + x1*cos_theta; } template static void rope_norm_cuda(const T * x, D * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr, const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, const int64_t * row_indices, const int set_rows_stride, cudaStream_t stream) { GGML_ASSERT(ne0 % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const dim3 block_nums(nr, n_blocks_x, 1); const float theta_scale = powf(freq_base, -2.0f/n_dims); if (freq_factors == nullptr) { rope_norm<<>>( x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, row_indices, set_rows_stride); } else { rope_norm<<>>( x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, row_indices, set_rows_stride); } } template static void rope_neox_cuda(const T * x, D * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr, const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, const int64_t * row_indices, const int set_rows_stride, cudaStream_t stream) { GGML_ASSERT(ne0 % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const dim3 block_nums(nr, n_blocks_x, 1); const float theta_scale = powf(freq_base, -2.0f/n_dims); if (freq_factors == nullptr) { rope_neox<<>>( x, dst, ne0, ne1, s1, s2, 
n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, row_indices, set_rows_stride); } else { rope_neox<<>>( x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, row_indices, set_rows_stride); } } template static void rope_multi_cuda( const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int nr, const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, const mrope_sections sections, const bool is_imrope, cudaStream_t stream) { GGML_ASSERT(ne0 % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const dim3 block_nums(nr, n_blocks_x, 1); const float theta_scale = powf(freq_base, -2.0f/n_dims); if (freq_factors == nullptr) { rope_multi<<>>( x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections, is_imrope); } else { rope_multi<<>>( x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections, is_imrope); } } template static void rope_vision_cuda( const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int nr, const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, const mrope_sections sections, cudaStream_t stream) { GGML_ASSERT(ne0 % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const dim3 block_nums(nr, n_blocks_x, 1); // break down (head_dim, heads, seq) into (CUDA_ROPE_BLOCK_SIZE, x, heads * seq) // where x ~= ceil(head_dim / CUDA_ROPE_BLOCK_SIZE); const float theta_scale = powf(freq_base, -2.0f/n_dims); if (freq_factors == nullptr) { rope_vision<<>>( x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections); } else { rope_vision<<>>( x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections); } } template void ggml_cuda_op_rope_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst, const ggml_tensor * set_rows = nullptr) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; const float * src0_d = (const float *)src0->data; const float * src1_d = (const float *)src1->data; void * dst_d = dst->data; const int64_t * row_indices = nullptr; ggml_type dst_type = dst->type; int set_rows_stride = 0; if (set_rows != nullptr) { GGML_ASSERT(forward); dst_d = set_rows->data; row_indices = (const int64_t *) set_rows->src[1]->data; dst_type = set_rows->type; set_rows_stride = set_rows->nb[1] / ggml_type_size(set_rows->type); } cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); // When not fused, src0 and dst types must match // When fused (ROPE+VIEW+SET_ROWS), src0 may be F32 and dst may be F16 GGML_ASSERT(src0->type == dst->type || (src0->type == GGML_TYPE_F32 && dst->type == 
GGML_TYPE_F16)); const int64_t ne00 = src0->ne[0]; // head dims const int64_t ne01 = src0->ne[1]; // num heads const int64_t ne02 = src0->ne[2]; // num heads const int64_t nr = ggml_nrows(src0); const size_t s01 = src0->nb[1] / ggml_type_size(src0->type); const size_t s02 = src0->nb[2] / ggml_type_size(src0->type); //const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; //const int n_ctx = ((int32_t *) dst->op_params)[3]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; mrope_sections sections; // RoPE alteration for extended context float freq_base; float freq_scale; float ext_factor; float attn_factor; float beta_fast; float beta_slow; memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); memcpy(§ions.v, (int32_t *) dst->op_params + 11, sizeof(int)*4); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; const bool is_vision = mode == GGML_ROPE_TYPE_VISION; if (is_mrope) { GGML_ASSERT(sections.v[0] > 0 || sections.v[1] > 0 || sections.v[2] > 0); } if (is_vision) { GGML_ASSERT(n_dims == ne00/2); } const int32_t * pos = (const int32_t *) src1_d; const float * freq_factors = nullptr; if (src2 != nullptr) { freq_factors = (const float *) src2->data; } rope_corr_dims corr_dims; ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims.v); // compute if (is_neox) { if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F32) { rope_neox_cuda((const float *) src0_d, (float *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, row_indices, set_rows_stride, stream); } else if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F16) { rope_neox_cuda((const float *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, row_indices, set_rows_stride, stream); } else if (src0->type == GGML_TYPE_F16 && dst_type == GGML_TYPE_F16) { rope_neox_cuda((const half *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, row_indices, set_rows_stride, stream); } else { GGML_ABORT("fatal error"); } } else if (is_mrope && !is_vision) { if (src0->type == GGML_TYPE_F32) { rope_multi_cuda( (const float *) src0_d, (float *) dst_d, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, is_imrope, stream); } else if (src0->type == GGML_TYPE_F16) { rope_multi_cuda( (const half *) src0_d, (half *) dst_d, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, is_imrope, stream); } else { GGML_ABORT("fatal error"); } } else if (is_vision) { if (src0->type == GGML_TYPE_F32) { rope_vision_cuda( (const float *) src0_d, (float *) dst_d, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, stream); } 
else if (src0->type == GGML_TYPE_F16) { rope_vision_cuda( (const half *) src0_d, (half *) dst_d, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, stream); } else { GGML_ABORT("fatal error"); } } else { if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F32) { rope_norm_cuda((const float *) src0_d, (float *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, row_indices, set_rows_stride, stream); } else if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F16) { rope_norm_cuda((const float *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, row_indices, set_rows_stride, stream); } else if (src0->type == GGML_TYPE_F16 && dst_type == GGML_TYPE_F16) { rope_norm_cuda((const half *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, row_indices, set_rows_stride, stream); } else { GGML_ABORT("fatal error"); } } } void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_rope_impl(ctx, dst); } void ggml_cuda_op_rope_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_rope_impl(ctx, dst); } void ggml_cuda_op_rope_fused(ggml_backend_cuda_context & ctx, ggml_tensor * rope, ggml_tensor * set_rows) { ggml_cuda_op_rope_impl(ctx, rope, set_rows); } ggml-org-ggml-3678254/src/ggml-cuda/rope.cuh000066400000000000000000000005021512524704700204560ustar00rootroot00000000000000#include "common.cuh" #define CUDA_ROPE_BLOCK_SIZE 256 void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_rope_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_rope_fused(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * set_rows); ggml-org-ggml-3678254/src/ggml-cuda/scale.cu000066400000000000000000000025521512524704700204370ustar00rootroot00000000000000#include "scale.cuh" #define MAX_GRIDDIM_X 0x7FFFFFFF static __global__ void scale_f32(const float * x, float * dst, const float scale, const float bias, const int64_t nelements) { int64_t tid = (int64_t)blockIdx.x * (int64_t)blockDim.x + (int64_t)threadIdx.x; int64_t stride = (int64_t)blockDim.x * (int64_t)gridDim.x; for (int64_t i = tid; i < nelements; i += stride) { dst[i] = scale * x[i] + bias; } } static void scale_f32_cuda(const float * x, float * dst, const float scale, const float bias, const int64_t nelements, cudaStream_t stream) { const int64_t num_blocks = (nelements + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; scale_f32<<>>(x, dst, scale, bias, nelements); } void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); float scale; float bias; memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); memcpy(&bias, (float *) dst->op_params + 1, sizeof(float)); scale_f32_cuda(src0_d, dst_d, scale, bias, ggml_nelements(src0), stream); } ggml-org-ggml-3678254/src/ggml-cuda/scale.cuh000066400000000000000000000002071512524704700206020ustar00rootroot00000000000000#include "common.cuh" #define CUDA_SCALE_BLOCK_SIZE 256 void 
ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/set-rows.cu000066400000000000000000000315501512524704700211330ustar00rootroot00000000000000#include "set-rows.cuh" #include "cpy-utils.cuh" typedef void (*set_rows_kernel_t)(const char * src, char * dst); // Generic quantized set_rows kernel template template static __global__ void k_set_rows_quant(const float * __restrict__ src0, const idx_t * __restrict__ src1, block_type * __restrict__ dst, const int64_t ne_total, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t s10, const int64_t s11, const int64_t s12, const int64_t s1, const int64_t s2, const int64_t s3, const uint3 ne00, const uint3 ne01, const uint3 ne02, const uint3 ne11_fd, const uint3 ne12_fd) { const int64_t i = int64_t(blockDim.x) * blockIdx.x + threadIdx.x; if (i >= ne_total) { return; } const int64_t i_base = i * qk; uint32_t tmp = (uint32_t) i_base; uint2 div_mod; div_mod = fast_div_modulo(tmp, ne00); const int64_t i00 = div_mod.y; tmp = div_mod.x; div_mod = fast_div_modulo(tmp, ne01); const int64_t i01 = div_mod.y; tmp = div_mod.x; div_mod = fast_div_modulo(tmp, ne02); const int64_t i02 = div_mod.y; const int64_t i03 = div_mod.x; const int64_t i12 = fastmodulo((uint32_t) i03, ne12_fd); const int64_t i11 = fastmodulo((uint32_t) i02, ne11_fd); const int64_t i10 = i01; const int64_t dst_row = *(src1 + i10*s10 + i11*s11 + i12*s12); const float * src0_row = src0 + i01*s01 + i02*s02 + i03*s03; block_type * dst_row_ptr = dst + (dst_row*s1 + i02*s2 + i03*s3) / sizeof(block_type); const float * src_block = src0_row + i00; block_type * dst_block = dst_row_ptr + i00 / qk; quantize_func(src_block, dst_block); GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); } // Template dispatch function for quantized set_rows template static void set_rows_cuda_quant( const float * src0_d, const idx_t * src1_d, block_type * dst_d, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const size_t nb01, const size_t nb02, const size_t nb03, const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb1, const size_t nb2, const size_t nb3, cudaStream_t stream) { GGML_ASSERT(ne00 % qk == 0); const int64_t ne_total = (ne00 * ne01 * ne02 * ne03) / qk; const int num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE; const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE); const dim3 grid_size(num_blocks); const int64_t s01 = nb01/sizeof(float); const int64_t s02 = nb02/sizeof(float); const int64_t s03 = nb03/sizeof(float); const int64_t s10 = nb10/sizeof(idx_t); const int64_t s11 = nb11/sizeof(idx_t); const int64_t s12 = nb12/sizeof(idx_t); const int64_t s1 = nb1; const int64_t s2 = nb2; const int64_t s3 = nb3; if (ne_total > 0 && ne00 > 0 && ne01 > 0 && ne02 > 0 && ne11 > 0 && ne12 > 0) { const uint3 ne00_fd = init_fastdiv_values((uint32_t) ne00); const uint3 ne01_fd = init_fastdiv_values((uint32_t) ne01); const uint3 ne02_fd = init_fastdiv_values((uint32_t) ne02); const uint3 ne11_fd = init_fastdiv_values((uint32_t) ne11); const uint3 ne12_fd = init_fastdiv_values((uint32_t) ne12); k_set_rows_quant<<>>( src0_d, src1_d, dst_d, ne_total, ne10, ne11, ne12, ne13, s01, s02, s03, s10, s11, s12, s1, s2, s3, ne00_fd, ne01_fd, ne02_fd, ne11_fd, ne12_fd); } } template static __global__ void 
k_set_rows(const src_t * __restrict__ src0, const idx_t * __restrict__ src1, dst_t * __restrict__ dst, const int64_t ne_total, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const int64_t s01, const int64_t s02, const int64_t s03, const int64_t s10, const int64_t s11, const int64_t s12, const int64_t s1, const int64_t s2, const int64_t s3, const uint3 ne00, const uint3 ne01, const uint3 ne02, const uint3 ne11_fd, const uint3 ne12_fd) { const int64_t i = int64_t(blockDim.x) * blockIdx.x + threadIdx.x; if (i >= ne_total) { return; } uint32_t tmp = (uint32_t) i; uint2 div_mod; div_mod = fast_div_modulo(tmp, ne00); const int64_t i00 = div_mod.y; tmp = div_mod.x; div_mod = fast_div_modulo(tmp, ne01); const int64_t i01 = div_mod.y; tmp = div_mod.x; div_mod = fast_div_modulo(tmp, ne02); const int64_t i02 = div_mod.y; const int64_t i03 = div_mod.x; const int64_t i12 = fastmodulo((uint32_t) i03, ne12_fd); const int64_t i11 = fastmodulo((uint32_t) i02, ne11_fd); const int64_t i10 = i01; const int64_t dst_row = *(src1 + i10*s10 + i11*s11 + i12*s12); const src_t * src0_row = src0 + i01*s01 + i02*s02 + i03*s03; dst_t * dst_row_ptr = dst + dst_row*s1 + i02*s2 + i03*s3; dst_row_ptr[i00] = ggml_cuda_cast(src0_row[i00]); GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); } template static void set_rows_cuda( const src_t * src0_d, const idx_t * src1_d, dst_t * dst_d, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const size_t nb01, const size_t nb02, const size_t nb03, const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb1, const size_t nb2, const size_t nb3, cudaStream_t stream) { const int64_t ne_total = ne00 * ne01 * ne02 * ne03; const int num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE; const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE); const dim3 grid_size(num_blocks); const int64_t s01 = nb01/sizeof(src_t); const int64_t s02 = nb02/sizeof(src_t); const int64_t s03 = nb03/sizeof(src_t); const int64_t s10 = nb10/sizeof(idx_t); const int64_t s11 = nb11/sizeof(idx_t); const int64_t s12 = nb12/sizeof(idx_t); const int64_t s1 = nb1/sizeof(dst_t); const int64_t s2 = nb2/sizeof(dst_t); const int64_t s3 = nb3/sizeof(dst_t); if (ne_total > 0 && ne00 > 0 && ne01 > 0 && ne02 > 0 && ne11 > 0 && ne12 > 0) { const uint3 ne00_fd = init_fastdiv_values((uint32_t) ne00); const uint3 ne01_fd = init_fastdiv_values((uint32_t) ne01); const uint3 ne02_fd = init_fastdiv_values((uint32_t) ne02); const uint3 ne11_fd = init_fastdiv_values((uint32_t) ne11); const uint3 ne12_fd = init_fastdiv_values((uint32_t) ne12); k_set_rows<<>>(src0_d, src1_d, dst_d, ne_total, ne10, ne11, ne12, ne13, s01, s02, s03, s10, s11, s12, s1, s2, s3, ne00_fd, ne01_fd, ne02_fd, ne11_fd, ne12_fd); } } template static void set_rows_cuda(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { const src_t * src0_d = (const src_t *)src0->data; const idx_t * src1_d = (const idx_t *)src1->data; GGML_TENSOR_BINARY_OP_LOCALS cudaStream_t stream = ctx.stream(); if (dst->type == GGML_TYPE_F32) { set_rows_cuda( src0_d, src1_d, (float*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_F16) { set_rows_cuda( src0_d, src1_d, (half*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, 
nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_BF16) { set_rows_cuda( src0_d, src1_d, (nv_bfloat16*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_Q4_0) { set_rows_cuda_quant( src0_d, src1_d, (block_q4_0*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_Q4_1) { set_rows_cuda_quant( src0_d, src1_d, (block_q4_1*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_Q5_0) { set_rows_cuda_quant( src0_d, src1_d, (block_q5_0*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_Q5_1) { set_rows_cuda_quant( src0_d, src1_d, (block_q5_1*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_Q8_0) { set_rows_cuda_quant( src0_d, src1_d, (block_q8_0*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else if (dst->type == GGML_TYPE_IQ4_NL) { set_rows_cuda_quant( src0_d, src1_d, (block_iq4_nl*)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb01, nb02, nb03, nb10, nb11, nb12, nb1, nb2, nb3, stream ); } else { GGML_ABORT("unsupported type %s", ggml_type_name(dst->type)); } } void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_I64 || src1->type == GGML_TYPE_I32); if (src1->type == GGML_TYPE_I64) { set_rows_cuda(ctx, src0, src1, dst); } else { set_rows_cuda(ctx, src0, src1, dst); } } ggml-org-ggml-3678254/src/ggml-cuda/set-rows.cuh000066400000000000000000000002331512524704700212750ustar00rootroot00000000000000#pragma once #include "common.cuh" #define CUDA_SET_ROWS_BLOCK_SIZE 256 void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/set.cu000066400000000000000000000023641512524704700201440ustar00rootroot00000000000000#include "set.cuh" #include "cpy.cuh" void ggml_cuda_op_set(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_I32)); GGML_ASSERT(src1->type == src0->type); GGML_ASSERT(dst ->type == src0->type); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); const size_t nb1 = ((int32_t *) dst->op_params)[0]; const size_t nb2 = ((int32_t *) dst->op_params)[1]; const size_t nb3 = ((int32_t *) dst->op_params)[2]; const size_t offset = ((int32_t *) dst->op_params)[3]; const bool inplace= (bool) ((int32_t *) dst->op_params)[4]; if (!inplace) { ggml_cuda_cpy(ctx, src0, dst); } ggml_tensor dst_view = *dst; dst_view.data = (void *)((char *)dst->data + offset); dst_view.ne[0] = src1->ne[0]; dst_view.ne[1] = src1->ne[1]; dst_view.ne[2] = src1->ne[2]; dst_view.ne[3] = src1->ne[3]; dst_view.nb[0] = ggml_element_size(dst); dst_view.nb[1] = nb1; dst_view.nb[2] = nb2; dst_view.nb[3] = nb3; ggml_cuda_cpy(ctx, src1, &dst_view); } 
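// Editor-added note on the function above: GGML_OP_SET is lowered to two copies. Unless the op is
// marked inplace, src0 is first copied into dst; src1 is then copied into a view of dst whose byte
// strides (nb1..nb3) and byte offset come from dst->op_params, reusing ggml_cuda_cpy for both steps.
// At the graph level this corresponds to ggml_set() / ggml_set_inplace(), which also take strides
// and offset in bytes.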
ggml-org-ggml-3678254/src/ggml-cuda/set.cuh000066400000000000000000000002211512524704700203020ustar00rootroot00000000000000#pragma once

#include "common.cuh"

#define CUDA_SET_BLOCK_SIZE 256

void ggml_cuda_op_set(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
ggml-org-ggml-3678254/src/ggml-cuda/softcap.cu000066400000000000000000000024211512524704700210020ustar00rootroot00000000000000#include "softcap.cuh"

static __global__ void softcap_f32(const float * x, float * dst, const float scale, const float softcap, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;

    if (i >= k) {
        return;
    }

    dst[i] = tanhf(scale * x[i]) * softcap;
}

static void softcap_f32_cuda(const float * x, float * dst, const float scale, const float softcap, const int k, cudaStream_t stream) {
    const int num_blocks = (k + CUDA_SOFTCAP_BLOCK_SIZE - 1) / CUDA_SOFTCAP_BLOCK_SIZE;
    softcap_f32<<<num_blocks, CUDA_SOFTCAP_BLOCK_SIZE, 0, stream>>>(x, dst, scale, softcap, k);
}

// fused GGML_OP_SCALE + GGML_UNARY_OP_TANH + GGML_OP_SCALE
void ggml_cuda_op_softcap(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * src) {
    const ggml_tensor * src0 = src->src[0];
    const float * src0_d = (const float *)src0->data;
    float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    float scale;
    float softcap;
    memcpy(&scale,   (float *) src->op_params + 0, sizeof(float));
    memcpy(&softcap, (float *) dst->op_params + 0, sizeof(float));

    softcap_f32_cuda(src0_d, dst_d, scale, softcap, ggml_nelements(src0), stream);
}
ggml-org-ggml-3678254/src/ggml-cuda/softcap.cuh000066400000000000000000000002361512524704700211540ustar00rootroot00000000000000#include "common.cuh"

#define CUDA_SOFTCAP_BLOCK_SIZE 256

void ggml_cuda_op_softcap(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * src);
ggml-org-ggml-3678254/src/ggml-cuda/softmax.cu000066400000000000000000000254241512524704700210340ustar00rootroot00000000000000#include "common.cuh"
#include "ggml.h"
#include "softmax.cuh"

#include <cmath>
#include <cstdint>

template <typename T>
static __device__ __forceinline__ float t2f32(T val) {
    return (float) val;
}

template <>
__device__ float __forceinline__ t2f32<half>(half val) {
    return __half2float(val);
}

struct soft_max_params {
    int64_t nheads;
    uint32_t n_head_log2;
    int64_t ncols;
    int64_t nrows_x;
    int64_t nrows_y;
    int64_t ne00;
    int64_t ne01;
    int64_t ne02;
    int64_t ne03;
    int64_t nb11;
    int64_t nb12;
    int64_t nb13;

    int64_t ne12;
    int64_t ne13;
    float scale;
    float max_bias;
    float m0;
    float m1;
};

// When ncols_template == 0 the bounds for the loops in this function are not known and can't be unrolled.
// As we want to keep pragma unroll for all other cases we suppress the clang transformation warning here.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpass-failed"
#endif // __clang__
template <bool use_shared, int ncols_template, int block_size_template, typename T>
static __global__ void soft_max_f32(
        const float * x, const T * mask, const float * sinks, float * dst, const soft_max_params p) {
    const int ncols = ncols_template == 0 ?
p.ncols : ncols_template; const int tid = threadIdx.x; const int64_t i03 = blockIdx.z; const int64_t i02 = blockIdx.y; const int64_t i01 = blockIdx.x; //TODO: noncontigous inputs/outputs const int rowx = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y; const int64_t i11 = i01; const int64_t i12 = i02 % p.ne12; const int64_t i13 = i03 % p.ne13; x += int64_t(rowx)*ncols; mask += (i11*p.nb11 + i12*p.nb12 + i13*p.nb13) / sizeof(T) * (mask != nullptr); dst += int64_t(rowx)*ncols; const int block_size = block_size_template == 0 ? blockDim.x : block_size_template; const int warp_id = threadIdx.x / WARP_SIZE; const int lane_id = threadIdx.x % WARP_SIZE; const float slope = get_alibi_slope(p.max_bias, i02, p.n_head_log2, p.m0, p.m1); extern __shared__ float data_soft_max_f32[]; float * buf_iw = data_soft_max_f32; // shared memory buffer for inter-warp communication // shared memory buffer to cache values between iterations: float * vals = use_shared ? buf_iw + WARP_SIZE : dst; float max_val = sinks ? sinks[i02] : -INFINITY; #pragma unroll for (int col0 = 0; col0 < ncols; col0 += block_size) { const int col = col0 + tid; if (ncols_template == 0 && col >= ncols) { break; } const float val = x[col]*p.scale + (mask ? slope*t2f32(mask[col]) : 0.0f); vals[col] = val; max_val = max(max_val, val); } // find the max value in the block max_val = warp_reduce_max(max_val); if (block_size > WARP_SIZE) { if (warp_id == 0) { buf_iw[lane_id] = -INFINITY; } __syncthreads(); if (lane_id == 0) { buf_iw[warp_id] = max_val; } __syncthreads(); max_val = buf_iw[lane_id]; max_val = warp_reduce_max(max_val); } float tmp = 0.0f; // partial sum #pragma unroll for (int col0 = 0; col0 < ncols; col0 += block_size) { const int col = col0 + tid; if (ncols_template == 0 && col >= ncols) { break; } const float val = expf(vals[col] - max_val); tmp += val; vals[col] = val; } // find the sum of exps in the block tmp = warp_reduce_sum(tmp); if (block_size > WARP_SIZE) { __syncthreads(); if (warp_id == 0) { buf_iw[lane_id] = 0.0f; } __syncthreads(); if (lane_id == 0) { buf_iw[warp_id] = tmp; } __syncthreads(); tmp = buf_iw[lane_id]; tmp = warp_reduce_sum(tmp); } if (sinks) { tmp += expf(sinks[i02] - max_val); } const float inv_sum = 1.0f / tmp; #pragma unroll for (int col0 = 0; col0 < ncols; col0 += block_size) { const int col = col0 + tid; if (ncols_template == 0 && col >= ncols) { return; } dst[col] = vals[col] * inv_sum; } } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ static __global__ void soft_max_back_f32( const float * grad, const float * dstf, float * dst, const int ncols, const float scale) { const int tid = threadIdx.x; const int rowx = blockIdx.x; grad += int64_t(rowx)*ncols; dstf += int64_t(rowx)*ncols; dst += int64_t(rowx)*ncols; float dgf_dot = 0.0f; // dot product of dst from forward pass and gradients for (int col = tid; col < ncols; col += WARP_SIZE) { dgf_dot += dstf[col]*grad[col]; } dgf_dot = warp_reduce_sum(dgf_dot); for (int col = tid; col < ncols; col += WARP_SIZE) { dst[col] = scale * (grad[col] - dgf_dot) * dstf[col]; } } template static void launch_soft_max_kernels(const float * x, const T * mask, const float * sinks, float * dst, const soft_max_params & p, cudaStream_t stream, dim3 block_dims, dim3 block_nums, size_t nbytes_shared) { const int id = ggml_cuda_get_device(); const size_t smpbo = ggml_cuda_info().devices[id].smpbo; auto launch_kernel = [=](auto I) -> bool { constexpr int ncols = decltype(I)::value; constexpr int block = (ncols > 1024 ? 
1024 : ncols); if (p.ncols == ncols) { CUDA_SET_SHARED_MEMORY_LIMIT((soft_max_f32), smpbo); soft_max_f32<<>> (x, mask, sinks, dst, p); return true; } return false; }; // unary fold over launch_kernel if ((launch_kernel(std::integral_constant{}) || ...)) { return; } //default case CUDA_SET_SHARED_MEMORY_LIMIT((soft_max_f32), smpbo); soft_max_f32<<>>(x, mask, sinks, dst, p); } template static void soft_max_f32_cuda(const float * x, const T * mask, const float * sinks, float * dst, const soft_max_params & params, cudaStream_t stream) { int nth = WARP_SIZE; const int64_t ncols_x = params.ncols; while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2; const dim3 block_dims(nth, 1, 1); const dim3 block_nums(params.ne01, params.ne02, params.ne03); const size_t nbytes_shared = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float); static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted."); const int id = ggml_cuda_get_device(); const size_t smpbo = ggml_cuda_info().devices[id].smpbo; if (nbytes_shared <= smpbo) { launch_soft_max_kernels<32, 64, 128, 256, 512, 1024, 2048, 4096>(x, mask, sinks, dst, params, stream, block_dims, block_nums, nbytes_shared); } else { const size_t nbytes_shared_low = WARP_SIZE*sizeof(float); soft_max_f32<<>>(x, mask, sinks, dst, params); } } static void soft_max_back_f32_cuda( const float * grad, const float * dstf, float * dst, const int ncols, const int nrows, const float scale, cudaStream_t stream) { const dim3 block_dims(WARP_SIZE, 1, 1); const dim3 block_nums(nrows, 1, 1); soft_max_back_f32<<>>(grad, dstf, dst, ncols, scale); } void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; const float * src0_d = (const float *) src0->data; const void * src1_d = src1 ? (const void *) src1->data : nullptr; const void * src2_d = src2 ? (const void *) src2->data : nullptr; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional const int64_t nrows_x = ggml_nrows(src0); const int64_t nrows_y = src0->ne[1]; const int64_t ne00 = src0->ne[0]; float scale = 1.0f; float max_bias = 0.0f; memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); const int64_t nb11 = src1 ? src1->nb[1] : 1; const int64_t nb12 = src1 ? src1->nb[2] : 1; const int64_t nb13 = src1 ? src1->nb[3] : 1; const int64_t ne12 = src1 ? src1->ne[2] : 1; const int64_t ne13 = src1 ? 
src1->ne[3] : 1; const uint32_t n_head = src0->ne[2]; const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); soft_max_params params = {}; params.nheads = src0->ne[2]; params.n_head_log2 = n_head_log2; params.ncols = ne00; params.nrows_x = nrows_x; params.nrows_y = nrows_y; params.ne00 = src0->ne[0]; params.ne01 = src0->ne[1]; params.ne02 = src0->ne[2]; params.ne03 = src0->ne[3]; params.nb11 = nb11; params.nb12 = nb12; params.nb13 = nb13; params.ne12 = ne12; params.ne13 = ne13; params.scale = scale; params.max_bias = max_bias; params.m0 = m0; params.m1 = m1; if (use_f16) { soft_max_f32_cuda(src0_d, (const half *) src1_d, (const float *) src2_d, dst_d, params, stream); } else { soft_max_f32_cuda(src0_d, (const float *) src1_d, (const float *) src2_d, dst_d, params, stream); } } void ggml_cuda_op_soft_max_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // grad const ggml_tensor * src1 = dst->src[1]; // forward pass output const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); const int64_t ncols = src0->ne[0]; const int64_t nrows = ggml_nrows(src0); float scale = 1.0f; float max_bias = 0.0f; memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); GGML_ASSERT(max_bias == 0.0f); soft_max_back_f32_cuda(src0_d, src1_d, dst_d, ncols, nrows, scale, stream); } ggml-org-ggml-3678254/src/ggml-cuda/softmax.cuh000066400000000000000000000003441512524704700211760ustar00rootroot00000000000000#include "common.cuh" #define CUDA_SOFT_MAX_BLOCK_SIZE 1024 void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_soft_max_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/solve_tri.cu000066400000000000000000000264111512524704700213560ustar00rootroot00000000000000#include "common.cuh" #include "ggml.h" #include "solve_tri.cuh" #define MAX_N_FAST 64 #define MAX_K_FAST 32 static __global__ void get_batch_pointers(const float * A, float * X, const float ** A_ptrs, float ** X_ptrs, int64_t ne02, int64_t total_batches, size_t s02, size_t s03, size_t s2, size_t s3) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= total_batches) { return; } const int64_t i3 = idx / ne02; const int64_t i2 = idx % ne02; A_ptrs[idx] = A + i3 * s03 + i2 * s02; X_ptrs[idx] = X + i3 * s3 + i2 * s2; } static void solve_tri_f32_cublas(ggml_backend_cuda_context & ctx, const float * A, const float * B, float * X, int n, int k, int64_t ne02, int64_t ne03, size_t s02, size_t s03, size_t s12, size_t s13, size_t s2, size_t s3, cudaStream_t stream) { const float alpha = 1.0f; const int64_t total_batches = ne02 * ne03; if (total_batches == 0) { return; } // Bulk copy B -> X (contiguous tensors) if (X != B) { const int64_t total_elements_BX = n * k * total_batches; CUDA_CHECK(cudaMemcpyAsync(X, B, total_elements_BX * sizeof(float), cudaMemcpyDeviceToDevice, stream)); } const int id = ggml_cuda_get_device(); ggml_cuda_pool_alloc A_ptrs_alloc(ctx.pool(id), total_batches); ggml_cuda_pool_alloc X_ptrs_alloc(ctx.pool(id), total_batches); const 
float ** A_ptrs_dev = A_ptrs_alloc.get(); float ** X_ptrs_dev = X_ptrs_alloc.get(); get_batch_pointers<<<(total_batches + 255) / 256, 256, 0, stream>>>(A, X, A_ptrs_dev, X_ptrs_dev, ne02, total_batches, s02, s03, s2, s3); CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); // Yes, this is necessary, without this we get RMSE errors CUBLAS_CHECK(cublasSetMathMode(ctx.cublas_handle(id), CUBLAS_DEFAULT_MATH)); CUBLAS_CHECK(cublasStrsmBatched(ctx.cublas_handle(id), CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, k, n, &alpha, A_ptrs_dev, n, X_ptrs_dev, k, total_batches)); // revert to standard mode from common.cuh CUBLAS_CHECK(cublasSetMathMode(ctx.cublas_handle(id), CUBLAS_TF32_TENSOR_OP_MATH)); GGML_UNUSED_VARS(s12, s13); } // ====================== // Fast Kernel (n <= 64, k <= 32) - Warp-based parallel reduction // ====================== // When ncols_template == 0 the bounds for the loops in this function are not // known and can't be unrolled. As we want to keep pragma unroll for all other // cases we supress the clang transformation warning here. #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpass-failed" #endif // __clang__ template static __global__ void solve_tri_f32_fast(const float * __restrict__ A, const float * __restrict__ B, float * __restrict__ X, const uint3 ne02, const size_t nb02, const size_t nb03, const size_t nb12, const size_t nb13, const size_t nb2, const size_t nb3, const int n_arg, const int k_arg) { const int n = n_template == 0 ? n_arg : n_template; const int k = k_template == 0 ? k_arg : k_template; const int batch_idx = blockIdx.x; const int lane = threadIdx.x; const int col_idx = threadIdx.y; if (col_idx >= k) { return; } const uint2 i02_i03 = fast_div_modulo(batch_idx, ne02); const int64_t i02 = i02_i03.y; const int64_t i03 = i02_i03.x; const float * const A_batch = (const float *) (A + i02 * nb02 + i03 * nb03); const float * const B_batch = (const float *) (B + i02 * nb12 + i03 * nb13); float * X_batch = (float *) (X + i02 * nb2 + i03 * nb3); __shared__ float sA[MAX_N_FAST * MAX_N_FAST]; const int offset = threadIdx.x + threadIdx.y * blockDim.x; #pragma unroll for (int i = 0; i < n * n; i += k * WARP_SIZE) { const int i0 = i + offset; if (i0 < n * n) { sA[i0] = A_batch[i0]; } } __syncthreads(); float x_low = (lane < n) ? B_batch[lane * k + col_idx] : 0.0f; float x_high = (WARP_SIZE + lane < n) ? B_batch[(WARP_SIZE + lane) * k + col_idx] : 0.0f; const int half = WARP_SIZE; const int nrows_low = (n < half) ? n : half; #pragma unroll for (int row = 0; row < nrows_low; ++row) { float sum = 0.0f; if (lane < row) { sum += sA[row * n + lane] * x_low; } sum = warp_reduce_sum(sum); if (lane == row) { x_low = (x_low - sum) / sA[row * n + row]; } } #pragma unroll for (int row = half; row < n; ++row) { float sum = sA[row * n + lane] * x_low; const int j = half + lane; if (j < row) { sum += sA[row * n + j] * x_high; } sum = warp_reduce_sum(sum); if (lane == row - half) { x_high = (x_high - sum) / sA[row * n + row]; } } #pragma unroll for (int rr = 0; rr < 2; ++rr) { const int row = rr * WARP_SIZE + lane; if (row < n) { const float val = (row < half) ? 
x_low : x_high; X_batch[row * k + col_idx] = val; } } } #ifdef __clang__ # pragma clang diagnostic pop #endif // __clang__ static void solve_tri_f32_cuda(const float * A, const float * B, float * X, int n, int k, int64_t ne02, int64_t ne03, size_t nb02, size_t nb03, size_t nb12, size_t nb13, size_t nb2, size_t nb3, cudaStream_t stream) { const uint3 ne02_fd = init_fastdiv_values((uint32_t) ne02); dim3 threads(WARP_SIZE, k); dim3 grid(ne02 * ne03); if (n == 64) { switch (k) { case 32: solve_tri_f32_fast<64, 32> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 16: solve_tri_f32_fast<64, 16> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 14: solve_tri_f32_fast<64, 14> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 12: solve_tri_f32_fast<64, 12> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 10: solve_tri_f32_fast<64, 10> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 8: solve_tri_f32_fast<64, 8> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 6: solve_tri_f32_fast<64, 6> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 4: solve_tri_f32_fast<64, 4> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 2: solve_tri_f32_fast<64, 2> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; case 1: solve_tri_f32_fast<64, 1> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, 0, 0); break; default: solve_tri_f32_fast<0, 0> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, n, k); } } else { // run general case solve_tri_f32_fast<0, 0> <<>>(A, B, X, ne02_fd, nb02, nb03, nb12, nb13, nb2, nb3, n, k); } } void ggml_cuda_op_solve_tri(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // A (n×n, lower triangular) const ggml_tensor * src1 = dst->src[1]; // B (n×k) ggml_is_contiguous(src0); ggml_is_contiguous(src1); const int64_t n = src0->ne[0]; const int64_t k = src1->ne[0]; const int64_t ne02 = src0->ne[2]; const int64_t ne03 = src0->ne[3]; if (n <= MAX_N_FAST && k <= MAX_K_FAST) { solve_tri_f32_cuda((const float *) src0->data, (const float *) src1->data, (float *) dst->data, n, k, src0->ne[2], src0->ne[3], src0->nb[2] / sizeof(float), src0->nb[3] / sizeof(float), src1->nb[2] / sizeof(float), src1->nb[3] / sizeof(float), dst->nb[2] / sizeof(float), dst->nb[3] / sizeof(float), ctx.stream()); } else { solve_tri_f32_cublas(ctx, (const float *) src0->data, (const float *) src1->data, (float *) dst->data, n, k, ne02, ne03, src0->nb[2] / sizeof(float), src0->nb[3] / sizeof(float), src1->nb[2] / sizeof(float), src1->nb[3] / sizeof(float), dst->nb[2] / sizeof(float), dst->nb[3] / sizeof(float), ctx.stream()); } } ggml-org-ggml-3678254/src/ggml-cuda/solve_tri.cuh000066400000000000000000000001501512524704700215160ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_solve_tri(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/ssm-conv.cu000066400000000000000000000145301512524704700211140ustar00rootroot00000000000000#include "ssm-conv.cuh" template static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float * __restrict__ src1, const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1, float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2, const int64_t n_t) { GGML_UNUSED(src0_nb0); const int tid = threadIdx.x; 
const int bidx = blockIdx.x; const int bidy = blockIdx.y; const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1); const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1); float * y_block = (float *) ((char *) dst + bidx * dst_nb2 + bidy * split_d_inner * dst_nb0); const int stride_x = src0_nb1 / sizeof(float); const int stride_w = src1_nb1 / sizeof(float); const int stride_y = dst_nb1 / sizeof(float); float x[d_conv] = { 0.0f }; float w[d_conv] = { 0.0f }; #pragma unroll for (size_t j = 0; j < d_conv; j++) { w[j] = w_block[tid * stride_w + j]; } for (int64_t i = 0; i < n_t; i++) { float sumf = 0.0f; if (i == 0) { for (size_t j = 0; j < d_conv; j++) { x[j] = x_block[tid * stride_x + j]; } } else { x[(i - 1) % d_conv] = x_block[tid * stride_x + i + d_conv - 1]; } #pragma unroll for (size_t j = 0; j < d_conv; j++) { sumf += x[(i + j) % d_conv] * w[j]; } y_block[i * stride_y + tid] = sumf; } } template static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, const float * __restrict__ src1, const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1, float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2, const int64_t n_t) { const int tid = threadIdx.x; const int bidx = blockIdx.x; const int bidy = blockIdx.y; const int bidz = blockIdx.z; const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1 + bidz * split_n_t * src0_nb0); const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1); float * y_block = (float *) ((char *) dst + bidx * dst_nb2 + bidz * split_n_t * dst_nb1 + bidy * split_d_inner * dst_nb0); const int stride_x = src0_nb1 / sizeof(float); const int stride_w = src1_nb1 / sizeof(float); const int stride_y = dst_nb1 / sizeof(float); float x[d_conv] = { 0.0f }; float w[d_conv] = { 0.0f }; #pragma unroll for (size_t j = 0; j < d_conv; j++) { w[j] = w_block[tid * stride_w + j]; } #pragma unroll for (int64_t i = 0; i < split_n_t; i++) { if (bidz * split_n_t + i < n_t) { float sumf = 0.0f; if (i == 0) { for (size_t j = 0; j < d_conv; j++) { x[j] = x_block[tid * stride_x + j]; } } else { x[(i - 1) % d_conv] = x_block[tid * stride_x + i + d_conv - 1]; } #pragma unroll for (size_t j = 0; j < d_conv; j++) { sumf += x[(i + j) % d_conv] * w[j]; } y_block[i * stride_y + tid] = sumf; } } } static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1, float * dst, const int dst_nb0, const int dst_nb1, const int dst_nb2, const int64_t nc, const int64_t nr, const int64_t n_t, const int64_t n_s, cudaStream_t stream) { const int threads = 128; GGML_ASSERT(nr % threads == 0); auto launch_kernel = [&](auto NC) { constexpr int kNC = decltype(NC)::value; if (n_t <= 32) { const dim3 blocks(n_s, (nr + threads - 1) / threads, 1); ssm_conv_f32<<>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t); } else { const int64_t split_n_t = 32; dim3 blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t); ssm_conv_long_token_f32<<>>( src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t); } }; switch (nc) { case 3: launch_kernel(std::integral_constant{}); break; case 4: launch_kernel(std::integral_constant{}); break; case 9: launch_kernel(std::integral_constant{}); break; default: 
GGML_ABORT("Only support kernel sizes 3, 4, 9 right now."); } } void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; // conv_x const struct ggml_tensor * src1 = dst->src[1]; // conv1d.weight const int64_t nc = src1->ne[0]; // d_conv const int64_t nr = src0->ne[1]; // d_inner const int64_t n_t = dst->ne[1]; // tokens per sequence const int64_t n_s = dst->ne[2]; // number of sequences in the batch GGML_ASSERT(dst->ne[0] == nr); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2], src1->nb[1], dst_d, dst->nb[0], dst->nb[1], dst->nb[2], nc, nr, n_t, n_s, stream); } ggml-org-ggml-3678254/src/ggml-cuda/ssm-conv.cuh000066400000000000000000000001471512524704700212630ustar00rootroot00000000000000#include "common.cuh" void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/ssm-scan.cu000066400000000000000000000430731512524704700210770ustar00rootroot00000000000000#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11070 #define USE_CUB #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11070 #ifdef USE_CUB #include using namespace cub; #endif // USE_CUB #include "ssm-scan.cuh" // We would like to keep pragma unroll for cases where L_template is not 0, // so we suppress the clang transformation warning. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpass-failed" #endif // __clang__ template __global__ void __launch_bounds__(splitD, 1) ssm_scan_f32(const float *__restrict__ src0, const float *__restrict__ src1, const float *__restrict__ src2, const float *__restrict__ src3, const float *__restrict__ src4, const float *__restrict__ src5, const int32_t * __restrict__ src6, float * __restrict__ dst, const int src0_nb2, const int src0_nb3, const int src1_nb2, const int src1_nb3, const int src2_nb1, const int src2_nb2, const int src3_nb1, const int src4_nb2, const int src4_nb3, const int src5_nb2, const int src5_nb3, const int64_t s_off, const int64_t d_inner, const int64_t L_param) { const size_t L = L_template == 0 ? 
L_param : L_template; const float *s0_block = (const float *)((const char *)src0 + src6[blockIdx.x] * src0_nb3 + blockIdx.y * splitD * src0_nb2); const float *x_block = (const float *)((const char *)src1 + (blockIdx.x * src1_nb3) + blockIdx.y * splitD * sizeof(float)); const float *dt_block = (const float *)((const char *)src2 + (blockIdx.x * src2_nb2) + blockIdx.y * splitD * sizeof(float)); const float *A_block = (const float *)((const char *)src3 + blockIdx.y * splitD * src3_nb1); const float *B_block = (const float *)((const char *)src4 + (blockIdx.x * src4_nb3)); const float *C_block = (const float *)((const char *)src5 + (blockIdx.x * src5_nb3)); float *y_block = (float *)((char *)dst + (blockIdx.x * d_inner * L * sizeof(float)) + blockIdx.y * splitD * sizeof(float)); float *s_block = (float *)((char *)dst + s_off + blockIdx.x * src0_nb3 + blockIdx.y * splitD * src0_nb2); const int stride_x = src1_nb2 / sizeof(float); const int stride_dt = src2_nb1 / sizeof(float); const int stride_B = src4_nb2 / sizeof(float); const int stride_C = src5_nb2 / sizeof(float); const int stride_y = d_inner; float regA[N]; float regs0[N]; __shared__ float smemB[N]; __shared__ float smemC[N]; #ifdef USE_CUB using BlockLoad = cub::BlockLoad; using BlockStore = cub::BlockStore; union CubTempStorage { typename BlockLoad::TempStorage load_temp; typename BlockStore::TempStorage store_temp; }; __shared__ CubTempStorage cub_temp_storage; BlockLoad(cub_temp_storage.load_temp).Load(A_block, regA); BlockLoad(cub_temp_storage.load_temp).Load(s0_block, regs0); #else const int stride_s0 = src0_nb2 / sizeof(float); const int stride_A = src3_nb1 / sizeof(float); #pragma unroll for (size_t n = 0; n < N; ++n) { regA[n] = A_block[threadIdx.x * stride_A + n]; regs0[n] = s0_block[threadIdx.x * stride_s0 + n]; } #endif #pragma unroll for (size_t i = 0; i < L; i++) { if (threadIdx.x < N) { smemB[threadIdx.x] = B_block[i * stride_B + threadIdx.x]; smemC[threadIdx.x] = C_block[i * stride_C + threadIdx.x]; } __syncthreads(); float dt_soft_plus = dt_block[i * stride_dt + threadIdx.x]; if (dt_soft_plus <= 20.0f) { dt_soft_plus = log1pf(expf(dt_soft_plus)); } float x_dt = x_block[i * stride_x + threadIdx.x] * dt_soft_plus; float sumf = 0.0f; #pragma unroll for (size_t n = 0; n < N; n++) { float state = regs0[n] * expf(dt_soft_plus * regA[n]) + smemB[n] * x_dt; sumf += state * smemC[n]; regs0[n] = state; } y_block[i * stride_y + threadIdx.x] = sumf; } #ifdef USE_CUB BlockStore(cub_temp_storage.store_temp).Store(s_block, regs0); #else const int stride_s = stride_s0; #pragma unroll for (size_t n = 0; n < N; ++n) { s_block[threadIdx.x * stride_s + n] = regs0[n]; } #endif } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ // assumes as many threads as d_state template __global__ void __launch_bounds__(d_state, 1) ssm_scan_f32_group( const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2, const float * __restrict__ src3, const float * __restrict__ src4, const float * __restrict__ src5, const int32_t * __restrict__ src6, float * __restrict__ dst, const int src0_nb2, const int src0_nb3, const int src1_nb2, const int src1_nb3, const int src2_nb1, const int src2_nb2, const int src3_nb1, const int src4_nb2, const int src4_nb3, const int src5_nb2, const int src5_nb3, const int64_t s_off, const int64_t n_head, const int64_t d_head, const int64_t n_group, const int64_t n_tok) { const int head_idx = (blockIdx.x * splitH) / d_head; const int head_off = ((blockIdx.x * splitH) % d_head) * 
sizeof(float); const int seq_idx = blockIdx.y; const int group_off = (head_idx / (n_head / n_group)) * d_state * sizeof(float); const float * s0_block = (const float *) ((const char *) src0 + src6[seq_idx] * src0_nb3 + head_idx * src0_nb2 + head_off * d_state); const float * x_block = (const float *) ((const char *) src1 + (seq_idx * src1_nb3) + blockIdx.x * splitH * sizeof(float)); const float * dt_block = (const float *) ((const char *) src2 + (seq_idx * src2_nb2) + head_idx * sizeof(float)); const float * A_block = (const float *) ((const char *) src3 + head_idx * src3_nb1); const float * B_block = (const float *) ((const char *) src4 + (seq_idx * src4_nb3) + (group_off)); const float * C_block = (const float *) ((const char *) src5 + (seq_idx * src5_nb3) + (group_off)); float * y_block = dst + (seq_idx * n_tok * n_head * d_head) + blockIdx.x * splitH; float * s_block = (float *) ((char *) dst + s_off + seq_idx * src0_nb3 + head_idx * src0_nb2 + head_off * d_state); // strides across n_seq_tokens const int stride_x = src1_nb2 / sizeof(float); const int stride_dt = src2_nb1 / sizeof(float); const int stride_B = src4_nb2 / sizeof(float); const int stride_C = src5_nb2 / sizeof(float); const int stride_y = n_head * d_head; float state[splitH]; // for the parallel accumulation __shared__ float stateC[splitH * d_state]; #pragma unroll for (int j = 0; j < splitH; j++) { state[j] = s0_block[j * d_state + threadIdx.x]; } for (int64_t i = 0; i < n_tok; i++) { // TODO: only calculate dA and dt_soft_plus once per head instead of every splitH head elements // TODO: only calculate B and C once per head group // NOTE: dt_soft_plus, dA and x_dt have the same value across threads here. float dt_soft_plus = dt_block[i * stride_dt]; if (dt_soft_plus <= 20.0f) { dt_soft_plus = log1pf(expf(dt_soft_plus)); } const float dA = expf(dt_soft_plus * A_block[0]); const float B = B_block[i * stride_B + threadIdx.x]; const float C = C_block[i * stride_C + threadIdx.x]; // across d_head #pragma unroll for (int j = 0; j < splitH; j++) { const float x_dt = x_block[i * stride_x + j] * dt_soft_plus; state[j] = (state[j] * dA) + (B * x_dt); stateC[j * d_state + threadIdx.x] = state[j] * C; } __syncthreads(); // parallel accumulation for stateC // TODO: simplify { static_assert((d_state & -d_state) == d_state, "the state size has to be a power of 2"); static_assert((splitH & -splitH) == splitH, "splitH has to be a power of 2"); // reduce until w matches the warp size // TODO: does this work even when the physical warp size is 64? 
#pragma unroll for (int w = d_state; w > WARP_SIZE; w >>= 1) { // (assuming there are d_state threads) #pragma unroll for (int j = 0; j < ((w >> 1) * splitH + d_state - 1) / d_state; j++) { // TODO: check for bank conflicts const int k = (threadIdx.x % (w >> 1)) + (d_state * (threadIdx.x / (w >> 1))) + j * d_state * (d_state / (w >> 1)); stateC[k] += stateC[k + (w >> 1)]; } __syncthreads(); } static_assert(splitH >= d_state / WARP_SIZE); #pragma unroll for (int j = 0; j < splitH / (d_state / WARP_SIZE); j++) { float y = stateC[(threadIdx.x % WARP_SIZE) + d_state * (threadIdx.x / WARP_SIZE) + j * d_state * (d_state / WARP_SIZE)]; y = warp_reduce_sum(y); // store the above accumulations if (threadIdx.x % WARP_SIZE == 0) { const int k = threadIdx.x / WARP_SIZE + j * (d_state / WARP_SIZE); y_block[i * stride_y + k] = y; } } } } // write back the state #pragma unroll for (int j = 0; j < splitH; j++) { s_block[j * d_state + threadIdx.x] = state[j]; } } static void ssm_scan_f32_cuda(const float * src0, const float * src1, const float * src2, const float * src3, const float * src4, const float * src5, const int32_t * src6, float * dst, const int src0_nb2, const int src0_nb3, const int src1_nb2, const int src1_nb3, const int src2_nb1, const int src2_nb2, const int src3_nb1, const int src4_nb2, const int src4_nb3, const int src5_nb2, const int src5_nb3, const int64_t s_off, const int64_t d_state, const int64_t head_dim, const int64_t n_head, const int64_t n_group, const int64_t n_tok, const int64_t n_seq, cudaStream_t stream) { const int threads = 128; // NOTE: if you change conditions here, be sure to update the corresponding supports_op condition! if (src3_nb1 == sizeof(float)) { // Mamba-2 if (d_state == 128) { GGML_ASSERT(d_state % threads == 0); // NOTE: can be any power of two between 4 and 64 const int splitH = 16; GGML_ASSERT(head_dim % splitH == 0); const dim3 blocks((n_head * head_dim + (splitH - 1)) / splitH, n_seq, 1); ssm_scan_f32_group<16, 128><<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, head_dim, n_group, n_tok); } else if (d_state == 256) { // Falcon-H1 const int threads = 256; // NOTE: can be any power of two between 8 and 64 const int splitH = 16; GGML_ASSERT(head_dim % splitH == 0); const dim3 blocks((n_head * head_dim + (splitH - 1)) / splitH, n_seq, 1); ssm_scan_f32_group<16, 256><<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, head_dim, n_group, n_tok); } else { GGML_ABORT("doesn't support d_state!=(128 or 256)."); } } else { // Mamba-1 GGML_ASSERT(n_head % threads == 0); GGML_ASSERT(head_dim == 1); GGML_ASSERT(n_group == 1); const dim3 blocks(n_seq, (n_head + threads - 1) / threads, 1); const int smem_size = (threads * (d_state + 1) * 2) * sizeof(float); if (d_state == 16) { switch (n_tok) { case 1: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 2: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 3: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, 
src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 4: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 5: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 6: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 7: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; case 8: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; default: ssm_scan_f32<<>>( src0, src1, src2, src3, src4, src5, src6, dst, src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok); break; } } else { GGML_ABORT("doesn't support d_state!=16."); } } } void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; // s const struct ggml_tensor * src1 = dst->src[1]; // x const struct ggml_tensor * src2 = dst->src[2]; // dt const struct ggml_tensor * src3 = dst->src[3]; // A const struct ggml_tensor * src4 = dst->src[4]; // B const struct ggml_tensor * src5 = dst->src[5]; // C const struct ggml_tensor * src6 = dst->src[6]; // ids const int64_t nc = src0->ne[0]; // d_state const int64_t nr = src0->ne[1]; // head_dim or 1 const int64_t nh = src1->ne[1]; // n_head const int64_t ng = src4->ne[1]; // n_group const int64_t n_t = src1->ne[2]; // number of tokens per sequence const int64_t n_s = src1->ne[3]; // number of sequences in the batch const int64_t s_off = ggml_nelements(src1) * sizeof(float); GGML_ASSERT(ggml_nelements(src1) + nc*nr*nh*n_s == ggml_nelements(dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); GGML_ASSERT(src2->nb[0] == sizeof(float)); GGML_ASSERT(src3->nb[0] == sizeof(float)); GGML_ASSERT(src4->nb[0] == sizeof(float)); GGML_ASSERT(src5->nb[0] == sizeof(float)); GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; const float * src2_d = (const float *) src2->data; const float * src3_d = (const float *) src3->data; const float * src4_d = (const float *) src4->data; const float * src5_d = (const float *) src5->data; const int32_t * src6_d = (const int32_t *) src6->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src6->type == GGML_TYPE_I32); GGML_ASSERT(dst->type == GGML_TYPE_F32); ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d, src6_d, dst_d, src0->nb[2], src0->nb[3], src1->nb[2], src1->nb[3], src2->nb[1], src2->nb[2], src3->nb[1], src4->nb[2], src4->nb[3], src5->nb[2], src5->nb[3], s_off, nc, nr, nh, ng, n_t, n_s, stream); } 
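// --- Illustrative sketch (not part of the ggml sources) -------------------------------------
// The per-token recurrence that ssm_scan_f32 implements for Mamba-1 style models can be
// summarized with a scalar CPU reference: softplus on dt (with the same large-value shortcut
// used in the kernel), exponential state decay through A, input injection through B, and
// readout through C. The flat array layouts and the function name below are assumptions made
// for illustration only.
#include <cmath>

static void ssm_scan_reference(
        float       * state,  // [N]        running state for one channel, updated in place
        const float * x,      // [n_tok]    input sequence for this channel
        const float * dt,     // [n_tok]    per-token time step
        const float * A,      // [N]        state decay coefficients
        const float * B,      // [n_tok*N]  per-token input projection
        const float * C,      // [n_tok*N]  per-token output projection
        float       * y,      // [n_tok]    output sequence
        int n_tok, int N) {
    for (int t = 0; t < n_tok; ++t) {
        float dts = dt[t];
        if (dts <= 20.0f) {
            dts = log1pf(expf(dts)); // softplus, skipped for large dt as in the kernel
        }
        const float x_dt = x[t] * dts;
        float sum = 0.0f;
        for (int n = 0; n < N; ++n) {
            state[n] = state[n] * expf(dts * A[n]) + B[t*N + n] * x_dt;
            sum     += state[n] * C[t*N + n];
        }
        y[t] = sum;
    }
}
// ---------------------------------------------------------------------------------------------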
ggml-org-ggml-3678254/src/ggml-cuda/ssm-scan.cuh000066400000000000000000000001471512524704700212420ustar00rootroot00000000000000#include "common.cuh"

void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
ggml-org-ggml-3678254/src/ggml-cuda/sum.cu000066400000000000000000000025241512524704700201530ustar00rootroot00000000000000#include "sum.cuh"
#include "sumrows.cuh"

#ifdef GGML_CUDA_USE_CUB
#include <cub/cub.cuh>
using namespace cub;
#endif // GGML_CUDA_USE_CUB

#include <cstdint>

void sum_f32_cuda(ggml_cuda_pool & pool, const float * x, float * dst, const int64_t ne, cudaStream_t stream) {
#ifdef GGML_CUDA_USE_CUB
    size_t tmp_size = 0;
    DeviceReduce::Sum(nullptr, tmp_size, x, dst, ne, stream);
    ggml_cuda_pool_alloc<uint8_t> tmp_alloc(pool, tmp_size);
    DeviceReduce::Sum(tmp_alloc.ptr, tmp_size, x, dst, ne, stream);
#else
    // Use (inefficient) sum_rows implementation as a fallback.
    // For AMD there is rocPRIM which could be used as a drop-in replacement via hipcub but this would require C++11 -> C++14.
    sum_rows_f32_cuda(x, dst, ne, 1, stream);
    GGML_UNUSED(pool);
#endif // GGML_CUDA_USE_CUB
}

void ggml_cuda_op_sum(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguously_allocated(src0));

    const float * src0_d = (const float *) src0->data;
    float * dst_d = (float *) dst->data;

    const int64_t ne = ggml_nelements(src0);

    ggml_cuda_pool & pool = ctx.pool();
    cudaStream_t stream = ctx.stream();

    sum_f32_cuda(pool, src0_d, dst_d, ne, stream);
}
ggml-org-ggml-3678254/src/ggml-cuda/sum.cuh000066400000000000000000000003221512524704700203170ustar00rootroot00000000000000#include "common.cuh"

void sum_f32_cuda(ggml_cuda_pool & pool, const float * x, float * dst, const int64_t ne, cudaStream_t stream);

void ggml_cuda_op_sum(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
ggml-org-ggml-3678254/src/ggml-cuda/sumrows.cu000066400000000000000000000034051512524704700210650ustar00rootroot00000000000000#include "reduce_rows.cuh"
#include "sumrows.cuh"

void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    const int id  = ggml_cuda_get_device();
    const int nsm = ggml_cuda_info().devices[id].nsm;
    const dim3 block_nums(nrows, 1, 1);
    if ((nrows / nsm) < 2) {
        const dim3 block_dims(512, 1, 1);
        reduce_rows_f32<false><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols);
    } else {
        const dim3 block_dims(ncols < 1024 ? 32 : 128, 1, 1);
        reduce_rows_f32<false><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols);
    }
}

void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const float * src0_d = (const float *)src0->data;
    float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0));

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);
    const dim3 block_nums(nrows, 1, 1);

    const int id  = ggml_cuda_get_device();
    const int nsm = ggml_cuda_info().devices[id].nsm;
    if ((nrows / nsm) < 2) {
        // Increase num threads to 512 for small nrows to better hide the latency
        const dim3 block_dims(512, 1, 1);
        reduce_rows_f32<false><<<block_nums, block_dims, 0, stream>>>(src0_d, dst_d, ncols);
    } else {
        // Enough active SMs to hide latency, use smaller blocks to allow better scheduling
        const dim3 block_dims(ncols < 1024 ?
32 : 128, 1, 1); reduce_rows_f32<<>>(src0_d, dst_d, ncols); } } ggml-org-ggml-3678254/src/ggml-cuda/sumrows.cuh000066400000000000000000000003241512524704700212320ustar00rootroot00000000000000#include "common.cuh" void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream); void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/000077500000000000000000000000001512524704700226135ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu000066400000000000000000000002371512524704700323470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(576, 512, 1, 16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu000066400000000000000000000005451512524704700322720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 1, 8); DECL_FATTN_MMA_F16_CASE(80, 80, 1, 8); DECL_FATTN_MMA_F16_CASE(96, 96, 1, 8); DECL_FATTN_MMA_F16_CASE(112, 112, 1, 8); DECL_FATTN_MMA_F16_CASE(128, 128, 1, 8); DECL_FATTN_MMA_F16_CASE(256, 256, 1, 8); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu000066400000000000000000000005531512524704700323500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 16, 1); DECL_FATTN_MMA_F16_CASE(80, 80, 16, 1); DECL_FATTN_MMA_F16_CASE(96, 96, 16, 1); DECL_FATTN_MMA_F16_CASE(112, 112, 16, 1); DECL_FATTN_MMA_F16_CASE(128, 128, 16, 1); DECL_FATTN_MMA_F16_CASE(256, 256, 16, 1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu000066400000000000000000000005531512524704700323510ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 16, 2); DECL_FATTN_MMA_F16_CASE(80, 80, 16, 2); DECL_FATTN_MMA_F16_CASE(96, 96, 16, 2); DECL_FATTN_MMA_F16_CASE(112, 112, 16, 2); DECL_FATTN_MMA_F16_CASE(128, 128, 16, 2); DECL_FATTN_MMA_F16_CASE(256, 256, 16, 2); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu000066400000000000000000000005531512524704700323530ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 16, 4); DECL_FATTN_MMA_F16_CASE(80, 80, 16, 4); DECL_FATTN_MMA_F16_CASE(96, 96, 16, 4); DECL_FATTN_MMA_F16_CASE(112, 112, 16, 4); DECL_FATTN_MMA_F16_CASE(128, 128, 16, 4); DECL_FATTN_MMA_F16_CASE(256, 256, 16, 4); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu000066400000000000000000000002371512524704700323500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(576, 512, 2, 16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu000066400000000000000000000005451512524704700322670ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 2, 4); DECL_FATTN_MMA_F16_CASE(80, 80, 2, 4); DECL_FATTN_MMA_F16_CASE(96, 96, 2, 4); DECL_FATTN_MMA_F16_CASE(112, 112, 2, 4); DECL_FATTN_MMA_F16_CASE(128, 128, 2, 4); DECL_FATTN_MMA_F16_CASE(256, 256, 2, 4); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu000066400000000000000000000005451512524704700322730ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 2, 8); DECL_FATTN_MMA_F16_CASE(80, 80, 2, 8); DECL_FATTN_MMA_F16_CASE(96, 96, 2, 8); DECL_FATTN_MMA_F16_CASE(112, 112, 2, 8); DECL_FATTN_MMA_F16_CASE(128, 128, 2, 8); DECL_FATTN_MMA_F16_CASE(256, 256, 2, 8); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu000066400000000000000000000005531512524704700323460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 32, 1); DECL_FATTN_MMA_F16_CASE(80, 80, 32, 1); DECL_FATTN_MMA_F16_CASE(96, 96, 32, 1); DECL_FATTN_MMA_F16_CASE(112, 112, 32, 1); DECL_FATTN_MMA_F16_CASE(128, 128, 32, 1); DECL_FATTN_MMA_F16_CASE(256, 256, 32, 1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu000066400000000000000000000005531512524704700323470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 32, 2); DECL_FATTN_MMA_F16_CASE(80, 80, 32, 2); DECL_FATTN_MMA_F16_CASE(96, 96, 32, 2); DECL_FATTN_MMA_F16_CASE(112, 112, 32, 2); DECL_FATTN_MMA_F16_CASE(128, 128, 32, 2); DECL_FATTN_MMA_F16_CASE(256, 256, 32, 2); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu000066400000000000000000000002371512524704700323520ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(576, 512, 4, 16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu000066400000000000000000000005451512524704700322670ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 4, 2); DECL_FATTN_MMA_F16_CASE(80, 80, 4, 2); DECL_FATTN_MMA_F16_CASE(96, 96, 4, 2); DECL_FATTN_MMA_F16_CASE(112, 112, 4, 2); DECL_FATTN_MMA_F16_CASE(128, 128, 4, 2); DECL_FATTN_MMA_F16_CASE(256, 256, 4, 2); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu000066400000000000000000000005451512524704700322710ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 4, 4); DECL_FATTN_MMA_F16_CASE(80, 80, 4, 4); DECL_FATTN_MMA_F16_CASE(96, 96, 4, 4); DECL_FATTN_MMA_F16_CASE(112, 112, 4, 4); DECL_FATTN_MMA_F16_CASE(128, 128, 4, 4); DECL_FATTN_MMA_F16_CASE(256, 256, 4, 4); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu000066400000000000000000000005451512524704700322750ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 4, 8); DECL_FATTN_MMA_F16_CASE(80, 80, 4, 8); DECL_FATTN_MMA_F16_CASE(96, 96, 4, 8); DECL_FATTN_MMA_F16_CASE(112, 112, 4, 8); DECL_FATTN_MMA_F16_CASE(128, 128, 4, 8); DECL_FATTN_MMA_F16_CASE(256, 256, 4, 8); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu000066400000000000000000000005531512524704700323530ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 64, 1); DECL_FATTN_MMA_F16_CASE(80, 80, 64, 1); DECL_FATTN_MMA_F16_CASE(96, 96, 64, 1); DECL_FATTN_MMA_F16_CASE(112, 112, 64, 1); DECL_FATTN_MMA_F16_CASE(128, 128, 64, 1); DECL_FATTN_MMA_F16_CASE(256, 256, 64, 1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu000066400000000000000000000005451512524704700322720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 8, 1); DECL_FATTN_MMA_F16_CASE(80, 80, 8, 1); DECL_FATTN_MMA_F16_CASE(96, 96, 8, 1); DECL_FATTN_MMA_F16_CASE(112, 112, 8, 1); DECL_FATTN_MMA_F16_CASE(128, 128, 8, 1); DECL_FATTN_MMA_F16_CASE(256, 256, 8, 1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu000066400000000000000000000005451512524704700322730ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 8, 2); DECL_FATTN_MMA_F16_CASE(80, 80, 8, 2); DECL_FATTN_MMA_F16_CASE(96, 96, 8, 2); DECL_FATTN_MMA_F16_CASE(112, 112, 8, 2); DECL_FATTN_MMA_F16_CASE(128, 128, 8, 2); DECL_FATTN_MMA_F16_CASE(256, 256, 8, 2); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu000066400000000000000000000005451512524704700322750ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 8, 4); DECL_FATTN_MMA_F16_CASE(80, 80, 8, 4); DECL_FATTN_MMA_F16_CASE(96, 96, 8, 4); DECL_FATTN_MMA_F16_CASE(112, 112, 8, 4); DECL_FATTN_MMA_F16_CASE(128, 128, 8, 4); DECL_FATTN_MMA_F16_CASE(256, 256, 8, 4); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu000066400000000000000000000005451512524704700323010ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-mma-f16.cuh" DECL_FATTN_MMA_F16_CASE(64, 64, 8, 8); DECL_FATTN_MMA_F16_CASE(80, 80, 8, 8); DECL_FATTN_MMA_F16_CASE(96, 96, 8, 8); DECL_FATTN_MMA_F16_CASE(112, 112, 8, 8); DECL_FATTN_MMA_F16_CASE(128, 128, 8, 8); DECL_FATTN_MMA_F16_CASE(256, 256, 8, 8); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq112-dv112.cu000066400000000000000000000002221512524704700306450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(112, 112); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq128-dv128.cu000066400000000000000000000002221512524704700306630ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(128, 128); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq256-dv256.cu000066400000000000000000000002221512524704700306670ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(256, 256); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq40-dv40.cu000066400000000000000000000002201512524704700305030ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(40, 40); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq576-dv512.cu000066400000000000000000000002221512524704700306670ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(576, 512); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq64-dv64.cu000066400000000000000000000002201512524704700305170ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(64, 64); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq72-dv72.cu000066400000000000000000000002201512524704700305150ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(72, 72); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq80-dv80.cu000066400000000000000000000002201512524704700305130ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(80, 80); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-tile-instance-dkq96-dv96.cu000066400000000000000000000002201512524704700305310ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE(96, 96); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-f16-f16.cu000066400000000000000000000004311512524704700276170ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_0.cu000066400000000000000000000004341512524704700277710ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_F16, GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_1.cu000066400000000000000000000004341512524704700277720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_F16, GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_0.cu000066400000000000000000000004341512524704700277720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_F16, GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_1.cu000066400000000000000000000004341512524704700277730ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_F16, GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q8_0.cu000066400000000000000000000004341512524704700277750ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_F16, GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-f16.cu000066400000000000000000000004341512524704700277710ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_0, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_0, GGML_TYPE_F16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_0.cu000066400000000000000000000004371512524704700301430ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_1.cu000066400000000000000000000004371512524704700301440ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_0.cu000066400000000000000000000004371512524704700301440ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_1.cu000066400000000000000000000004371512524704700301450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q8_0.cu000066400000000000000000000004371512524704700301470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-f16.cu000066400000000000000000000004341512524704700277720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_1, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_1, GGML_TYPE_F16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_0.cu000066400000000000000000000004371512524704700301440ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_1.cu000066400000000000000000000004371512524704700301450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_0.cu000066400000000000000000000004371512524704700301450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_1.cu000066400000000000000000000004371512524704700301460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q8_0.cu000066400000000000000000000004371512524704700301500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-f16.cu000066400000000000000000000004341512524704700277720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_0, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_0, GGML_TYPE_F16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_0.cu000066400000000000000000000004371512524704700301440ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_1.cu000066400000000000000000000004371512524704700301450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_0.cu000066400000000000000000000004371512524704700301450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_1.cu000066400000000000000000000004371512524704700301460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q8_0.cu000066400000000000000000000004371512524704700301500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-f16.cu000066400000000000000000000004341512524704700277730ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_1, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_1, GGML_TYPE_F16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_0.cu000066400000000000000000000004371512524704700301450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_1.cu000066400000000000000000000004371512524704700301460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_0.cu000066400000000000000000000004371512524704700301460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_1.cu000066400000000000000000000004371512524704700301470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q8_0.cu000066400000000000000000000004371512524704700301510ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-f16.cu000066400000000000000000000004341512524704700277750ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q8_0, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q8_0, GGML_TYPE_F16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_0.cu000066400000000000000000000004371512524704700301470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_1.cu000066400000000000000000000004371512524704700301500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_0.cu000066400000000000000000000004371512524704700301500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_1.cu000066400000000000000000000004371512524704700301510ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q8_0.cu000066400000000000000000000004371512524704700301530ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0); DECL_FATTN_VEC_CASE(256, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/generate_cu_files.py000077500000000000000000000065611512524704700266430ustar00rootroot00000000000000#!/usr/bin/env python3 from glob import glob import os HEAD_SIZES_KQ = [40, 64, 72, 80, 96, 112, 128, 256, 576] TYPES_KV = ["GGML_TYPE_F16", "GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0"] SOURCE_FATTN_TILE = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-tile.cuh" DECL_FATTN_TILE_CASE({head_size_kq}, {head_size_v}); """ SOURCE_FATTN_VEC = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-vec.cuh" DECL_FATTN_VEC_CASE( 64, {type_k}, {type_v}); DECL_FATTN_VEC_CASE(128, {type_k}, {type_v}); DECL_FATTN_VEC_CASE(256, {type_k}, {type_v}); """ SOURCE_FATTN_MMA_START = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../fattn-mma-f16.cuh" """ SOURCE_FATTN_MMA_CASE = "DECL_FATTN_MMA_F16_CASE({head_size_kq}, {head_size_v}, {ncols1}, {ncols2});\n" TYPES_MMQ = [ "GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", "GGML_TYPE_Q2_K", "GGML_TYPE_Q3_K", "GGML_TYPE_Q4_K", "GGML_TYPE_Q5_K", "GGML_TYPE_Q6_K", "GGML_TYPE_IQ2_XXS", "GGML_TYPE_IQ2_XS", "GGML_TYPE_IQ2_S", "GGML_TYPE_IQ3_XXS", "GGML_TYPE_IQ3_S", "GGML_TYPE_IQ1_S", "GGML_TYPE_IQ4_NL", "GGML_TYPE_IQ4_XS", "GGML_TYPE_MXFP4" ] SOURCE_MMQ = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE({type}); """ SOURCE_MMF = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../mmf.cuh" DECL_MMF_CASE({type}); """ def get_short_name(long_quant_name): return long_quant_name.replace("GGML_TYPE_", "").lower() for filename in glob("*.cu"): os.remove(filename) for head_size_kq in HEAD_SIZES_KQ: head_size_v = head_size_kq if head_size_kq != 576 else 512 with open(f"fattn-tile-instance-dkq{head_size_kq}-dv{head_size_v}.cu", "w") as f: f.write(SOURCE_FATTN_TILE.format(head_size_kq=head_size_kq, head_size_v=head_size_v)) for type_k in TYPES_KV: for type_v in TYPES_KV: with open(f"fattn-vec-instance-{get_short_name(type_k)}-{get_short_name(type_v)}.cu", "w") as f: f.write(SOURCE_FATTN_VEC.format(type_k=type_k, type_v=type_v)) for ncols in [8, 16, 32, 64]: for ncols2 in [1, 2, 4, 8, 16]: if ncols2 > ncols: continue ncols1 = ncols // ncols2 with open(f"fattn-mma-f16-instance-ncols1_{ncols1}-ncols2_{ncols2}.cu", "w") as f: f.write(SOURCE_FATTN_MMA_START) for head_size_kq in HEAD_SIZES_KQ: if head_size_kq == 40: continue if head_size_kq == 72: continue if head_size_kq != 576 and ncols2 == 16: continue if head_size_kq == 576 and ncols2 != 16: continue head_size_v = head_size_kq if head_size_kq != 576 else 512 f.write(SOURCE_FATTN_MMA_CASE.format(ncols1=ncols1, ncols2=ncols2, head_size_kq=head_size_kq, head_size_v=head_size_v)) for type in TYPES_MMQ: with open(f"mmq-instance-{get_short_name(type)}.cu", "w") as f: f.write(SOURCE_MMQ.format(type=type)) for type in range(1, 17): with open(f"mmf-instance-ncols_{type}.cu", "w") as f: f.write(SOURCE_MMF.format(type=type)) ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_1.cu000066400000000000000000000001751512524704700270640ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_10.cu000066400000000000000000000001761512524704700271450ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(10); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_11.cu000066400000000000000000000001761512524704700271460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(11); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_12.cu000066400000000000000000000001761512524704700271470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(12); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_13.cu000066400000000000000000000001761512524704700271500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(13); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_14.cu000066400000000000000000000001761512524704700271510ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(14); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_15.cu000066400000000000000000000001761512524704700271520ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../mmf.cuh" DECL_MMF_CASE(15); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_16.cu000066400000000000000000000001761512524704700271530ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(16); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_2.cu000066400000000000000000000001751512524704700270650ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(2); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_3.cu000066400000000000000000000001751512524704700270660ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(3); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_4.cu000066400000000000000000000001751512524704700270670ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(4); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_5.cu000066400000000000000000000001751512524704700270700ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(5); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_6.cu000066400000000000000000000001751512524704700270710ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(6); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_7.cu000066400000000000000000000001751512524704700270720ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(7); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_8.cu000066400000000000000000000001751512524704700270730ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(8); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmf-instance-ncols_9.cu000066400000000000000000000001751512524704700270740ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmf.cuh" DECL_MMF_CASE(9); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu000066400000000000000000000002131512524704700265460ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ1_S); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu000066400000000000000000000002131512524704700265470ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ2_S); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu000066400000000000000000000002141512524704700267400ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ2_XS); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu000066400000000000000000000002151512524704700271310ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ2_XXS); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu000066400000000000000000000002131512524704700265500ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ3_S); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu000066400000000000000000000002151512524704700271320ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ3_XXS); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu000066400000000000000000000002141512524704700267210ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ4_NL); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu000066400000000000000000000002141512524704700267420ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_IQ4_XS); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu000066400000000000000000000002131512524704700265700ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_MXFP4); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu000066400000000000000000000002121512524704700263650ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q2_K); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu000066400000000000000000000002121512524704700263660ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q3_K); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu000066400000000000000000000002121512524704700262740ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q4_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu000066400000000000000000000002121512524704700262750ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q4_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu000066400000000000000000000002121512524704700263670ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q4_K); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu000066400000000000000000000002121512524704700262750ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
#include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q5_0); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu000066400000000000000000000002121512524704700262760ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q5_1); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu000066400000000000000000000002121512524704700263700ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q5_K); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu000066400000000000000000000002121512524704700263710ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q6_K); ggml-org-ggml-3678254/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu000066400000000000000000000002121512524704700263000ustar00rootroot00000000000000// This file has been autogenerated by generate_cu_files.py, do not edit manually. #include "../mmq.cuh" DECL_MMQ_CASE(GGML_TYPE_Q8_0); ggml-org-ggml-3678254/src/ggml-cuda/topk-moe.cu000066400000000000000000000313561512524704700211070ustar00rootroot00000000000000#include "ggml-cuda/common.cuh" #include "ggml.h" #include "topk-moe.cuh" #include #include // Warp-local softmax used for both the pre-top-k logits and the post-top-k delayed path. template __device__ void softmax_warp_inplace(float (&vals)[experts_per_thread], const int limit, const int lane) { float max_val = -INFINITY; #pragma unroll for (int i = 0; i < experts_per_thread; i++) { const int idx = lane + i * WARP_SIZE; const bool active = !use_limit || (idx < limit); if (active) { max_val = max(max_val, vals[i]); } } max_val = warp_reduce_max(max_val); float sum = 0.f; #pragma unroll for (int i = 0; i < experts_per_thread; i++) { const int idx = lane + i * WARP_SIZE; const bool active = !use_limit || (idx < limit); if (active) { const float val = expf(vals[i] - max_val); vals[i] = val; sum += val; } else { vals[i] = 0.f; } } sum = warp_reduce_sum(sum); const float inv_sum = 1.0f / sum; #pragma unroll for (int i = 0; i < experts_per_thread; i++) { const int idx = lane + i * WARP_SIZE; const bool active = !use_limit || (idx < limit); if (active) { vals[i] *= inv_sum; } } } /* This kernel does the following: 1. optionally softmax over the logits per token [n_experts, n_tokens] 2. argmax reduce over the top-k (n_experts_used) logits 3. write weights + ids to global memory 4. optionally normalize the weights or apply softmax over the selected logits It is intended as fusion of softmax->top-k->get_rows pipeline for MoE models */ template __launch_bounds__(4 * WARP_SIZE, 1) __global__ void topk_moe_cuda(const float * logits, float * weights, int32_t * ids, const int n_rows, const int n_expert_used, const float clamp_val) { const int row = blockIdx.x * blockDim.y + threadIdx.y; if (row >= n_rows) { return; } logits += n_experts * row; weights += n_expert_used * row; ids += n_experts * row; constexpr int experts_per_thread = (n_experts > WARP_SIZE) ? n_experts / WARP_SIZE : 1; float wt[experts_per_thread]; #pragma unroll for (int i = 0; i < n_experts; i += WARP_SIZE) { const int expert = i + threadIdx.x; wt[i / WARP_SIZE] = (n_experts % WARP_SIZE == 0 || expert < n_experts) ? 
logits[expert] : -INFINITY; } if constexpr (!delayed_softmax) { softmax_warp_inplace(wt, n_experts, threadIdx.x); } //at this point, each thread holds either a portion of the softmax distribution //or the raw logits. We do the argmax reduce over n_expert_used, each time marking //the expert weight as -inf to exclude from the next iteration float wt_sum = 0.f; float output_weights[experts_per_thread]; #pragma unroll for (int i = 0; i < experts_per_thread; i++) { output_weights[i] = 0.f; } for (int k = 0; k < n_expert_used; k++) { float max_val = wt[0]; int max_expert = threadIdx.x; #pragma unroll for (int i = 1; i < experts_per_thread; i++) { const int expert = threadIdx.x + i * WARP_SIZE; if ((n_experts % WARP_SIZE == 0 || expert < n_experts) && wt[i] > max_val) { max_val = wt[i]; max_expert = expert; } } #pragma unroll for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2) { const float val = __shfl_xor_sync(0xFFFFFFFF, max_val, mask, WARP_SIZE); const int expert = __shfl_xor_sync(0xFFFFFFFF, max_expert, mask, WARP_SIZE); if (val > max_val || (val == max_val && expert < max_expert)) { max_val = val; max_expert = expert; } } if ((k & (WARP_SIZE - 1)) == threadIdx.x) { output_weights[k / WARP_SIZE] = max_val; } if ((max_expert & (WARP_SIZE - 1)) == threadIdx.x) { wt[max_expert / WARP_SIZE] = -INFINITY; ids[k] = max_expert; if constexpr (with_norm) { wt_sum += max_val; } } } if constexpr (with_norm) { wt_sum = warp_reduce_sum(wt_sum); wt_sum = max(wt_sum, clamp_val); const float inv_sum = 1.0f / wt_sum; for (int i = 0; i < experts_per_thread; i++) { output_weights[i] *= inv_sum; } } if constexpr (delayed_softmax) { softmax_warp_inplace(output_weights, n_expert_used, threadIdx.x); } #pragma unroll for (int i = 0; i < experts_per_thread; i++) { const int idx = i * WARP_SIZE + threadIdx.x; if (idx < n_expert_used) { weights[idx] = output_weights[i]; } } if (!with_norm) { GGML_UNUSED(clamp_val); } } template static void launch_topk_moe_cuda(ggml_backend_cuda_context & ctx, const float * logits, float * weights, int32_t * ids, const int n_rows, const int n_expert, const int n_expert_used, const float clamp_val) { static_assert(!(with_norm && delayed_softmax), "delayed softmax is not supported with weight normalization"); const int rows_per_block = 4; dim3 grid_dims((n_rows + rows_per_block - 1) / rows_per_block, 1, 1); dim3 block_dims(WARP_SIZE, rows_per_block, 1); cudaStream_t stream = ctx.stream(); switch (n_expert) { case 1: topk_moe_cuda<1, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 2: topk_moe_cuda<2, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 4: topk_moe_cuda<4, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 8: topk_moe_cuda<8, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 16: topk_moe_cuda<16, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 32: topk_moe_cuda<32, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 64: topk_moe_cuda<64, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 128: topk_moe_cuda<128, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; case 256: topk_moe_cuda<256, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, 
clamp_val); break; case 512: topk_moe_cuda<512, with_norm, delayed_softmax> <<>>(logits, weights, ids, n_rows, n_expert_used, clamp_val); break; default: GGML_ASSERT(false && "fatal error"); break; } } void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx, const ggml_tensor * logits, ggml_tensor * weights, ggml_tensor * ids, const bool with_norm, const bool delayed_softmax, ggml_tensor * clamp) { GGML_ASSERT(logits->type == GGML_TYPE_F32); GGML_ASSERT(weights->type == GGML_TYPE_F32); GGML_ASSERT(ids->type == GGML_TYPE_I32); const int n_experts = logits->ne[0]; const int n_rows = logits->ne[1]; const float * logits_d = (const float *) logits->data; float * weights_d = (float *) weights->data; int32_t * ids_d = (int32_t *) ids->data; GGML_ASSERT(ids->nb[1] / ggml_type_size(ids->type) == (size_t) n_experts); const int n_expert_used = weights->ne[1]; float clamp_val = -INFINITY; if (with_norm) { if (clamp) { clamp_val = ggml_get_op_params_f32(clamp, 0); } launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used, clamp_val); } else { GGML_ASSERT(clamp == nullptr); if (delayed_softmax) { launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used, clamp_val); } else { launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used, clamp_val); } } } bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const ggml_tensor * weights, const ggml_tensor * get_rows, const ggml_tensor * argsort, const ggml_tensor * clamp, int n_expert) { ggml_tensor * probs = get_rows->src[0]; if (probs->op != GGML_OP_RESHAPE) { return false; } probs = probs->src[0]; ggml_tensor * selection_probs = argsort->src[0]; if (probs != selection_probs) { return false; } float scale = 1.0f; float max_bias = 0.0f; memcpy(&scale, (const float *) softmax->op_params + 0, sizeof(float)); memcpy(&max_bias, (const float *) softmax->op_params + 1, sizeof(float)); if (!ggml_is_contiguous(softmax->src[0]) || !ggml_is_contiguous(weights)) { return false; } if (scale != 1.0f || max_bias != 0.0f) { return false; } // don't fuse when masks or sinks are present if (softmax->src[1] || softmax->src[2]) { return false; } // n_expert must be a power of 2 if ((n_expert & (n_expert - 1)) != 0 || n_expert > 512) { return false; } if (clamp) { if (clamp->op != GGML_OP_CLAMP) { return false; } float max_val = ggml_get_op_params_f32(clamp, 1); if (max_val != INFINITY) { return false; } } return true; } std::initializer_list ggml_cuda_topk_moe_ops(bool norm, bool delayed_softmax) { static std::initializer_list norm_ops = { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT, GGML_OP_VIEW, GGML_OP_GET_ROWS, GGML_OP_RESHAPE, GGML_OP_SUM_ROWS, GGML_OP_CLAMP, GGML_OP_DIV, GGML_OP_RESHAPE }; static std::initializer_list no_norm_ops = { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT, GGML_OP_VIEW, GGML_OP_GET_ROWS }; static std::initializer_list delayed_softmax_ops = { GGML_OP_ARGSORT, GGML_OP_VIEW, GGML_OP_GET_ROWS, GGML_OP_RESHAPE, GGML_OP_SOFT_MAX, GGML_OP_RESHAPE }; GGML_ASSERT(!norm || !delayed_softmax); if (delayed_softmax) { return delayed_softmax_ops; } if (norm) { return norm_ops; } return no_norm_ops; } ggml-org-ggml-3678254/src/ggml-cuda/topk-moe.cuh000066400000000000000000000017771512524704700212630ustar00rootroot00000000000000#include "common.cuh" #include "ggml.h" #include void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx, const ggml_tensor * logits, ggml_tensor * weights, ggml_tensor * ids, const bool with_norm, const bool delayed_softmax 
= false, ggml_tensor * weight_clamp = nullptr); bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const ggml_tensor * weights, const ggml_tensor * get_rows, const ggml_tensor * argsort, const ggml_tensor * clamp, int n_expert); std::initializer_list ggml_cuda_topk_moe_ops(bool with_norm, bool delayed_softmax = false); ggml-org-ggml-3678254/src/ggml-cuda/tri.cu000066400000000000000000000123571512524704700201520ustar00rootroot00000000000000#include "common.cuh" #include "convert.cuh" #include "tri.cuh" #include "ggml.h" template static __global__ void tri_kernel( const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t nb00, const int64_t nb01, const int64_t nb02, const int64_t nb03, const int64_t nb0, const int64_t nb1, const int64_t nb2, const int64_t nb3) { const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; const int64_t i1 = blockIdx.x; const int64_t split_point = i1 + add_to_split; GGML_UNUSED_VARS(nb00, nb0); if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { return; } const T * src_row = src + i1*nb01 + i2*nb02 + i3*nb03; T * dst_row = dst + i1*nb1 + i2*nb2 + i3*nb3; if constexpr (prefix_keep) { for (int64_t i0 = threadIdx.x; i0 < split_point; i0 += blockDim.x) { dst_row[i0] = src_row[i0]; } for (int64_t i0 = threadIdx.x + split_point; i0 < ne00; i0 += blockDim.x) { dst_row[i0] = ggml_cuda_cast(0.0f); } } else { for (int64_t i0 = threadIdx.x; i0 < split_point; i0 += blockDim.x) { dst_row[i0] = ggml_cuda_cast(0.0f); } for (int64_t i0 = threadIdx.x + split_point; i0 < ne00; i0 += blockDim.x) { dst_row[i0] = src_row[i0]; } } } template static void tri_cuda( const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t nb00, const int64_t nb01, const int64_t nb02, const int64_t nb03, const int64_t nb0, const int64_t nb1, const int64_t nb2, const int64_t nb3, const ggml_tri_type ttype, cudaStream_t stream) { dim3 block_dims(CUDA_TRI_BLOCK_SIZE, 1, 1); dim3 grid_dims(ne01, ne02, ne03); const size_t type_size = sizeof(T); const int add_to_split = (ttype == GGML_TRI_TYPE_LOWER_DIAG || ttype == GGML_TRI_TYPE_UPPER) ? 
1 : 0; const bool prefix_keep = (ttype == GGML_TRI_TYPE_LOWER || ttype == GGML_TRI_TYPE_LOWER_DIAG); if (prefix_keep) { if (add_to_split == 0) { tri_kernel<<>>( src, dst, ne00, ne01, ne02, ne03, nb00 / type_size, nb01 / type_size, nb02 / type_size, nb03 / type_size, nb0 / type_size, nb1 / type_size, nb2 / type_size, nb3 / type_size ); } else { // only 0 and 1 supported tri_kernel<<>>( src, dst, ne00, ne01, ne02, ne03, nb00 / type_size, nb01 / type_size, nb02 / type_size, nb03 / type_size, nb0 / type_size, nb1 / type_size, nb2 / type_size, nb3 / type_size ); } } else { if (add_to_split == 0) { tri_kernel<<>>( src, dst, ne00, ne01, ne02, ne03, nb00 / type_size, nb01 / type_size, nb02 / type_size, nb03 / type_size, nb0 / type_size, nb1 / type_size, nb2 / type_size, nb3 / type_size ); } else { tri_kernel<<>>( src, dst, ne00, ne01, ne02, ne03, nb00 / type_size, nb01 / type_size, nb02 / type_size, nb03 / type_size, nb0 / type_size, nb1 / type_size, nb2 / type_size, nb3 / type_size ); } } } void ggml_cuda_op_tri(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; cudaStream_t stream = ctx.stream(); const ggml_tri_type ttype = static_cast(ggml_get_op_params_i32(dst, 0)); GGML_ASSERT(src0->type == dst->type); switch(src0->type) { case GGML_TYPE_F32: { tri_cuda( (const float *)src0->data, (float *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], ttype, stream ); } break; case GGML_TYPE_F16: { tri_cuda( (const half *)src0->data, (half *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], ttype, stream ); } break; case GGML_TYPE_BF16: { tri_cuda( (const nv_bfloat16 *)src0->data, (nv_bfloat16 *)dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], ttype, stream ); } break; default: GGML_ABORT("fatal error"); } } ggml-org-ggml-3678254/src/ggml-cuda/tri.cuh000066400000000000000000000002031512524704700203050ustar00rootroot00000000000000#include "common.cuh" #define CUDA_TRI_BLOCK_SIZE 256 void ggml_cuda_op_tri(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/tsembd.cu000066400000000000000000000034051512524704700206240ustar00rootroot00000000000000#include "tsembd.cuh" static __global__ void timestep_embedding_f32(const float * timesteps, float * dst, const int nb1, const int dim, const int max_period) { // blockIDx.y: idx of timesteps->ne[0] // blockIDx.x: idx of ((dim + 1) / 2) / BLOCK_SIZE int i = blockIdx.y; int j = threadIdx.x + blockIdx.x * blockDim.x; float * embed_data = (float *)((char *)dst + i*nb1); int half = dim / 2; if (dim % 2 != 0 && j == half) { embed_data[2 * half] = 0.f; } if (j >= half) { return; } float timestep = timesteps[i]; float freq = (float)expf(-logf(max_period) * j / half); float arg = timestep * freq; embed_data[j] = cosf(arg); embed_data[j + half] = sinf(arg); } static void timestep_embedding_f32_cuda(const float * x, float * dst, const int ne00, const int nb1, const int dim, const int max_period, cudaStream_t stream) { int half_ceil = (dim + 1) / 2; int num_blocks = (half_ceil + CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE - 1) / CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE; dim3 gridDim(num_blocks, ne00, 1); timestep_embedding_f32<<>>(x, dst, nb1, dim, max_period); } void 
ggml_cuda_op_timestep_embedding(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); const int dim = dst->op_params[0]; const int max_period = dst->op_params[1]; timestep_embedding_f32_cuda(src0_d, dst_d, src0->ne[0], dst->nb[1], dim, max_period, stream); } ggml-org-ggml-3678254/src/ggml-cuda/tsembd.cuh000066400000000000000000000002411512524704700207670ustar00rootroot00000000000000#include "common.cuh" #define CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE 256 void ggml_cuda_op_timestep_embedding(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/unary.cu000066400000000000000000000445671512524704700205220ustar00rootroot00000000000000#include "unary.cuh" #include "convert.cuh" static __device__ __forceinline__ float op_abs(float x) { return fabsf(x); } static __device__ __forceinline__ float op_sgn(float x) { return (x > 0.f ? 1.f : ((x < 0.f ? -1.f : 0.f))); } static __device__ __forceinline__ float op_neg(float x) { return -x; } static __device__ __forceinline__ float op_step(float x) { return x > 0.0f; } static __device__ __forceinline__ float op_gelu(float x) { return ggml_cuda_op_gelu_single(x); } static __device__ __forceinline__ float op_gelu_erf(float x) { const float SQRT_2_INV = 0.70710678118654752440084436210484f; return 0.5f*x*(1.0f + erff(x*SQRT_2_INV)); } static __device__ __forceinline__ float op_gelu_quick(float x) { const float GELU_QUICK_COEF = -1.702f; return x * (1.0f / (1.0f + expf(GELU_QUICK_COEF * x))); } static __device__ __forceinline__ float op_silu(float x) { return ggml_cuda_op_silu_single(x); } static __device__ __forceinline__ float op_tanh(float x) { return tanhf(x); } static __device__ __forceinline__ float op_relu(float x) { return fmaxf(x, 0); } static __device__ __forceinline__ float op_sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } static __device__ __forceinline__ float op_hardsigmoid(float x) { return fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f)); } static __device__ __forceinline__ float op_hardswish(float x) { return x * fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f)); } static __device__ __forceinline__ float op_exp(float x) { return expf(x); } static __device__ __forceinline__ float op_sqr(float x) { return x * x; } static __device__ __forceinline__ float op_sqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ float op_sin(float x) { return sinf(x); } static __device__ __forceinline__ float op_cos(float x) { return cosf(x); } static __device__ __forceinline__ float op_log(float x) { return logf(x); } static __device__ __forceinline__ float op_expm1(float x) { return expm1f(x); } static __device__ __forceinline__ float op_softplus(float x) { return (x > 20.0f) ? x : logf(1.0f + expf(x)); } static __device__ __forceinline__ float op_elu(float x) { return (x > 0.f) ? 
x : expm1f(x); } static __device__ __forceinline__ float op_floor(float x) { return floorf(x); } static __device__ __forceinline__ float op_ceil(float x) { return ceilf(x); } static __device__ __forceinline__ float op_round(float x) { return round(x); } static __device__ __forceinline__ float op_trunc(float x) { return trunc(x); } template static __global__ void unary_op_kernel(const T * x, T * dst, const int k) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; } dst[i] = (T)op((float)x[i]); } template static void unary_cuda(const T * x, T * dst, const int k, cudaStream_t stream) { const int num_blocks = (k + CUDA_NEG_BLOCK_SIZE - 1) / CUDA_NEG_BLOCK_SIZE; unary_op_kernel<<>>(x, dst, k); } template void ggml_cuda_op_unary(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const void * src0_d = src0->data; void * dst_d = dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); if (src0->type == GGML_TYPE_F16) { unary_cuda((const half *)src0_d, (half *)dst_d, ggml_nelements(src0), stream); } else { unary_cuda((const float *)src0_d, (float *)dst_d, ggml_nelements(src0), stream); } } void ggml_cuda_op_abs(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_sgn(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_neg(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_step(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_gelu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_exp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_sin(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_cos(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { 
ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_floor(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_ceil(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_round(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_trunc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_expm1(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } void ggml_cuda_op_softplus(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } /* gated ops */ template static __global__ void unary_gated_op_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1) { const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x; if (i >= k) { return; } // perform base op and multiply with gate (either offset in same tensor or a separate one) const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); dst[i] = (T)(op((float)x[j0]) * (float)g[j1]); } template static void unary_gated_cuda(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, cudaStream_t stream) { const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE; unary_gated_op_kernel<<>>(x, g, dst, k, n, o0, o1); } template void ggml_cuda_op_unary_gated(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; void * src0_d = src0->data; void * src1_d = src1 ? src1->data : src0->data; const int64_t src0_o = src0->nb[1]; const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; void * dst_d = dst->data; const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); GGML_ASSERT(src1->ne[0] == nc); GGML_ASSERT(src0->type == src1->type); } const int32_t swapped = ((const int32_t *) dst->op_params)[1]; if (src0->type == GGML_TYPE_F16) { half * src0_p = (half *) src0_d; half * src1_p = (half *) src1_d; if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } unary_gated_cuda(src0_p, src1_p, (half *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(half), src1_o / sizeof(half), stream); } else { float * src0_p = (float *) src0_d; float * src1_p = (float *) src1_d; if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } unary_gated_cuda(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), stream); } } void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary_gated(ctx, dst); } void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary_gated(ctx, dst); } void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary_gated(ctx, dst); } void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary_gated(ctx, dst); } void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary_gated(ctx, dst); } // swiglu_oai template static __global__ void swiglu_oai_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, float alpha, float limit) { const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x; if (i >= k) { return; } // perform base op and multiply with gate (either offset in same tensor or a separate one) const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); float xi = x[j0]; float gi = g[j1]; dst[i] = ggml_cuda_op_swiglu_oai_single(xi, gi, alpha, limit); } template static void swiglu_oai_cuda(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, const float alpha, const float limit, cudaStream_t stream) { const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE; swiglu_oai_kernel<<>>(x, g, dst, k, n, o0, o1, alpha, limit); } void ggml_cuda_op_swiglu_oai(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; void * src0_d = src0->data; void * src1_d = src1 ? src1->data : src0->data; const int64_t src0_o = src0->nb[1]; const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; void * dst_d = dst->data; const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); GGML_ASSERT(src1->ne[0] == nc); GGML_ASSERT(src0->type == src1->type); } //const int32_t swapped = ((const int32_t *) dst->op_params)[1]; const int32_t swapped = ggml_get_op_params_i32(dst, 1); const float alpha = ggml_get_op_params_f32(dst, 2); const float limit = ggml_get_op_params_f32(dst, 3); float * src0_p = (float *) src0_d; float * src1_p = (float *) src1_d; if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } swiglu_oai_cuda(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), alpha, limit, stream); } /* CUDA kernel + launcher for xIELU */ template static __global__ void xielu_kernel(const T * x, T * dst, const int k, float alpha_n, float alpha_p, float beta, float eps) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; } const float xi = ggml_cuda_cast(x[i]); const float gate_pos = (xi > 0.0f); const float y_pos = alpha_p * xi * xi + beta * xi; const float min_v_eps = fminf(xi, eps); const float y_neg = (expm1f(min_v_eps) - xi) * alpha_n + beta * xi; const float out = gate_pos * y_pos + (1.0f - gate_pos) * y_neg; dst[i] = ggml_cuda_cast(out); } template static void xielu_cuda(const T * x, T * dst, const int k, float alpha_n, float alpha_p, float beta, float eps, cudaStream_t stream) { const int num_blocks = (k + CUDA_XIELU_BLOCK_SIZE) / CUDA_XIELU_BLOCK_SIZE; xielu_kernel<<>>(x, dst, k, alpha_n, alpha_p, beta, eps); } void ggml_cuda_op_xielu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const void * src0_d = src0->data; void * dst_d = dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); const float alpha_n = ggml_get_op_params_f32(dst, 1); const float alpha_p = ggml_get_op_params_f32(dst, 2); const float beta = ggml_get_op_params_f32(dst, 3); const float eps = ggml_get_op_params_f32(dst, 4); if (src0->type == GGML_TYPE_F16) { xielu_cuda((const half *)src0_d, (half *)dst_d, ggml_nelements(src0), alpha_n, alpha_p, beta, eps, stream); } else { xielu_cuda((const float *)src0_d, (float *)dst_d, ggml_nelements(src0), alpha_n, alpha_p, beta, eps, stream); } } /* silu_back */ static __device__ __forceinline__ float op_silu_back(float grad, float x) { const float s = 1.0f / (1.0f + expf(-x)); return grad * s * (1.0f + x * (1.0f - s)); } template static __global__ void silu_back_kernel(const T * grad, const T * xf, T * dst, const int k) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; } dst[i] = (T)op_silu_back((float)grad[i], (float)xf[i]); } template static void silu_back_cuda(const T * grad, const T * x, T * dst, const int k, cudaStream_t stream) { const int num_blocks = (k + CUDA_SILU_BACK_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE; silu_back_kernel<<>>(grad, x, dst, k); } void ggml_cuda_op_silu_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; // input from forward pass const ggml_tensor * src1 = dst->src[1]; // grads of forward pass output const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); if (src0->type == GGML_TYPE_F16) { silu_back_cuda((const half *)src0_d, (const half *)src1_d, (half *)dst_d, ggml_nelements(src0), stream); } else { silu_back_cuda((const float*)src0_d, (const float*)src1_d, (float *)dst_d, ggml_nelements(src0), stream); } } /* leaky relu */ static __device__ __forceinline__ float op_leaky_relu(float x, const float 
negative_slope) { return fmaxf(x, 0) + fminf(x, 0.0f) * negative_slope; } template static __global__ void leaky_relu_kernel(const T * x, T * dst, const int k, const float negative_slope) { const int i = blockDim.x*blockIdx.x + threadIdx.x; if (i >= k) { return; } dst[i] = (T)op_leaky_relu((float)x[i], negative_slope); } template static void leaky_relu_cuda(const T * x, T * dst, const int k, const float negative_slope, cudaStream_t stream) { const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE; leaky_relu_kernel<<>>(x, dst, k, negative_slope); } void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const void * src0_d = src0->data; void * dst_d = dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); float negative_slope; memcpy(&negative_slope, dst->op_params, sizeof(float)); if (src0->type == GGML_TYPE_F16) { leaky_relu_cuda((const half *)src0_d, (half *)dst_d, ggml_nelements(src0), negative_slope, stream); } else { leaky_relu_cuda((const float *)src0_d, (float *)dst_d, ggml_nelements(src0), negative_slope, stream); } } ggml-org-ggml-3678254/src/ggml-cuda/unary.cuh000066400000000000000000000077211512524704700206610ustar00rootroot00000000000000#pragma once #include "common.cuh" #define CUDA_NEG_BLOCK_SIZE 256 #define CUDA_STEP_BLOCK_SIZE 256 #define CUDA_GELU_BLOCK_SIZE 256 #define CUDA_SILU_BLOCK_SIZE 256 #define CUDA_SILU_BACK_BLOCK_SIZE 256 #define CUDA_TANH_BLOCK_SIZE 256 #define CUDA_RELU_BLOCK_SIZE 256 #define CUDA_SIGMOID_BLOCK_SIZE 256 #define CUDA_HARDSIGMOID_BLOCK_SIZE 256 #define CUDA_EXP_BLOCK_SIZE 256 #define CUDA_HARDSWISH_BLOCK_SIZE 256 #define CUDA_SQR_BLOCK_SIZE 256 #define CUDA_SQRT_BLOCK_SIZE 256 #define CUDA_SIN_BLOCK_SIZE 256 #define CUDA_COS_BLOCK_SIZE 256 #define CUDA_GLU_BLOCK_SIZE 256 #define CUDA_XIELU_BLOCK_SIZE 256 void ggml_cuda_op_abs(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_sgn(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_neg(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_step(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_silu_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_gelu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_exp(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_sin(ggml_backend_cuda_context & ctx, ggml_tensor * dst); 
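// --- Illustrative sketch (not part of the upstream header) ---------------------------
// The element-wise launchers in unary.cu share one launch shape: a 1D grid sized by
// ceil-division with one of the CUDA_*_BLOCK_SIZE constants above, plus a tail guard
// inside the kernel so the last, partially filled block does no out-of-bounds work.
// The names sketch_unary_kernel / sketch_unary_cuda below are invented for this sketch.
template <typename T>
static __global__ void sketch_unary_kernel(const T * x, T * dst, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i >= k) {
        return; // tail guard: threads past the end of the tensor do nothing
    }
    dst[i] = x[i]; // the per-element op (e.g. op_leaky_relu) would go here
}

template <typename T>
static void sketch_unary_cuda(const T * x, T * dst, const int k, cudaStream_t stream) {
    // ceil(k / block size) blocks so that every element is covered exactly once
    const int num_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE;
    sketch_unary_kernel<<<num_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
}
// --------------------------------------------------------------------------------------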
void ggml_cuda_op_cos(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_expm1(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_softplus(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_floor(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_ceil(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_round(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_trunc(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_swiglu_oai(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_xielu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); __device__ __forceinline__ float ggml_cuda_op_silu_single(float x) { return x / (1.0f + expf(-x)); } __device__ __forceinline__ float ggml_cuda_op_gelu_single(float x) { const float GELU_COEF_A = 0.044715f; const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; return 0.5f * x * (1.0f + tanhf(SQRT_2_OVER_PI * x * (1.0f + GELU_COEF_A * x * x))); } __device__ __forceinline__ float ggml_cuda_op_swiglu_oai_single(float x, float g, float alpha = 1.702f, float limit = 7.0f) { x = fminf(x, limit); g = fmaxf(fminf(g, limit), -limit); float out_glu = x / (1.0f + expf(-x * alpha)); out_glu = out_glu * (1.0f + g); return out_glu; } ggml-org-ggml-3678254/src/ggml-cuda/upscale.cu000066400000000000000000000321711512524704700210040ustar00rootroot00000000000000#include "upscale.cuh" static __global__ void upscale_f32(const float * x, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= ne10 * ne11 * ne12 * ne13) { return; } int i10 = index % ne10; int i11 = (index / ne10) % ne11; int i12 = (index / (ne10 * ne11)) % ne12; int i13 = (index / (ne10 * ne11 * ne12)) % ne13; int i00 = i10 / sf0; int i01 = i11 / sf1; int i02 = i12 / sf2; int i03 = i13 / sf3; dst[index] = *( (const float *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00) ); } static __global__ void upscale_f32_bilinear(const float * x, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne00_src, const int ne01_src, const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, const float sf0, const float sf1, const float sf2, const float sf3, const float pixel_offset) { const int64_t index = threadIdx.x + blockIdx.x * blockDim.x; const int64_t dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst; if (index >= dst_total_elements) { return; } const int i10_dst = index % ne10_dst; const int i11_dst = (index / ne10_dst) % ne11_dst; const int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst; const int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst); const int i02_src = (int)(i12_dst / sf2); const int i03_src = 
(int)(i13_dst / sf3); const float y_src_f = ((float)i11_dst + pixel_offset) / sf1 - pixel_offset; int y0_src = (int)floorf(y_src_f); int y1_src = y0_src + 1; y0_src = max(0, min(y0_src, ne01_src - 1)); y1_src = max(0, min(y1_src, ne01_src - 1)); float dy = y_src_f - (float)y0_src; dy = max(0.0f, min(dy, 1.0f)); float x_src_f = ((float)i10_dst + pixel_offset) / sf0 - pixel_offset; int x0_src = (int)floorf(x_src_f); int x1_src = x0_src + 1; x0_src = max(0, min(x0_src, ne00_src - 1)); x1_src = max(0, min(x1_src, ne00_src - 1)); float dx = x_src_f - (float)x0_src; dx = max(0.0f, min(dx, 1.0f)); const float * p_a = (const float *)((const char *)x + (int64_t)x0_src * nb00 + (int64_t)y0_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03); const float * p_b = (const float *)((const char *)x + (int64_t)x1_src * nb00 + (int64_t)y0_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03); const float * p_c = (const float *)((const char *)x + (int64_t)x0_src * nb00 + (int64_t)y1_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03); const float * p_d = (const float *)((const char *)x + (int64_t)x1_src * nb00 + (int64_t)y1_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03); const float val_a = *p_a; const float val_b = *p_b; const float val_c = *p_c; const float val_d = *p_d; float result = val_a * (1.0f - dx) * (1.0f - dy) + val_b * dx * (1.0f - dy) + val_c * (1.0f - dx) * dy + val_d * dx * dy; dst[index] = result; } // Similar to F.interpolate(..., mode="bilinear", align_corners=False, antialias=True) // https://github.com/pytorch/pytorch/blob/8871ff29b743948d1225389d5b7068f37b22750b/aten/src/ATen/native/cpu/UpSampleKernel.cpp static __global__ void upscale_f32_bilinear_antialias(const float * src0, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne00_src, const int ne01_src, const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, const float sf0, const float sf1, const float sf2, const float sf3, const float pixel_offset) { const int64_t index = threadIdx.x + blockIdx.x * blockDim.x; const int64_t dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst; if (index >= dst_total_elements) { return; } const int i10_dst = index % ne10_dst; const int i11_dst = (index / ne10_dst) % ne11_dst; const int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst; const int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst); const int i02_src = (int)(i12_dst / sf2); const int i03_src = (int)(i13_dst / sf3); const float y = ((float)i11_dst + pixel_offset) / sf1; const float x = ((float)i10_dst + pixel_offset) / sf0; // support and invscale, minimum 1 pixel for bilinear const float support1 = max(1.0f / sf1, 1.0f); const float invscale1 = 1.0f / support1; const float support0 = max(1.0f / sf0, 1.0f); const float invscale0 = 1.0f / support0; // the range of source pixels that contribute const int64_t x_min = max(int64_t(0), int64_t(x - support0 + pixel_offset)); const int64_t x_max = min(int64_t(ne00_src), int64_t(x + support0 + pixel_offset)); const int64_t y_min = max(int64_t(0), int64_t(y - support1 + pixel_offset)); const int64_t y_max = min(int64_t(ne01_src), int64_t(y + support1 + pixel_offset)); // bilinear filter with antialiasing float val = 0.0f; float total_weight = 0.0f; auto triangle_filter = [](float x) -> float { return max(1.0f - fabsf(x), 0.0f); }; for (int64_t sy = y_min; sy < y_max; sy++) { const float weight_y = triangle_filter((sy - y + pixel_offset) * invscale1); for (int64_t sx = x_min; 
sx < x_max; sx++) { const float weight_x = triangle_filter((sx - x + pixel_offset) * invscale0); const float weight = weight_x * weight_y; if (weight <= 0.0f) { continue; } const float pixel = *(const float *)((const char *)src0 + sx*nb00 + sy*nb01 + i02_src*nb02 + i03_src*nb03); val += pixel * weight; total_weight += weight; } } if (total_weight > 0.0f) { val /= total_weight; } dst[index] = val; } namespace bicubic_interpolation { // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm __device__ const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) static __device__ float weight1(float x) { return ((a + 2) * x - (a + 3)) * x * x + 1; }; static __device__ float weight2(float x) { return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; }; static __device__ float bicubic(float p0, float p1, float p2, float p3, float x) { const float w0 = weight2(x + 1); const float w1 = weight1(x + 0); const float w2 = weight1(1 - x); const float w3 = weight2(2 - x); return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; }; } // namespace bicubic_interpolation static __global__ void upscale_f32_bicubic(const float * x, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne00_src, const int ne01_src, const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, const float sf0, const float sf1, const float sf2, const float sf3, const float pixel_offset) { using bicubic_interpolation::bicubic; const int64_t index = threadIdx.x + blockIdx.x * blockDim.x; const int64_t dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst; if (index >= dst_total_elements) { return; } const int i10_dst = index % ne10_dst; const int i11_dst = (index / ne10_dst) % ne11_dst; const int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst; const int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst); const int i02_src = (int)(i12_dst / sf2); const int i03_src = (int)(i13_dst / sf3); const float y_src_f = ((float)i11_dst + pixel_offset) / sf1 - pixel_offset; const int y0_src = (int)floorf(y_src_f); const float dy = y_src_f - (float)y0_src; const float x_src_f = ((float)i10_dst + pixel_offset) / sf0 - pixel_offset; const int x0_src = (int)floorf(x_src_f); const float dx = x_src_f - (float)x0_src; const char * x_base = (const char *)x + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03; auto load = [=](int x_off, int y_off) -> float { int i00_src = max(0, min(x0_src + x_off, ne00_src - 1)); int i01_src = max(0, min(y0_src + y_off, ne01_src - 1)); return *(const float *)(x_base + (int64_t)i00_src * nb00 + (int64_t)i01_src * nb01); }; const float result = bicubic( bicubic(load(-1,-1), load(0,-1), load(1,-1), load(2,-1), dx), bicubic(load(-1, 0), load(0, 0), load(1, 0), load(2, 0), dx), bicubic(load(-1, 1), load(0, 1), load(1, 1), load(2, 1), dx), bicubic(load(-1, 2), load(0, 2), load(1, 2), load(2, 2), dx), dy); dst[index] = result; } static void upscale_f32_cuda(const float * x, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3, cudaStream_t stream) { const int64_t dst_size = ne10 * ne11 * ne12 * ne13; const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; upscale_f32<<>>(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3); } static void upscale_f32_bilinear_cuda(const float * x, float * dst, const int nb00, const int nb01, const int 
nb02, const int nb03, const int ne00_src, const int ne01_src, const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, const float sf0, const float sf1, const float sf2, const float sf3, const float pixel_offset, bool antialias, cudaStream_t stream) { const int64_t dst_size = ne10_dst * ne11_dst * ne12_dst * ne13_dst; const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; if (antialias) { upscale_f32_bilinear_antialias<<>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset); } else { upscale_f32_bilinear<<>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset); } } static void upscale_f32_bicubic_cuda(const float * x, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne00_src, const int ne01_src, const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, const float sf0, const float sf1, const float sf2, const float sf3, const float pixel_offset, cudaStream_t stream) { const int64_t dst_size = ne10_dst * ne11_dst * ne12_dst * ne13_dst; const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; upscale_f32_bicubic<<>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset); } void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); const int mode_flags = dst->op_params[0]; const ggml_scale_mode mode = (ggml_scale_mode)(mode_flags & 0xFF); float sf0 = (float)dst->ne[0]/src0->ne[0]; float sf1 = (float)dst->ne[1]/src0->ne[1]; float sf2 = (float)dst->ne[2]/src0->ne[2]; const float sf3 = (float)dst->ne[3]/src0->ne[3]; float pixel_offset = 0.5f; if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { sf0 = dst->ne[0] > 1 && src0->ne[0] > 1 ? (float)(dst->ne[0] - 1) / (src0->ne[0] - 1) : sf0; sf1 = dst->ne[1] > 1 && src0->ne[1] > 1 ? 
(float)(dst->ne[1] - 1) / (src0->ne[1] - 1) : sf1; pixel_offset = 0.0f; } if (mode == GGML_SCALE_MODE_NEAREST) { upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream); } else if (mode == GGML_SCALE_MODE_BILINEAR) { const bool antialias = (mode_flags & GGML_SCALE_FLAG_ANTIALIAS); upscale_f32_bilinear_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src0->ne[0], src0->ne[1], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, pixel_offset, antialias, stream); } else if (mode == GGML_SCALE_MODE_BICUBIC) { upscale_f32_bicubic_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src0->ne[0], src0->ne[1], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, pixel_offset, stream); } } ggml-org-ggml-3678254/src/ggml-cuda/upscale.cuh000066400000000000000000000002131512524704700211440ustar00rootroot00000000000000#include "common.cuh" #define CUDA_UPSCALE_BLOCK_SIZE 256 void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-cuda/vecdotq.cuh000066400000000000000000001242031512524704700211630ustar00rootroot00000000000000#pragma once #include "common.cuh" #include static __device__ __forceinline__ int get_int_b1(const void * x, const int & i32) { const uint8_t * x8 = (const uint8_t *) x; int x32 = x8[4*i32 + 0] << 0; x32 |= x8[4*i32 + 1] << 8; x32 |= x8[4*i32 + 2] << 16; x32 |= x8[4*i32 + 3] << 24; return x32; } static __device__ __forceinline__ int get_int_b2(const void * x, const int & i32) { const uint16_t * x16 = (const uint16_t *) x; // assume at least 2 byte alignment int x32 = x16[2*i32 + 0] << 0; x32 |= x16[2*i32 + 1] << 16; return x32; } static __device__ __forceinline__ int get_int_b4(const void * x, const int & i32) { return ((const int *) x)[i32]; // assume at least 4 byte alignment } // q4 contains 8 indices with 4 bit each. // This function selects those bytes from table that are at those indices and returns them as int2. // The first int contains the bytes with even indices in q4, the second int contains the bytes with odd indices in q4. static __device__ __forceinline__ int2 get_int_from_table_16(const int & q4, const int8_t * table) { #if defined(GGML_USE_HIP) // Load the 16-byte table into four 32-bit unsigned integers. const uint32_t *values = (const uint32_t *)table; const uint32_t q_even = q4; const uint32_t q_odd = (q4 >> 4); // Perform lookups in the lower half of the table (indices 0-7). uint32_t v_even_low = __builtin_amdgcn_perm(values[1], values[0], q_even & 0x07070707); uint32_t v_odd_low = __builtin_amdgcn_perm(values[1], values[0], q_odd & 0x07070707); // Perform lookups in the upper half of the table (indices 8-15). uint32_t v_even_high = __builtin_amdgcn_perm(values[3], values[2], q_even & 0x07070707); uint32_t v_odd_high = __builtin_amdgcn_perm(values[3], values[2], q_odd & 0x07070707); // Select between the low and high results based on the MSB of each index nibble. uint32_t mask_even = 0x03020100 | ((q_even & 0x08080808) >> 1); uint32_t res_x = __builtin_amdgcn_perm(v_even_high, v_even_low, mask_even); uint32_t mask_odd = 0x03020100 | ((q_odd & 0x08080808) >> 1); uint32_t res_y = __builtin_amdgcn_perm(v_odd_high, v_odd_low, mask_odd); return make_int2(res_x, res_y); #elif !defined(GGML_USE_MUSA) // CUDA does not have an instruction for selecting bytes with 4 bit indices. 
// However, __byte_perm is an instruction that selects bytes with 3 bit indices that can be used instead. const uint32_t * table32 = (const uint32_t *) table; // __byte_perm selects bytes based on the lower 16 bits in its third argument. // Therefore, do 2 iterations over the 32 bits in q4 with 0 and 16 shift. // To handle the fourth bit, first call _byte_perm both for the low and the high 64 bit of table, using the low 3 bits. // Then, call __byte_perm again to select from the low and high bytes based on the fourth bit. uint32_t tmp[2]; const uint32_t low_high_selection_indices = (0x32103210 | ((q4 & 0x88888888) >> 1)); #pragma unroll for (uint32_t i = 0; i < 2; ++i) { const uint32_t shift = 16 * i; const uint32_t low = __byte_perm(table32[0], table32[1], q4 >> shift); const uint32_t high = __byte_perm(table32[2], table32[3], q4 >> shift); tmp[i] = __byte_perm(low, high, low_high_selection_indices >> shift); } // tmp contains the bytes from tyble in the same order as the 4 bit indices in q4. // However, for the result we need ints with all even/odd 4 bit indices in q4. // Therefore, 2 more calls to __byte_perm to put the bytes in the correct order. return make_int2(__byte_perm(tmp[0], tmp[1], 0x6420), __byte_perm(tmp[0], tmp[1], 0x7531)); #else // Generic implementation. const int q0_32 = (q4 >> 0) & 0x0F0F0F0F; const int8_t * q0_8 = (const int8_t *) &q0_32; const char4 val0_8 = make_char4( table[q0_8[0]], table[q0_8[1]], table[q0_8[2]], table[q0_8[3]]); const int q1_32 = (q4 >> 4) & 0x0F0F0F0F; const int8_t * q1_8 = (const int8_t *) &q1_32; const char4 val1_8 = make_char4( table[q1_8[0]], table[q1_8[1]], table[q1_8[2]], table[q1_8[3]]); return make_int2(*((const int *) &val0_8), *((const int *) &val1_8)); #endif } // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q #define VDR_Q4_0_Q8_1_MMVQ 2 #define VDR_Q4_0_Q8_1_MMQ 4 template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( const int * v, const int * u, const float & d4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); } const float2 ds8f = __half22float2(ds8); // second part effectively subtracts 8 from each quant value return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); } #define VDR_Q4_1_Q8_1_MMVQ 2 #define VDR_Q4_1_Q8_1_MMQ 4 template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( const int * v, const int * u, const half2 & dm4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); } #ifdef FAST_FP16_AVAILABLE const float2 tmp = __half22float2(__hmul2(dm4, ds8)); const float d4d8 = tmp.x; const float m4s8 = tmp.y; #else const float2 dm4f = __half22float2(dm4); const float2 ds8f = __half22float2(ds8); const float d4d8 = dm4f.x * ds8f.x; const float m4s8 = dm4f.y * ds8f.y; #endif // FAST_FP16_AVAILABLE // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1)); } #define VDR_Q5_0_Q8_1_MMVQ 2 #define VDR_Q5_0_Q8_1_MMQ 4 template static 
__device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values } const float2 ds8f = __half22float2(ds8); // second part effectively subtracts 16 from each quant value return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y); } #define VDR_Q5_1_Q8_1_MMVQ 2 #define VDR_Q5_1_Q8_1_MMQ 4 template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values } #ifdef FAST_FP16_AVAILABLE const float2 tmp = __half22float2(__hmul2(dm5, ds8)); const float d5d8 = tmp.x; const float m5s8 = tmp.y; #else const float2 dm5f = __half22float2(dm5); const float2 ds8f = __half22float2(ds8); const float d5d8 = dm5f.x * ds8f.x; const float m5s8 = dm5f.y * ds8f.y; #endif // FAST_FP16_AVAILABLE // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it return sumi*d5d8 + m5s8 / (QI5_1 / vdr); } #define VDR_Q8_0_Q8_1_MMVQ 2 #define VDR_Q8_0_Q8_1_MMQ 8 template static __device__ __forceinline__ T vec_dot_q8_0_q8_1_impl( const int * v, const int * u, const T & d8_0, const T & d8_1) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(v[i], u[i], sumi); } return d8_0*d8_1 * ((T) sumi); } template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( const int * v, const int * u, const half2 & dm8, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(v[i], u[i], sumi); } #ifdef FAST_FP16_AVAILABLE const float2 tmp = __half22float2(__hmul2(dm8, ds8)); const float d8d8 = tmp.x; const float m8s8 = tmp.y; #else const float2 dm8f = __half22float2(dm8); const float2 ds8f = __half22float2(ds8); const float d8d8 = dm8f.x * ds8f.x; const float m8s8 = dm8f.y * ds8f.y; #endif // FAST_FP16_AVAILABLE // scale second part of sum by QI8_1/ vdr to compensate for multiple threads 
adding it return sumi*d8d8 + m8s8 / (QI8_1 / vdr); } template static __device__ __forceinline__ float vec_dot_q8_0_16_q8_1_impl( const int * v, const int * u, const float * d8_0, const float & d8_1) { float sumf = 0.0f; #pragma unroll for (int i0 = 0; i0 < vdr; i0 += QI8_0/2) { int sumi = 0; #pragma unroll for (int i = i0; i < i0 + QI8_0/2; ++i) { // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(v[i], u[i], sumi); } sumf += d8_0[i0/(QI8_0/2)]*sumi; } return d8_1*sumf; } #define VDR_MXFP4_Q8_1_MMVQ 2 #define VDR_MXFP4_Q8_1_MMQ 4 static __device__ __forceinline__ float vec_dot_mxfp4_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_mxfp4 * bq4 = (const block_mxfp4 *) vbq + kbx; const int * q8 = (const int *) bq8_1->qs + iqs; int sumi = 0; #pragma unroll for (int l = 0; l < VDR_MXFP4_Q8_1_MMVQ; ++l) { const int aux_q4 = get_int_b1(bq4->qs, iqs + l); const int2 v = get_int_from_table_16(aux_q4, kvalues_mxfp4); sumi = ggml_cuda_dp4a(v.x, q8[l + 0], sumi); sumi = ggml_cuda_dp4a(v.y, q8[l + 4], sumi); } const float d = ggml_cuda_e8m0_to_fp32(bq4->e) * 0.5f * __low2float(bq8_1->ds); return d * sumi; } #define VDR_Q2_K_Q8_1_MMVQ 1 #define VDR_Q2_K_Q8_1_MMQ 4 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR2_K; ++i) { const int sc = scales[2*i]; const int vi = (v >> (2*i)) & 0x03030303; sumf_d += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product // fill int with 4x m int m = sc >> 4; m |= m << 8; m |= m << 16; sumf_m += d8[i] * ggml_cuda_dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values } const float2 dm2f = __half22float2(dm2); return dm2f.x*sumf_d - dm2f.y*sumf_m; } // contiguous v/x + u/y values template static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const half2 * dm2, const float & d8, const half2 * s8) { float sumf = 0.0f; float sumf_d8 = 0.0f; #pragma unroll for (int i0 = 0; i0 < QR2_K*VDR_Q2_K_Q8_1_MMQ; i0 += QI8_1) { const float2 dm2f0 = __half22float2(dm2[i0/(QI8_1/2) + 0]); int sumi_d0 = 0; const float2 dm2f1 = __half22float2(dm2[i0/(QI8_1/2) + 1]); int sumi_d1 = 0; #pragma unroll for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_d0 = ggml_cuda_dp4a(v[i], u[i], sumi_d0); } sumf_d8 += dm2f0.x * sumi_d0; #pragma unroll for (int i = i0 + QI8_1/2; i < i0 + QI8_1; ++i) { sumi_d1 = ggml_cuda_dp4a(v[i], u[i], sumi_d1); } sumf_d8 += dm2f1.x * sumi_d1; if (i0/QI8_1 < ns8) { const float2 s8f = __half22float2(s8[i0/QI8_1]); sumf -= dm2f0.y*s8f.x; sumf -= dm2f1.y*s8f.y; } else { int sumi_m0 = 0; #pragma unroll for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_m0 = ggml_cuda_dp4a(0x01010101, u[i], sumi_m0); } sumf_d8 -= dm2f0.y * sumi_m0; int sumi_m1 = 0; #pragma unroll for (int i = i0 + QI8_1/2; i < i0 + QI8_1; ++i) { sumi_m1 = ggml_cuda_dp4a(0x01010101, u[i], sumi_m1); } sumf_d8 -= dm2f1.y * sumi_m1; } } return sumf + d8*sumf_d8; } #define VDR_Q3_K_Q8_1_MMVQ 1 #define VDR_Q3_K_Q8_1_MMQ 2 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, const int & scale_offset, const float & d3, const float * __restrict__ d8) 
{ float sumf = 0.0f; #pragma unroll for (int i = 0; i < QR3_K; ++i) { const int isc = scale_offset + 2*i; const int isc_low = isc % (QK_K/32); const int sc_shift_low = 4 * (isc / (QK_K/32)); const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF; const int isc_high = isc % (QK_K/64); const int sc_shift_high = 2 * (isc / (QK_K/64)); const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4; const int sc = (sc_low | sc_high) - 32; const int vil = (vl >> (2*i)) & 0x03030303; const int vih = ((vh >> i) << 2) & 0x04040404; const int vi = __vsubss4(vil, vih); sumf += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * sc); // SIMD dot product } return d3 * sumf; } // contiguous v/x + u/y values static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d3, const float & d8) { int sumi = 0; #pragma unroll for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) { int sumi_sc = 0; #pragma unroll for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_sc = ggml_cuda_dp4a(v[i], u[i], sumi_sc); // SIMD dot product } sumi += sumi_sc * scales[i0 / (QI8_1/2)]; } return d3*d8 * sumi; } #define VDR_Q4_K_Q8_1_MMVQ 2 #define VDR_Q4_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR4_K; ++i) { const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F; const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F; const int dot1 = ggml_cuda_dp4a(v1i, u[2*i+1], ggml_cuda_dp4a(v0i, u[2*i+0], 0)); // SIMD dot product const int dot2 = ggml_cuda_dp4a(0x01010101, u[2*i+1], ggml_cuda_dp4a(0x01010101, u[2*i+0], 0)); // sum of u sumf_d += d8[i] * (dot1 * sc[i]); sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } // contiguous v/x + u/y values static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) { int sumi_d = 0; #pragma unroll for (int j = 0; j < QI8_1; ++j) { sumi_d = ggml_cuda_dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product } const float2 ds8f = __half22float2(ds8[i]); sumf_d += ds8f.x * (sc[i] * sumi_d); sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } #define VDR_Q5_K_Q8_1_MMVQ 2 #define VDR_Q5_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR5_K; ++i) { const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F; const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F; const int vh0i = ((vh[0] >> i) << 4) & 0x10101010; const int vh1i = ((vh[1] >> i) << 4) & 
0x10101010; const int v0i = vl0i | vh0i; const int v1i = vl1i | vh1i; const int dot1 = ggml_cuda_dp4a(v0i, u[2*i+0], ggml_cuda_dp4a(v1i, u[2*i+1], 0)); // SIMD dot product const int dot2 = ggml_cuda_dp4a(0x01010101, u[2*i+0], ggml_cuda_dp4a(0x01010101, u[2*i+1], 0)); // sum of u sumf_d += d8[i] * (dot1 * sc[i]); sumf_m += d8[i] * (dot2 * m[i]); } const float2 dm5f = __half22float2(dm5); return dm5f.x*sumf_d - dm5f.y*sumf_m; } // contiguous v/x + u/y values static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) { int sumi_d = 0; #pragma unroll for (int j = 0; j < QI8_1; ++j) { sumi_d = ggml_cuda_dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product } const float2 ds8f = __half22float2(ds8[i]); sumf_d += ds8f.x * (sc[i] * sumi_d); sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } #define VDR_Q6_K_Q8_1_MMVQ 1 #define VDR_Q6_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d, const float * __restrict__ d8) { float sumf = 0.0f; #pragma unroll for (int i = 0; i < QR6_K; ++i) { const int sc = scales[4*i]; const int vil = (vl >> (4*i)) & 0x0F0F0F0F; const int vih = ((vh >> (4*i)) << 4) & 0x30303030; const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32 sumf += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * sc); // SIMD dot product } return d*sumf; } // contiguous v/x + u/y values static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, const float & d6, const float * __restrict__ d8) { float sumf_d = 0.0f; const int sc_packed = get_int_b4(sc, 0); const int8_t * sc_reg = (const int8_t *) &sc_packed; #pragma unroll for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) { int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale #pragma unroll for (int i = i0; i < i0 + 2; ++i) { sumi_d.x = ggml_cuda_dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product sumi_d.x = ggml_cuda_dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product sumi_d.y = ggml_cuda_dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product sumi_d.y = ggml_cuda_dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product } sumf_d += d8[i0/4] * (sc_reg[i0/2+0]*sumi_d.x + sc_reg[i0/2+1]*sumi_d.y); } return d6 * sumf_d; } static __device__ __forceinline__ float vec_dot_q4_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq + kbx; int v[VDR_Q4_0_Q8_1_MMVQ]; int u[2*VDR_Q4_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) { v[i] = get_int_b2(bq4_0->qs, iqs + i); u[2*i+0] = get_int_b4(bq8_1->qs, iqs + i); u[2*i+1] = get_int_b4(bq8_1->qs, iqs + i + QI4_0); } return vec_dot_q4_0_q8_1_impl(v, u, bq4_0->d, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq + kbx; int 
v[VDR_Q4_1_Q8_1_MMVQ]; int u[2*VDR_Q4_1_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) { v[i] = get_int_b4(bq4_1->qs, iqs + i); u[2*i+0] = get_int_b4(bq8_1->qs, iqs + i); u[2*i+1] = get_int_b4(bq8_1->qs, iqs + i + QI4_1); } return vec_dot_q4_1_q8_1_impl(v, u, bq4_1->dm, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q5_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq + kbx; int vl[VDR_Q5_0_Q8_1_MMVQ]; int vh[VDR_Q5_0_Q8_1_MMVQ]; int u[2*VDR_Q5_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) { vl[i] = get_int_b2(bq5_0->qs, iqs + i); vh[i] = get_int_b2(bq5_0->qh, 0) >> (4 * (iqs + i)); u[2*i+0] = get_int_b4(bq8_1->qs, iqs + i); u[2*i+1] = get_int_b4(bq8_1->qs, iqs + i + QI5_0); } return vec_dot_q5_0_q8_1_impl(vl, vh, u, bq5_0->d, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q5_1_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq + kbx; int vl[VDR_Q5_1_Q8_1_MMVQ]; int vh[VDR_Q5_1_Q8_1_MMVQ]; int u[2*VDR_Q5_1_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) { vl[i] = get_int_b4(bq5_1->qs, iqs + i); vh[i] = get_int_b4(bq5_1->qh, 0) >> (4 * (iqs + i)); u[2*i+0] = get_int_b4(bq8_1->qs, iqs + i); u[2*i+1] = get_int_b4(bq8_1->qs, iqs + i + QI5_1); } return vec_dot_q5_1_q8_1_impl(vl, vh, u, bq5_1->dm, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q8_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq + kbx; int v[VDR_Q8_0_Q8_1_MMVQ]; int u[VDR_Q8_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) { v[i] = get_int_b2(bq8_0->qs, iqs + i); u[i] = get_int_b4(bq8_1->qs, iqs + i); } return vec_dot_q8_0_q8_1_impl(v, u, bq8_0->d, __low2half(bq8_1->ds)); } static __device__ __forceinline__ float vec_dot_q2_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q2_K * bq2_K = (const block_q2_K *) vbq + kbx; const int bq8_offset = QR2_K * (iqs / QI8_1); const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); const uint8_t * scales = bq2_K->scales + scale_offset; const int v = get_int_b4(bq2_K->qs, iqs); int u[QR2_K]; float d8[QR2_K]; #pragma unroll for (int i = 0; i < QR2_K; ++ i) { u[i] = get_int_b4(bq8_1[bq8_offset + i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + i].ds); } return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8); } static __device__ __forceinline__ float vec_dot_q3_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q3_K * bq3_K = (const block_q3_K *) vbq + kbx; const int bq8_offset = QR3_K * (iqs / (QI3_K/2)); const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); const float d = bq3_K->d; const int vl = get_int_b2(bq3_K->qs, iqs); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted const int vh = ~get_int_b2(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset; int u[QR3_K]; float d8[QR3_K]; #pragma unroll for (int i = 0; i < QR3_K; ++i) { u[i] = get_int_b4(bq8_1[bq8_offset + i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + i].ds); } return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, 
scale_offset, d, d8); } static __device__ __forceinline__ float vec_dot_q4_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q4_K * bq4_K = (const block_q4_K *) vbq + kbx; int v[2]; int u[2*QR4_K]; float d8[QR4_K]; // iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6 const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2)); // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12 // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44 // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76 // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108 const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); v[0] = q4[0]; v[1] = q4[4]; const uint16_t * scales = (const uint16_t *)bq4_K->scales; uint16_t aux[2]; const int j = bq8_offset/2; if (j < 2) { aux[0] = scales[j+0] & 0x3f3f; aux[1] = scales[j+2] & 0x3f3f; } else { aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); } const uint8_t * sc = (const uint8_t *)aux; const uint8_t * m = sc + 2; for (int i = 0; i < QR4_K; ++i) { const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; d8[i] = __low2float(bq8i->ds); const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); u[2*i+0] = q8[0]; u[2*i+1] = q8[4]; } return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8); } static __device__ __forceinline__ float vec_dot_q5_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q5_K * bq5_K = (const block_q5_K *) vbq + kbx; int vl[2]; int vh[2]; int u[2*QR5_K]; float d8[QR5_K]; const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2)); const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4)); vl[0] = ql[0]; vl[1] = ql[4]; vh[0] = qh[0] >> bq8_offset; vh[1] = qh[4] >> bq8_offset; const uint16_t * scales = (const uint16_t *)bq5_K->scales; uint16_t aux[2]; const int j = bq8_offset/2; if (j < 2) { aux[0] = scales[j+0] & 0x3f3f; aux[1] = scales[j+2] & 0x3f3f; } else { aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); } const uint8_t * sc = (const uint8_t *)aux; const uint8_t * m = sc + 2; #pragma unroll for (int i = 0; i < QR5_K; ++i) { const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; d8[i] = __low2float(bq8i->ds); const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); u[2*i+0] = q8[0]; u[2*i+1] = q8[4]; } return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8); } static __device__ __forceinline__ float vec_dot_q6_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_q6_K * bq6_K = (const block_q6_K *) vbq + kbx; const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4); const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8); const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4)); const int vl = get_int_b2(bq6_K->ql, iqs); const int vh = get_int_b2(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift; const int8_t * scales = bq6_K->scales + scale_offset; int u[QR6_K]; float d8[QR6_K]; #pragma unroll for (int i = 0; i < QR6_K; ++i) { u[i] = get_int_b4(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset 
+ 2*i].ds); } return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8); } #define VDR_IQ2_XXS_Q8_1_MMVQ 2 #define VDR_IQ2_XXS_Q8_1_MMQ 2 static __device__ __forceinline__ float vec_dot_iq2_xxs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq + kbx; const int q2 = get_int_b2(bq2->qs, iqs); const uint8_t * aux8 = (const uint8_t *) &q2; const uint32_t aux32 = get_int_b2(bq2->qs, iqs + 1); int sumi = 0; #pragma unroll for (int k0 = 0; k0 < 8; k0 += 2) { const int * grid_pos = (const int *) (iq2xxs_grid + aux8[k0/2]); const int signs_packed = ksigns_iq2xs[(aux32 >> (7*k0/2)) & 0x7F]; const int signs0 = __vcmpne4(((signs_packed & 0x03) << 7) | ((signs_packed & 0x0C) << 21), 0x00000000); const int grid0 = __vsub4(grid_pos[0] ^ signs0, signs0); const int u0 = get_int_b4(bq8_1[iqs/2].qs, k0 + 0); sumi = ggml_cuda_dp4a(grid0, u0, sumi); const int signs1 = __vcmpne4(((signs_packed & 0x30) << 3) | ((signs_packed & 0xC0) << 17), 0x00000000); const int grid1 = __vsub4(grid_pos[1] ^ signs1, signs1); const int u1 = get_int_b4(bq8_1[iqs/2].qs, k0 + 1); sumi = ggml_cuda_dp4a(grid1, u1, sumi); } const int ls = aux32 >> 28; sumi = (ls*sumi + sumi/2)/4; const float d = __half2float(bq2->d) * __low2float(bq8_1[iqs/2].ds); return d * sumi; } #define VDR_IQ2_XS_Q8_1_MMVQ 2 #define VDR_IQ2_XS_Q8_1_MMQ 2 static __device__ __forceinline__ float vec_dot_iq2_xs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq + kbx; const int2 q2_packed = make_int2(get_int_b2(bq2->qs, iqs + 0), get_int_b2(bq2->qs, iqs + 1)); const uint16_t * q2 = (const uint16_t *) &q2_packed; const int ls0 = bq2->scales[iqs/2] & 0x0F; const int ls1 = bq2->scales[iqs/2] >> 4; int sumi0 = 0; int sumi1 = 0; #pragma unroll for (int l0 = 0; l0 < 8; l0 += 2) { const uint32_t * grid_pos = (const uint32_t *)(iq2xs_grid + (q2[l0/2] & 0x000001FF)); const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l0/2] >> 9)); const int grid_l = __vsub4(grid_pos[0] ^ signs[0], signs[0]); const int grid_h = __vsub4(grid_pos[1] ^ signs[1], signs[1]); const int u0 = get_int_b4(bq8_1[iqs/2].qs, l0 + 0); const int u1 = get_int_b4(bq8_1[iqs/2].qs, l0 + 1); if (l0 < 4) { sumi0 = ggml_cuda_dp4a(grid_l, u0, sumi0); sumi0 = ggml_cuda_dp4a(grid_h, u1, sumi0); } else { sumi1 = ggml_cuda_dp4a(grid_l, u0, sumi1); sumi1 = ggml_cuda_dp4a(grid_h, u1, sumi1); } } const int sumi = (sumi0*ls0 + sumi1*ls1 + (sumi0 + sumi1)/2)/4; const float d = __half2float(bq2->d) * __low2float(bq8_1[iqs/2].ds); return d * sumi; } #define VDR_IQ2_S_Q8_1_MMVQ 2 #define VDR_IQ2_S_Q8_1_MMQ 2 static __device__ __forceinline__ float vec_dot_iq2_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq2_s * bq2 = (const block_iq2_s *) vbq + kbx; const int qs_packed = get_int_b2(bq2->qs, iqs/2); const uint8_t * qs = (const uint8_t *) &qs_packed; const int qh = bq2->qh[iqs/2]; const int signs_packed_32 = get_int_b2(bq2->qs, QK_K/32 + iqs/2); const uint8_t * signs_packed_8 = (const uint8_t *) &signs_packed_32; const int ls0 = bq2->scales[iqs/2] & 0x0F; const int ls1 = bq2->scales[iqs/2] >> 4; int sumi0 = 0; int sumi1 = 0; #pragma unroll for (int l0 = 0; l0 < 8; l0 += 2) { const int * grid_pos = (const int *)(iq2s_grid + (qs[l0/2] | ((qh << (8-l0)) & 0x300))); const int signs0 = 
__vcmpne4(((signs_packed_8[l0/2] & 0x03) << 7) | ((signs_packed_8[l0/2] & 0x0C) << 21), 0x00000000); const int signs1 = __vcmpne4(((signs_packed_8[l0/2] & 0x30) << 3) | ((signs_packed_8[l0/2] & 0xC0) << 17), 0x00000000); const int grid_l = __vsub4(grid_pos[0] ^ signs0, signs0); const int grid_h = __vsub4(grid_pos[1] ^ signs1, signs1); const int u0 = get_int_b4(bq8_1[iqs/2].qs, l0 + 0); const int u1 = get_int_b4(bq8_1[iqs/2].qs, l0 + 1); if (l0 < 4) { sumi0 = ggml_cuda_dp4a(grid_l, u0, sumi0); sumi0 = ggml_cuda_dp4a(grid_h, u1, sumi0); } else { sumi1 = ggml_cuda_dp4a(grid_l, u0, sumi1); sumi1 = ggml_cuda_dp4a(grid_h, u1, sumi1); } } const int sumi = (sumi0*ls0 + sumi1*ls1 + (sumi0 + sumi1)/2)/4; const float d = __half2float(bq2->d) * __low2float(bq8_1[iqs/2].ds); return d * sumi; } #define VDR_IQ3_XXS_Q8_1_MMVQ 2 #define VDR_IQ3_XXS_Q8_1_MMQ 2 static __device__ __forceinline__ float vec_dot_iq3_xxs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq3_xxs * bq3 = (const block_iq3_xxs *) vbq + kbx; const int2 q3_packed = make_int2(get_int_b2(bq3->qs, iqs), get_int_b2(bq3->qs, iqs+1)); const uint8_t * q3 = (const uint8_t *) &q3_packed; const uint32_t aux32 = get_int_b2(bq3->qs, QK_K/16 + iqs/2); int sumi = 0; #pragma unroll for (int l0 = 0; l0 < 8; l0 += 2) { const int2 grid_pos = make_int2(iq3xxs_grid[q3[l0 + 0]], iq3xxs_grid[q3[l0 + 1]]); const int * signs = (const int *)(ksigns64 + ((aux32 >> (7*l0/2)) & 0x7F)); const int grid_l = __vsub4(grid_pos.x ^ signs[0], signs[0]); const int grid_h = __vsub4(grid_pos.y ^ signs[1], signs[1]); const int u0 = get_int_b4(bq8_1[iqs/2].qs, l0 + 0); const int u1 = get_int_b4(bq8_1[iqs/2].qs, l0 + 1); sumi = ggml_cuda_dp4a(grid_l, u0, sumi); sumi = ggml_cuda_dp4a(grid_h, u1, sumi); } const int ls = aux32 >> 28; sumi = (ls*sumi + sumi/2)/2; const float d = __half2float(bq3->d) * __low2float(bq8_1[iqs/2].ds); return d * sumi; } #define VDR_IQ3_S_Q8_1_MMVQ 2 #define VDR_IQ3_S_Q8_1_MMQ 2 // TODO: don't use lookup table for signs static __device__ __forceinline__ float vec_dot_iq3_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq3_s * bq3 = (const block_iq3_s *) vbq + kbx; const int2 qs_packed = make_int2(get_int_b2(bq3->qs, iqs + 0), get_int_b2(bq3->qs, iqs + 1)); const uint8_t * qs = (const uint8_t *) &qs_packed; const int qh = bq3->qh[iqs/2]; const int signs_packed_32 = get_int_b2(bq3->signs, iqs/2); const uint8_t * signs_packed_8 = (const uint8_t *) &signs_packed_32; int sumi = 0; #pragma unroll for (int l0 = 0; l0 < 8; l0 += 2) { const int2 grid_pos = make_int2( iq3s_grid[qs[l0 + 0] | ((qh << (8 - l0)) & 0x100)], iq3s_grid[qs[l0 + 1] | ((qh << (7 - l0)) & 0x100)]); const int signs0 = __vcmpne4(((signs_packed_8[l0/2] & 0x03) << 7) | ((signs_packed_8[l0/2] & 0x0C) << 21), 0x00000000); const int signs1 = __vcmpne4(((signs_packed_8[l0/2] & 0x30) << 3) | ((signs_packed_8[l0/2] & 0xC0) << 17), 0x00000000); const int grid_l = __vsub4(grid_pos.x ^ signs0, signs0); const int grid_h = __vsub4(grid_pos.y ^ signs1, signs1); const int u0 = get_int_b4(bq8_1[iqs/2].qs, l0 + 0); const int u1 = get_int_b4(bq8_1[iqs/2].qs, l0 + 1); sumi = ggml_cuda_dp4a(grid_l, u0, sumi); sumi = ggml_cuda_dp4a(grid_h, u1, sumi); } sumi *= 1 + 2*((bq3->scales[iqs/4] >> ((iqs << 1) & 0x04)) & 0x0F); const float d = __half2float(bq3->d) * __low2float(bq8_1[iqs/2].ds); return d * sumi; } #define VDR_IQ1_S_Q8_1_MMVQ 1 #define VDR_IQ1_S_Q8_1_MMQ 1 
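// --- Illustrative sketch (not part of the upstream header) ---------------------------
// Scalar reference for two idioms the vec_dot_iq*_q8_1 functions above lean on.
// dp4a_ref mirrors the semantics assumed here for ggml_cuda_dp4a (four signed int8
// products accumulated into an int32). apply_signs_ref mirrors the
// "__vsub4(grid ^ signs, signs)" pattern: the mask produced by __vcmpne4 is 0x00 or
// 0xFF per byte, and for a 0xFF byte (== -1 as int8) (b ^ 0xFF) - 0xFF = ~b + 1 = -b,
// so the XOR+subtract negates exactly the bytes selected by the mask.
// Both helper names are invented for this sketch.
static inline __host__ __device__ int dp4a_ref(const int a, const int b, int c) {
    const int8_t * a8 = (const int8_t *) &a;
    const int8_t * b8 = (const int8_t *) &b;
    for (int i = 0; i < 4; ++i) {
        c += a8[i]*b8[i];
    }
    return c;
}

static inline __host__ __device__ int apply_signs_ref(const int v, const int signs) {
    const int8_t  * v8 = (const int8_t  *) &v;
    const uint8_t * s8 = (const uint8_t *) &signs;
    uint32_t r = 0;
    for (int i = 0; i < 4; ++i) {
        const int8_t b = s8[i] ? (int8_t) -v8[i] : v8[i]; // byte-wise conditional negation
        r |= ((uint32_t) (uint8_t) b) << (8*i);
    }
    return (int) r;
}
// --------------------------------------------------------------------------------------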
static __device__ __forceinline__ float vec_dot_iq1_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq1_s * bq1 = (const block_iq1_s *) vbq + kbx; const int qs_packed = get_int_b2(bq1->qs, iqs); const uint8_t * qs = (const uint8_t *) &qs_packed; const int qh = bq1->qh[iqs]; int sumi = 0; #pragma unroll for (int l0 = 0; l0 < 8; l0 += 2) { const int grid = iq1s_grid_gpu[qs[l0/2] | (((qh >> 3*(l0/2)) & 0x07) << 8)]; const int grid0 = (grid >> 0) & 0x0F0F0F0F; const int grid1 = (grid >> 4) & 0x0F0F0F0F; const int u0 = get_int_b4(bq8_1[iqs].qs, l0 + 0); const int u1 = get_int_b4(bq8_1[iqs].qs, l0 + 1); sumi = ggml_cuda_dp4a(grid0, u0, sumi); sumi = ggml_cuda_dp4a(grid1, u1, sumi); } const float d1q = __half2float(bq1->d) * (((qh >> 11) & 0x0E) + 1); const float delta = -1.0f + IQ1S_DELTA - (qh & 0x8000) * (2.0f*IQ1S_DELTA/0x8000); const float2 ds = __half22float2(bq8_1[iqs].ds); return d1q * (ds.x*sumi + ds.y*delta); } #define VDR_IQ1_M_Q8_1_MMVQ 1 #define VDR_IQ1_M_Q8_1_MMQ 1 static __device__ __forceinline__ float vec_dot_iq1_m_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq1_m * bq1 = (const block_iq1_m *) vbq + kbx; const int qs_packed = get_int_b4(bq1->qs, iqs); const uint8_t * qs = (const uint8_t *) &qs_packed; int sumi[2] = {0}; float sumf[2] = {0.0f}; #pragma unroll for (int l0 = 0; l0 < 8; l0 += 2) { const int qhl = bq1->qh[2*iqs + l0/4] >> (4 * ((l0/2) % 2)); const int grid = iq1s_grid_gpu[qs[l0/2] | ((qhl & 0x07) << 8)]; const int grid0 = (grid >> 0) & 0x0F0F0F0F; const int grid1 = (grid >> 4) & 0x0F0F0F0F; const int u0 = get_int_b4(bq8_1[iqs].qs, l0 + 0); const int u1 = get_int_b4(bq8_1[iqs].qs, l0 + 1); sumi[l0/4] = ggml_cuda_dp4a(grid0, u0, sumi[l0/4]); sumi[l0/4] = ggml_cuda_dp4a(grid1, u1, sumi[l0/4]); const float delta = -1.0f + IQ1M_DELTA - (qhl & 0x08) * (2.0f*IQ1M_DELTA/0x08); int sumy = 0; sumy = ggml_cuda_dp4a(u0, 0x01010101, sumy); sumy = ggml_cuda_dp4a(u1, 0x01010101, sumy); sumf[l0/4] += delta*sumy; } const uint16_t * sc = (const uint16_t *) bq1->scales; iq1m_scale_t scale; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00F0) | ((sc[2] >> 4) & 0x0F00) | (sc[3] & 0xF000); const float d = __half2float(scale.f16) * __low2float(bq8_1[iqs].ds); const int tmp = sc[iqs/2] >> (6*(iqs%2)); const int sc0 = 2*((tmp >> 0) & 0x07) + 1; const int sc1 = 2*((tmp >> 3) & 0x07) + 1; return d * ((sumi[0] + sumf[0]) * sc0 + (sumi[1] + sumf[1]) * sc1); } #define VDR_IQ4_NL_Q8_1_MMVQ 2 #define VDR_IQ4_NL_Q8_1_MMQ 4 static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq4_nl * bq4 = (const block_iq4_nl *) vbq + kbx; const int * q8 = (const int *) bq8_1->qs + iqs; int sumi = 0; #pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) { const int aux_q4 = get_int_b2(bq4->qs, iqs + l); const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl); sumi = ggml_cuda_dp4a(v.x, q8[l + 0], sumi); sumi = ggml_cuda_dp4a(v.y, q8[l + 4], sumi); } const float d = __half2float(bq4->d) * __low2float(bq8_1->ds); return d * sumi; } #define VDR_IQ4_XS_Q8_1_MMVQ 4 #define VDR_IQ4_XS_Q8_1_MMQ 4 static __device__ __forceinline__ float vec_dot_iq4_xs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq + kbx; int sumi = 0; 
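    // Each iteration below reads one int of packed 4-bit indices from bq4->qs and
    // expands it through the non-linear iq4 codebook (kvalues_iq4nl) with
    // get_int_from_table_16: v.x holds the codebook bytes for the even nibbles,
    // v.y those for the odd nibbles. They are dotted (dp4a) against the matching
    // q8_1 ints at offsets j and j + 4, and the 6-bit block scale assembled from
    // scales_l/scales_h is applied afterwards with its -32 bias.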
#pragma unroll for (int j = 0; j < 4; ++j) { const int aux_q4 = get_int_b4(bq4->qs, iqs + j); const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl); const int u0 = get_int_b4(bq8_1[iqs/4].qs, j + 0); const int u1 = get_int_b4(bq8_1[iqs/4].qs, j + 4); sumi = ggml_cuda_dp4a(v.x, u0, sumi); sumi = ggml_cuda_dp4a(v.y, u1, sumi); } const int ls = ((bq4->scales_l[iqs/8] >> (iqs & 0x04)) & 0x0F) | (((bq4->scales_h >> (iqs/2)) & 0x03) << 4); sumi *= ls - 32; const float d = __half2float(bq4->d) * __low2float(bq8_1[iqs/4].ds); return d * sumi; } ggml-org-ggml-3678254/src/ggml-cuda/vendors/000077500000000000000000000000001512524704700204735ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-cuda/vendors/cuda.h000066400000000000000000000012171512524704700215610ustar00rootroot00000000000000#pragma once #include #include #include #include #include #if CUDART_VERSION >= 12050 #include #endif // CUDART_VERSION >= 12050 #if CUDART_VERSION >= 12080 #include #endif // CUDART_VERSION >= 12080 #if CUDART_VERSION < 11020 #define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED #define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH #define CUBLAS_COMPUTE_16F CUDA_R_16F #define CUBLAS_COMPUTE_32F CUDA_R_32F #define cublasComputeType_t cudaDataType_t #endif // CUDART_VERSION < 11020 ggml-org-ggml-3678254/src/ggml-cuda/vendors/hip.h000066400000000000000000000254611512524704700214340ustar00rootroot00000000000000#pragma once #define HIP_DISABLE_WARP_SYNC_BUILTINS 1 #include #include #include #include #if defined(GGML_HIP_ROCWMMA_FATTN) #include #endif // defined(GGML_HIP_ROCWMMA_FATTN) #define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT #define CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT #define CUBLAS_OP_N HIPBLAS_OP_N #define CUBLAS_OP_T HIPBLAS_OP_T #define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS #define CUBLAS_TF32_TENSOR_OP_MATH 0 #define CUDA_R_16F HIPBLAS_R_16F #define CUDA_R_16BF HIPBLAS_R_16B #define CUDA_R_32F HIPBLAS_R_32F #define CUBLAS_SIDE_RIGHT HIPBLAS_SIDE_RIGHT #define CUBLAS_FILL_MODE_UPPER HIPBLAS_FILL_MODE_UPPER #define CUBLAS_DIAG_NON_UNIT HIPBLAS_DIAG_NON_UNIT #define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED hipDeviceAttributeVirtualMemoryManagementSupported #define CU_MEM_ALLOC_GRANULARITY_RECOMMENDED hipMemAllocationGranularityRecommended #define CU_MEM_ALLOCATION_TYPE_PINNED hipMemAllocationTypePinned #define CU_MEM_LOCATION_TYPE_DEVICE hipMemLocationTypeDevice #define CU_MEM_ACCESS_FLAGS_PROT_READWRITE hipMemAccessFlagsProtReadWrite #define CU_CHECK(fn) {hipError_t err = fn; if(err != hipSuccess) { GGML_ABORT("HipVMM Failure: %s\n", hipGetErrorString(err)); }} #define __shfl_sync(mask, var, laneMask, width) __shfl(var, laneMask, width) #define __shfl_up_sync(mask, var, laneMask, width) __shfl_up(var, laneMask, width) #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width) #define __all_sync(mask, var) __all(var) #define __any_sync(mask, var) __any(var) #define cublasStrsmBatched hipblasStrsmBatched #define cublasCreate hipblasCreate #define cublasDestroy hipblasDestroy #define cublasGemmEx hipblasGemmEx #define cublasGemmBatchedEx hipblasGemmBatchedEx #define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx #define cublasHandle_t hipblasHandle_t #define cublasSetMathMode(handle, mode) CUBLAS_STATUS_SUCCESS #define cublasSetStream hipblasSetStream #define cublasSgemm hipblasSgemm #define cublasStatus_t hipblasStatus_t #define cublasOperation_t 
hipblasOperation_t #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess #define cudaDeviceProp hipDeviceProp_t #define cudaDeviceSynchronize hipDeviceSynchronize #define cudaError_t hipError_t #define cudaErrorPeerAccessAlreadyEnabled hipErrorPeerAccessAlreadyEnabled #define cudaErrorPeerAccessNotEnabled hipErrorPeerAccessNotEnabled #define cudaEventCreateWithFlags hipEventCreateWithFlags #define cudaEventDisableTiming hipEventDisableTiming #define cudaEventRecord hipEventRecord #define cudaEventSynchronize hipEventSynchronize #define cudaEvent_t hipEvent_t #define cudaEventDestroy hipEventDestroy #define cudaFree hipFree #define cudaFreeHost hipHostFree #define cudaGetDevice hipGetDevice #define cudaGetDeviceCount hipGetDeviceCount #define cudaGetDeviceProperties hipGetDeviceProperties #define cudaGetErrorString hipGetErrorString #define cudaGetLastError hipGetLastError #define cudaHostRegister hipHostRegister #define cudaHostRegisterPortable hipHostRegisterPortable #define cudaHostRegisterReadOnly hipHostRegisterReadOnly #define cudaHostUnregister hipHostUnregister #define cudaLaunchHostFunc hipLaunchHostFunc #define cudaMalloc hipMalloc #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) #define cudaMallocManaged hipMallocManaged #define cudaMemAdvise hipMemAdvise #define cudaMemcpy hipMemcpy #define cudaMemcpyAsync hipMemcpyAsync #define cudaMemcpyPeerAsync hipMemcpyPeerAsync #define cudaMemcpy2DAsync hipMemcpy2DAsync #define cudaMemcpyDeviceToDevice hipMemcpyDeviceToDevice #define cudaMemcpyDeviceToHost hipMemcpyDeviceToHost #define cudaMemcpyHostToDevice hipMemcpyHostToDevice #define cudaMemcpyKind hipMemcpyKind #define cudaMemset hipMemset #define cudaMemsetAsync hipMemsetAsync #define cudaMemGetInfo hipMemGetInfo #define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize #define cudaSetDevice hipSetDevice #define cuDeviceGet hipDeviceGet #define CUdevice hipDevice_t #define CUdeviceptr hipDeviceptr_t #define cuMemUnmap hipMemUnmap #define CUmemAccessDesc hipMemAccessDesc #define cuMemAddressFree hipMemAddressFree #define cuMemRelease hipMemRelease #define CUmemGenericAllocationHandle hipMemGenericAllocationHandle_t #define cuMemCreate hipMemCreate #define cuMemAddressReserve hipMemAddressReserve #define cuMemMap hipMemMap #define cuMemSetAccess hipMemSetAccess #define cuMemGetAllocationGranularity hipMemGetAllocationGranularity #define CUmemAllocationProp hipMemAllocationProp #define cuDeviceGetAttribute hipDeviceGetAttribute #define cudaStreamCreateWithFlags hipStreamCreateWithFlags #define cudaStreamDestroy hipStreamDestroy #define cudaStreamFireAndForget hipStreamFireAndForget #define cudaStreamNonBlocking hipStreamNonBlocking #define cudaStreamPerThread hipStreamPerThread #define cudaStreamSynchronize hipStreamSynchronize #define cudaStreamWaitEvent hipStreamWaitEvent #define cudaGraphExec_t hipGraphExec_t #define cudaGraphNode_t hipGraphNode_t #define cudaKernelNodeParams hipKernelNodeParams #define cudaKernelNodeParams hipKernelNodeParams #define cudaGraphExecDestroy hipGraphExecDestroy #define cudaGraphLaunch hipGraphLaunch #define cudaErrorGraphExecUpdateFailure hipErrorGraphExecUpdateFailure #define cudaGraphExecUpdateResult hipGraphExecUpdateResult #define cudaGraphNodeType hipGraphNodeType #define cudaGraphNodeTypeKernel hipGraphNodeTypeKernel #define cudaGraphInstantiate hipGraphInstantiate #define 
cudaStreamEndCapture hipStreamEndCapture #define cudaGraphDestroy hipGraphDestroy #define cudaGraphKernelNodeSetParams hipGraphKernelNodeSetParams #define cudaErrorInvalidDeviceFunction hipErrorInvalidDeviceFunction #define cudaGraphKernelNodeGetParams hipGraphKernelNodeGetParams #define cudaGraphNodeGetType hipGraphNodeGetType #define cudaGraphGetNodes hipGraphGetNodes #define cudaGraphExecUpdate hipGraphExecUpdate #define cudaStreamCaptureModeRelaxed hipStreamCaptureModeRelaxed #define cudaStreamBeginCapture hipStreamBeginCapture #define cudaGraph_t hipGraph_t #define cudaStream_t hipStream_t #define cudaSuccess hipSuccess #define cudaOccupancyMaxActiveBlocksPerMultiprocessor hipOccupancyMaxActiveBlocksPerMultiprocessor #define __trap() do { abort(); __builtin_unreachable(); } while(0) #define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS #define CUBLAS_STATUS_NOT_INITIALIZED HIPBLAS_STATUS_NOT_INITIALIZED #define CUBLAS_STATUS_ALLOC_FAILED HIPBLAS_STATUS_ALLOC_FAILED #define CUBLAS_STATUS_INVALID_VALUE HIPBLAS_STATUS_INVALID_VALUE #define CUBLAS_STATUS_ARCH_MISMATCH HIPBLAS_STATUS_ARCH_MISMATCH #define CUBLAS_STATUS_MAPPING_ERROR HIPBLAS_STATUS_MAPPING_ERROR #define CUBLAS_STATUS_EXECUTION_FAILED HIPBLAS_STATUS_EXECUTION_FAILED #define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR #define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED #if HIP_VERSION >= 60500000 #define CUBLAS_COMPUTE_16F HIPBLAS_COMPUTE_16F #define CUBLAS_COMPUTE_32F HIPBLAS_COMPUTE_32F #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_COMPUTE_32F_FAST_16F #define cublasComputeType_t hipblasComputeType_t #define cudaDataType_t hipDataType #else #define CUBLAS_COMPUTE_16F HIPBLAS_R_16F #define CUBLAS_COMPUTE_32F HIPBLAS_R_32F #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F #define cublasComputeType_t hipblasDatatype_t #define cudaDataType_t hipblasDatatype_t #endif // HIP_VERSION >= 6050000 #if !defined(__HIP_PLATFORM_AMD__) #error "The HIP backend supports only AMD targets" #endif // !defined(__HIP_PLATFORM_AMD__) #define __CUDA_ARCH__ 1300 #if defined(__gfx900__) || defined(__gfx906__) #define GCN5 #endif // defined(__gfx900__) || defined(__gfx906__) #if defined(__gfx803__) #define GCN4 #endif // defined(__gfx803__) #if defined(GCN5) || defined(GCN4) #define GCN #endif // defined(GCN5) || defined(GCN4) #if defined(__gfx942__) #define CDNA3 #endif // defined(__gfx942__) #if defined(__gfx90a__) #define CDNA2 #endif // defined(__gfx90a__) #if defined(__gfx908__) #define CDNA1 #endif // defined(__gfx908__) #if defined(CDNA3) || defined(CDNA2) || defined(CDNA1) #define CDNA // For the entire family #endif // defined(CDNA3) || defined(CDNA2) || defined(CDNA1) #if defined(__GFX12__) #define RDNA4 #endif // defined(__GFX12__) #if defined(__GFX11__) #define RDNA3 #endif // defined(__GFX11__) #if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || defined(__gfx1033__) || \ defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || defined(__gfx1037__) #define RDNA2 #endif #if defined(__gfx1010__) || defined(__gfx1012__) #define RDNA1 #endif // defined(__gfx1010__) || defined(__gfx1012__) #if defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(RDNA1) #define RDNA // For the entire family #endif // defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(RDNA1) #ifndef __has_builtin #define __has_builtin(x) 0 #endif typedef __hip_bfloat16 nv_bfloat16; typedef __hip_bfloat162 nv_bfloat162; typedef int8_t int8x4_t __attribute__((ext_vector_type(4))); typedef uint8_t 
uint8x4_t __attribute__((ext_vector_type(4))); static __device__ __forceinline__ int __vsubss4(const int a, const int b) { const int8x4_t va = reinterpret_cast(a); const int8x4_t vb = reinterpret_cast(b); #if __has_builtin(__builtin_elementwise_sub_sat) const int8x4_t c = __builtin_elementwise_sub_sat(va, vb); return reinterpret_cast(c); #else int8x4_t c; int16_t tmp; #pragma unroll for (int i = 0; i < 4; i++) { tmp = va[i] - vb[i]; if(tmp > std::numeric_limits::max()) tmp = std::numeric_limits::max(); if(tmp < std::numeric_limits::min()) tmp = std::numeric_limits::min(); c[i] = tmp; } return reinterpret_cast(c); #endif // __has_builtin(__builtin_elementwise_sub_sat) } static __device__ __forceinline__ int __vsub4(const int a, const int b) { return __vsubss4(a, b); } static __device__ __forceinline__ unsigned int __vcmpeq4(unsigned int a, unsigned int b) { const uint8x4_t& va = reinterpret_cast(a); const uint8x4_t& vb = reinterpret_cast(b); unsigned int c; uint8x4_t& vc = reinterpret_cast(c); #pragma unroll for (int i = 0; i < 4; ++i) { vc[i] = va[i] == vb[i] ? 0xff : 0x00; } return c; } static __device__ __forceinline__ unsigned int __vcmpne4(unsigned int a, unsigned int b) { const uint8x4_t& va = reinterpret_cast(a); const uint8x4_t& vb = reinterpret_cast(b); unsigned int c; uint8x4_t& vc = reinterpret_cast(c); #pragma unroll for (int i = 0; i < 4; ++i) { vc[i] = va[i] == vb[i] ? 0x00 : 0xff; } return c; } ggml-org-ggml-3678254/src/ggml-cuda/vendors/musa.h000066400000000000000000000147311512524704700216170ustar00rootroot00000000000000#pragma once #include #include #include #include #include #define CUBLAS_COMPUTE_16F CUDA_R_16F #define CUBLAS_COMPUTE_32F CUDA_R_32F #define CUBLAS_COMPUTE_32F_FAST_16F MUBLAS_COMPUTE_32F_FAST_16F #define CUBLAS_GEMM_DEFAULT MUBLAS_GEMM_DEFAULT #define CUBLAS_GEMM_DEFAULT_TENSOR_OP MUBLAS_GEMM_DEFAULT #define CUBLAS_OP_N MUBLAS_OP_N #define CUBLAS_OP_T MUBLAS_OP_T #define CUBLAS_DEFAULT_MATH MUBLAS_DEFAULT_MATH #define CUBLAS_SIDE_RIGHT MUBLAS_SIDE_RIGHT #define CUBLAS_FILL_MODE_UPPER MUBLAS_FILL_MODE_UPPER #define CUBLAS_DIAG_NON_UNIT MUBLAS_DIAG_NON_UNIT #define CUBLAS_STATUS_SUCCESS MUBLAS_STATUS_SUCCESS #define CUBLAS_TF32_TENSOR_OP_MATH MUBLAS_TENSOR_OP_MATH #define CUDA_R_16F MUSA_R_16F #define CUDA_R_16BF MUSA_R_16BF #define CUDA_R_32F MUSA_R_32F #define cublasStrsmBatched mublasStrsmBatched #define cublasComputeType_t cudaDataType_t #define cublasCreate mublasCreate #define cublasDestroy mublasDestroy #define cublasGemmEx mublasGemmEx #define cublasGemmBatchedEx mublasGemmBatchedEx #define cublasGemmStridedBatchedEx mublasGemmStridedBatchedEx #define cublasHandle_t mublasHandle_t #define cublasSetMathMode mublasSetMathMode #define cublasSetStream mublasSetStream #define cublasSgemm mublasSgemm #define cublasStatus_t mublasStatus_t #define cublasOperation_t mublasOperation_t #define cublasGetStatusString mublasGetStatusString #define cudaDataType_t musaDataType_t #define cudaDeviceCanAccessPeer musaDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess musaDeviceDisablePeerAccess #define cudaDeviceEnablePeerAccess musaDeviceEnablePeerAccess #define cudaDeviceProp musaDeviceProp #define cudaDeviceSynchronize musaDeviceSynchronize #define cudaError_t musaError_t #define cudaErrorPeerAccessAlreadyEnabled musaErrorPeerAccessAlreadyEnabled #define cudaErrorPeerAccessNotEnabled musaErrorPeerAccessNotEnabled #define cudaEventCreateWithFlags musaEventCreateWithFlags #define cudaEventDisableTiming musaEventDisableTiming #define cudaEventRecord 
musaEventRecord #define cudaEventSynchronize musaEventSynchronize #define cudaEvent_t musaEvent_t #define cudaEventDestroy musaEventDestroy #define cudaFree musaFree #define cudaFreeHost musaFreeHost #define cudaGetDevice musaGetDevice #define cudaGetDeviceCount musaGetDeviceCount #define cudaGetDeviceProperties musaGetDeviceProperties #define cudaGetErrorString musaGetErrorString #define cudaGetLastError musaGetLastError #define cudaHostRegister musaHostRegister #define cudaHostRegisterPortable musaHostRegisterPortable #define cudaHostRegisterReadOnly musaHostRegisterReadOnly #define cudaHostUnregister musaHostUnregister #define cudaLaunchHostFunc musaLaunchHostFunc #define cudaMalloc musaMalloc #define cudaMallocHost musaMallocHost #define cudaMallocManaged musaMallocManaged #define cudaMemcpy musaMemcpy #define cudaMemcpyAsync musaMemcpyAsync #define cudaMemcpyPeerAsync musaMemcpyPeerAsync #define cudaMemcpy2DAsync musaMemcpy2DAsync #define cudaMemcpyDeviceToDevice musaMemcpyDeviceToDevice #define cudaMemcpyDeviceToHost musaMemcpyDeviceToHost #define cudaMemcpyHostToDevice musaMemcpyHostToDevice #define cudaMemcpyKind musaMemcpyKind #define cudaMemset musaMemset #define cudaMemsetAsync musaMemsetAsync #define cudaMemGetInfo musaMemGetInfo #define cudaOccupancyMaxPotentialBlockSize musaOccupancyMaxPotentialBlockSize #define cudaSetDevice musaSetDevice #define cudaStreamCreateWithFlags musaStreamCreateWithFlags #define cudaStreamDestroy musaStreamDestroy #define cudaStreamFireAndForget musaStreamFireAndForget #define cudaStreamNonBlocking musaStreamNonBlocking #define cudaStreamPerThread musaStreamPerThread #define cudaStreamSynchronize musaStreamSynchronize #define cudaStreamWaitEvent musaStreamWaitEvent #define cudaStream_t musaStream_t #define cudaSuccess musaSuccess // Additional mappings for MUSA virtual memory pool #define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED MU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED #define CU_MEM_ACCESS_FLAGS_PROT_READWRITE MU_MEM_ACCESS_FLAGS_PROT_READWRITE #define CU_MEM_ALLOC_GRANULARITY_RECOMMENDED MU_MEM_ALLOC_GRANULARITY_RECOMMENDED #define CU_MEM_ALLOCATION_TYPE_PINNED MU_MEM_ALLOCATION_TYPE_PINNED #define CU_MEM_LOCATION_TYPE_DEVICE MU_MEM_LOCATION_TYPE_DEVICE #define CUdevice MUdevice #define CUdeviceptr MUdeviceptr #define CUmemAccessDesc MUmemAccessDesc #define CUmemAllocationProp MUmemAllocationProp #define CUmemGenericAllocationHandle MUmemGenericAllocationHandle #define cuDeviceGet muDeviceGet #define cuDeviceGetAttribute muDeviceGetAttribute #define cuMemAddressFree muMemAddressFree #define cuMemAddressReserve muMemAddressReserve #define cuMemCreate muMemCreate #define cuMemGetAllocationGranularity muMemGetAllocationGranularity #define cuMemMap muMemMap #define cuMemRelease muMemRelease #define cuMemSetAccess muMemSetAccess #define cuMemUnmap muMemUnmap #define cudaFuncAttributeMaxDynamicSharedMemorySize musaFuncAttributeMaxDynamicSharedMemorySize #define cudaFuncSetAttribute musaFuncSetAttribute #define cudaMemcpy3DPeerParms musaMemcpy3DPeerParms #define make_cudaExtent make_musaExtent #define make_cudaPitchedPtr make_musaPitchedPtr // Additional mappings for MUSA graphs #define CUDA_SUCCESS MUSA_SUCCESS #define CUresult MUresult #define cuGetErrorString muGetErrorString #define cudaErrorGraphExecUpdateFailure musaErrorGraphExecUpdateFailure #define cudaErrorInvalidDeviceFunction musaErrorInvalidDeviceFunction #define cudaGraphDestroy musaGraphDestroy #define cudaGraphExecDestroy musaGraphExecDestroy #define 
cudaGraphExec_t musaGraphExec_t #define cudaGraphExecUpdate musaGraphExecUpdate #define cudaGraphExecUpdateResult musaGraphExecUpdateResult #define cudaGraphGetNodes musaGraphGetNodes #define cudaGraphInstantiate musaGraphInstantiate #define cudaGraphKernelNodeGetParams musaGraphKernelNodeGetParams #define cudaGraphKernelNodeSetParams musaGraphKernelNodeSetParams #define cudaGraphLaunch musaGraphLaunch #define cudaGraphNodeGetType musaGraphNodeGetType #define cudaGraphNode_t musaGraphNode_t #define cudaGraphNodeType musaGraphNodeType #define cudaGraphNodeTypeKernel musaGraphNodeTypeKernel #define cudaGraph_t musaGraph_t #define cudaKernelNodeParams musaKernelNodeParams #define cudaStreamCaptureModeRelaxed musaStreamCaptureModeRelaxed #define cudaStreamBeginCapture musaStreamBeginCapture #define cudaStreamEndCapture musaStreamEndCapture #define cudaOccupancyMaxActiveBlocksPerMultiprocessor musaOccupancyMaxActiveBlocksPerMultiprocessor typedef __mt_bfloat16 nv_bfloat16; typedef __mt_bfloat162 nv_bfloat162; ggml-org-ggml-3678254/src/ggml-cuda/wkv.cu000066400000000000000000000155551512524704700201660ustar00rootroot00000000000000#include "common.cuh" #include "wkv.cuh" template static __global__ void rwkv_wkv_f32(const int B, const int T, const int C, const int H, const float * k, const float * v, const float * r, const float * tf, const float * td, const float * s, float * dst) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int head_size = block_size; const int batch_i = bid / H; const int head_i = bid % H; const int state_size = C * head_size; const int n_seq_tokens = T / B; float state[head_size]; __shared__ float _k[head_size], _r[head_size], _tf[head_size], _td[head_size]; #pragma unroll for (int i = 0; i < head_size; i++) { state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid]; } __syncthreads(); _tf[tid] = tf[head_i * head_size + tid]; __syncthreads(); for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) { __syncthreads(); _k[tid] = k[t]; _r[tid] = r[t]; _td[tid] = td[t]; __syncthreads(); const float _v = v[t]; float y = 0; for (int j = 0; j < head_size; j += 4) { const float4& k = (float4&)(_k[j]); const float4& r = (float4&)(_r[j]); const float4& tf = (float4&)(_tf[j]); const float4& td = (float4&)(_td[j]); float4& s = (float4&)(state[j]); float4 kv; kv.x = k.x * _v; kv.y = k.y * _v; kv.z = k.z * _v; kv.w = k.w * _v; y += r.x * (tf.x * kv.x + s.x); y += r.y * (tf.y * kv.y + s.y); y += r.z * (tf.z * kv.z + s.z); y += r.w * (tf.w * kv.w + s.w); s.x = s.x * td.x + kv.x; s.y = s.y * td.y + kv.y; s.z = s.z * td.z + kv.z; s.w = s.w * td.w + kv.w; } dst[t] = y; } #pragma unroll for (int i = 0; i < head_size; i++) { dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i]; } } template static __global__ void rwkv_wkv7_f32(const int B, const int T, const int C, const int H, const float * r, const float * w, const float * k, const float * v, const float * a, const float * b, const float * s, float * dst) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int head_size = block_size; const int batch_i = bid / H; const int head_i = bid % H; const int state_size = C * head_size; const int n_seq_tokens = T / B; float state[head_size]; __shared__ float _r[head_size], _w[head_size], _k[head_size], _a[head_size], _b[head_size]; #ifndef GGML_USE_MUSA #pragma unroll #endif for (int i = 0; i < head_size; i++) { 
state[i] = s[batch_i * state_size + head_i * head_size * head_size + tid * head_size + i]; } for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) { __syncthreads(); _r[tid] = r[t]; _w[tid] = w[t]; _k[tid] = k[t]; _a[tid] = a[t]; _b[tid] = b[t]; __syncthreads(); float sa = 0; #pragma unroll for (int j = 0; j < head_size; j += 4) { const float4& a = (float4&)(_a[j]); const float4& s = (float4&)(state[j]); sa += a.x * s.x; sa += a.y * s.y; sa += a.z * s.z; sa += a.w * s.w; } const float _v = v[t]; float y = 0; for (int j = 0; j < head_size; j += 4) { const float4& r = (float4&)(_r[j]); const float4& w = (float4&)(_w[j]); const float4& k = (float4&)(_k[j]); const float4& b = (float4&)(_b[j]); float4& s = (float4&)(state[j]); float4 kv; kv.x = k.x * _v; kv.y = k.y * _v; kv.z = k.z * _v; kv.w = k.w * _v; s.x = s.x * w.x + kv.x + sa * b.x; s.y = s.y * w.y + kv.y + sa * b.y; s.z = s.z * w.z + kv.z + sa * b.z; s.w = s.w * w.w + kv.w + sa * b.w; y += s.x * r.x; y += s.y * r.y; y += s.z * r.z; y += s.w * r.w; } dst[t] = y; } #pragma unroll for (int i = 0; i < head_size; i++) { dst[T * C + batch_i * state_size + head_i * head_size * head_size + tid * head_size + i] = state[i]; } } void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const float * k_d = (const float *)dst->src[0]->data; const float * v_d = (const float *)dst->src[1]->data; const float * r_d = (const float *)dst->src[2]->data; const float * tf_d = (const float *)dst->src[3]->data; const float * td_d = (const float *)dst->src[4]->data; const float * s_d = (const float *)dst->src[5]->data; const int64_t B = dst->src[5]->ne[1]; const int64_t T = dst->src[0]->ne[2]; const int64_t C = dst->ne[0]; const int64_t H = dst->src[0]->ne[1]; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); GGML_ASSERT(C % H == 0); GGML_ASSERT(C / H == CUDA_WKV_BLOCK_SIZE || C / H == CUDA_WKV_BLOCK_SIZE * 2); if (C / H == CUDA_WKV_BLOCK_SIZE) { rwkv_wkv_f32<<>>(B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d); } else { rwkv_wkv_f32<<>>(B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d); } } void ggml_cuda_op_rwkv_wkv7(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const float * r_d = (const float *)dst->src[0]->data; const float * w_d = (const float *)dst->src[1]->data; const float * k_d = (const float *)dst->src[2]->data; const float * v_d = (const float *)dst->src[3]->data; const float * a_d = (const float *)dst->src[4]->data; const float * b_d = (const float *)dst->src[5]->data; const float * s_d = (const float *)dst->src[6]->data; const int64_t B = dst->src[6]->ne[1]; const int64_t T = dst->src[0]->ne[2]; const int64_t C = dst->ne[0]; const int64_t H = dst->src[0]->ne[1]; float * dst_d = (float *)dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(dst->src[6]->type == GGML_TYPE_F32); GGML_ASSERT(C % H == 0); GGML_ASSERT(C / H == CUDA_WKV_BLOCK_SIZE || C / H == CUDA_WKV_BLOCK_SIZE * 2); if (C / H == CUDA_WKV_BLOCK_SIZE) { rwkv_wkv7_f32<<>>(B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d); } else { rwkv_wkv7_f32<<>>(B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d); } } ggml-org-ggml-3678254/src/ggml-cuda/wkv.cuh000066400000000000000000000003321512524704700203210ustar00rootroot00000000000000#include "common.cuh" #define CUDA_WKV_BLOCK_SIZE 64 void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void 
ggml_cuda_op_rwkv_wkv7(ggml_backend_cuda_context & ctx, ggml_tensor * dst); ggml-org-ggml-3678254/src/ggml-hexagon/000077500000000000000000000000001512524704700175305ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-hexagon/CMakeLists.txt000066400000000000000000000062071512524704700222750ustar00rootroot00000000000000include(${HEXAGON_SDK_ROOT}/build/cmake/hexagon_fun.cmake) include(ExternalProject) option(GGML_HEXAGON_HTP_DEBUG "ggml-hexagon: enable HTP debug output" OFF) set(GGML_HEXAGON_FP32_QUANTIZE_GROUP_SIZE 128 CACHE STRING "ggml-hexagon: quantize group size (32, 64, or 128)") add_library(htp_iface OBJECT ${CMAKE_CURRENT_BINARY_DIR}/htp_iface_stub.c) set_target_properties(htp_iface PROPERTIES POSITION_INDEPENDENT_CODE ON) target_include_directories(htp_iface PUBLIC ${HEXAGON_SDK_ROOT}/incs ${HEXAGON_SDK_ROOT}/incs/stddef ${HEXAGON_SDK_ROOT}/utils/examples ${CMAKE_CURRENT_SOURCE_DIR}/htp ${CMAKE_CURRENT_BINARY_DIR}) build_idl(htp/htp_iface.idl htp_iface) if (CMAKE_SYSTEM_NAME MATCHES Android) target_link_options(htp_iface PUBLIC -llog -ldl) elseif (CMAKE_SYSTEM_NAME MATCHES Windows) target_precompile_headers(htp_iface PUBLIC ) else() target_link_options(htp_iface PUBLIC -ldl) endif() link_custom_library(htp_iface cdsprpc) link_custom_library(htp_iface rpcmem) set(TARGET_NAME ggml-hexagon) ggml_add_backend_library(${TARGET_NAME} ggml-hexagon.cpp htp-utils.c htp-utils.h ../../include/ggml-hexagon.h) target_link_libraries(${TARGET_NAME} PRIVATE htp_iface) target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/htp ${CMAKE_CURRENT_BINARY_DIR}) # Build HTP bits set(HTP_CMAKE_ARGS -DCMAKE_TOOLCHAIN_FILE=${CMAKE_CURRENT_SOURCE_DIR}/htp/cmake-toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_LIBDIR=${CMAKE_CURRENT_BINARY_DIR} -DHEXAGON_SDK_ROOT=$ENV{HEXAGON_SDK_ROOT} -DHEXAGON_TOOLS_ROOT=$ENV{HEXAGON_TOOLS_ROOT} -DHEXAGON_HTP_DEBUG=${GGML_HEXAGON_HTP_DEBUG} -DGGML_HEXAGON_FP32_QUANTIZE_GROUP_SIZE=${GGML_HEXAGON_FP32_QUANTIZE_GROUP_SIZE}) ExternalProject_Add(htp-v68 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v68 -DPREBUILT_LIB_DIR="toolv19_v68") ExternalProject_Add(htp-v69 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v69 -DPREBUILT_LIB_DIR="toolv19_v69") ExternalProject_Add(htp-v73 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v73 -DPREBUILT_LIB_DIR="toolv19_v73") ExternalProject_Add(htp-v75 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v75 -DPREBUILT_LIB_DIR="toolv19_v75") ExternalProject_Add(htp-v79 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v79 -DPREBUILT_LIB_DIR="toolv19_v79") ExternalProject_Add(htp-v81 SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v81 -DPREBUILT_LIB_DIR="toolv19_v81") # Install Hexagon skels required at runtime install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v68.so ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v69.so ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v73.so ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v75.so ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v79.so ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v81.so TYPE LIB) ggml-org-ggml-3678254/src/ggml-hexagon/ggml-hexagon.cpp000066400000000000000000003261101512524704700226140ustar00rootroot00000000000000#include #include #include #include #include 
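// Scalar CPU reference for one RWKV6 head, mirroring the per-thread math in the rwkv_wkv_f32
// kernel above (src/ggml-cuda/wkv.cu): each output channel accumulates r[j] * (tf[j]*k[j]*v + state)
// and then decays the state by td[j]. Illustrative sketch only: the function name and the flat
// row-major buffers are assumptions, not part of the ggml API.
#include <vector>

static void wkv6_head_reference(int n_tokens, int head_size,
                                const float * r, const float * k, const float * v,   // [n_tokens][head_size]
                                const float * tf, const float * td_all,              // tf: [head_size], td: [n_tokens][head_size]
                                std::vector<float> & state,                          // [head_size][head_size], state[j*head_size + i]
                                float * y) {                                         // [n_tokens][head_size]
    for (int t = 0; t < n_tokens; ++t) {
        const float * rt = r      + (size_t) t*head_size;
        const float * kt = k      + (size_t) t*head_size;
        const float * vt = v      + (size_t) t*head_size;
        const float * td = td_all + (size_t) t*head_size;
        for (int i = 0; i < head_size; ++i) {        // output channel, i.e. the kernel's tid
            float acc = 0.0f;
            for (int j = 0; j < head_size; ++j) {    // inner/state index, i.e. the kernel's j
                const float kv = kt[j] * vt[i];
                float & s = state[(size_t) j*head_size + i];
                acc += rt[j] * (tf[j] * kv + s);     // "bonus" term plus the decayed state, as in the kernel
                s = s * td[j] + kv;                  // state update
            }
            y[(size_t) t*head_size + i] = acc;
        }
    }
}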
#include #include #include #include #include #include #include #ifdef _WIN32 # include # ifndef _WINDOWS # define _WINDOWS # endif #else # include # include #endif #pragma clang diagnostic ignored "-Wnested-anon-types" #pragma clang diagnostic ignored "-Wgnu-anonymous-struct" #include "htp-utils.h" #include #include #include #define GGML_COMMON_IMPL_CPP #include "ggml-backend-impl.h" #include "ggml-common.h" #include "ggml-hexagon.h" #include "ggml-impl.h" #include "ggml-quants.h" #include "op-desc.h" #include "htp-msg.h" #include "htp_iface.h" static size_t opt_ndev = 1; static size_t opt_nhvx = 0; // use all static int opt_arch = 0; // autodetect static int opt_etm = 0; static int opt_verbose = 0; static int opt_profile = 0; static int opt_hostbuf = 1; static int opt_experimental = 0; // Enable all stages by default static int opt_opmask = HTP_OPMASK_QUEUE | HTP_OPMASK_QUANTIZE | HTP_OPMASK_COMPUTE; static int opt_opsync = 0; // synchronous ops #define HEX_VERBOSE(...) \ if (opt_verbose) GGML_LOG_DEBUG(__VA_ARGS__) static inline uint64_t hex_is_aligned(void * addr, uint32_t align) { return ((size_t) addr & (align - 1)) == 0; } static inline size_t hex_round_up(size_t n, size_t m) { return m * ((n + m - 1) / m); } static const char * status_to_str(uint32_t status) { switch (status) { case HTP_STATUS_OK: return "OK"; case HTP_STATUS_NO_SUPPORT: return "NO-SUPPORT"; case HTP_STATUS_INVAL_PARAMS: return "INVAL-PARAMS"; case HTP_STATUS_VTCM_TOO_SMALL: return "VTCM-TOO-SMALL"; case HTP_STATUS_INTERNAL_ERR: return "INTERNAL-ERROR"; default: return "UNKNOWN"; } } // ** debug helpers static void ggml_hexagon_dump_op_exec(const std::string &sess_name, const ggml_tensor * op, const uint32_t req_flags) { if (!opt_verbose) return; op_desc desc(op); GGML_LOG_DEBUG("ggml-hex: %s execute-op %s: %s : %s : %s : %s : %s : flags 0x%x\n", sess_name.c_str(), ggml_op_name(op->op), desc.names, desc.dims, desc.types, desc.strides, desc.buffs, req_flags); } static void ggml_hexagon_dump_op_supp(const std::string &sess_name, const struct ggml_tensor * op, bool supp) { if (!opt_verbose) return; op_desc desc(op); GGML_LOG_DEBUG("ggml-hex: %s supports-op %s : %s : %s : %s : %s : %s : %s\n", sess_name.c_str(), ggml_op_name(op->op), desc.names, desc.dims, desc.types, desc.strides, desc.buffs, supp ? 
"yes" : "no"); } static void ggml_hexagon_dump_op_prof(const std::string &sess_name, const ggml_tensor * op, uint32_t op_usec, uint32_t op_cycles, uint32_t op_pkts, uint64_t call_usec) { if (!opt_profile) return; op_desc desc(op); GGML_LOG_DEBUG("ggml-hex: %s profile-op %s: %s : %s : %s : %s : %s : op-usec %u op-cycles %u op-pkts %u (%f) call-usec %llu\n", sess_name.c_str(), ggml_op_name(op->op), desc.names, desc.dims, desc.types, desc.strides, desc.buffs, op_usec, op_cycles, op_pkts, (float) op_cycles / op_pkts, (unsigned long long) call_usec); } // ** backend sessions struct ggml_hexagon_session { ggml_hexagon_session(int dev_id, ggml_backend_dev_t dev) noexcept(false); ~ggml_hexagon_session() noexcept(true); void allocate(int dev_id) noexcept(false); void release() noexcept(true); void enqueue(struct htp_general_req &req, struct dspqueue_buffer *bufs, uint32_t n_bufs, bool sync = false); void flush(); ggml_backend_buffer_type buffer_type = {}; ggml_backend_buffer_type repack_buffer_type = {}; std::string name; remote_handle64 handle; dspqueue_t queue; uint32_t session_id; uint32_t domain_id; uint64_t queue_id; int dev_id; bool valid_session; bool valid_handle; bool valid_queue; bool valid_iface; std::atomic op_pending; uint32_t prof_usecs; uint32_t prof_cycles; uint32_t prof_pkts; }; void ggml_hexagon_session::enqueue(struct htp_general_req &req, struct dspqueue_buffer *bufs, uint32_t n_bufs, bool sync) { // Bump pending flag (cleared in the session::flush once we get the responce) this->op_pending++; // atomic inc int err = dspqueue_write(this->queue, 0, // flags - the framework will autoset this n_bufs, // number of buffers bufs, // buffer references sizeof(req), (const uint8_t *) &req, // Message 1000000 // Timeout ); if (err != 0) { GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", this->name.c_str(), (unsigned) err); } if (sync) { flush(); } } // Flush HTP response queue i.e wait for all outstanding requests to complete void ggml_hexagon_session::flush() { dspqueue_t q = this->queue; // Repeatedly read packets from the queue until it's empty. We don't // necessarily get a separate callback for each packet, and new packets // may arrive while we're processing the previous one. 
while (this->op_pending) { struct htp_general_rsp rsp; uint32_t rsp_size; uint32_t flags; struct dspqueue_buffer bufs[HTP_MAX_PACKET_BUFFERS]; uint32_t n_bufs; // Read response packet from queue int err = dspqueue_read(q, &flags, HTP_MAX_PACKET_BUFFERS, // Maximum number of buffer references &n_bufs, // Number of buffer references bufs, // Buffer references sizeof(rsp), // Max message length &rsp_size, // Message length (uint8_t *) &rsp, 1000000); // Timeout if (err == AEE_EEXPIRED) { // TODO: might need to bail out if the HTP is stuck on something continue; } if (err != 0) { GGML_ABORT("ggml-hex: dspqueue_read failed: 0x%08x\n", (unsigned) err); } // Basic sanity checks if (rsp_size != sizeof(rsp)) { GGML_ABORT("ggml-hex: dspcall : bad response (size)\n"); } if (rsp.status != HTP_STATUS_OK) { GGML_LOG_ERROR("ggml-hex: dspcall : dsp-rsp: %s\n", status_to_str(rsp.status)); // TODO: handle errors } // TODO: update profiling implementation, currently only works for opt_opsync mode this->prof_usecs = rsp.prof_usecs; this->prof_cycles = rsp.prof_cycles; this->prof_pkts = rsp.prof_pkts; this->op_pending--; // atomic dec } } // ** backend buffers struct ggml_backend_hexagon_buffer_type_context { ggml_backend_hexagon_buffer_type_context(const std::string & name, ggml_hexagon_session * sess) { this->sess = sess; this->name = name; } ggml_hexagon_session * sess; std::string name; }; struct ggml_backend_hexagon_buffer_context { bool mmap_to(ggml_hexagon_session * s) { HEX_VERBOSE("ggml-hex: %s mmaping buffer: base %p domain-id %d session-id %d size %zu fd %d repack %d\n", s->name.c_str(), (void *) this->base, s->domain_id, s->session_id, this->size, this->fd, (int) this->repack); int err = fastrpc_mmap(s->domain_id, this->fd, (void *) this->base, 0, this->size, FASTRPC_MAP_FD); if (err != 0) { GGML_LOG_ERROR("ggml-hex: buffer mapping failed : domain_id %d size %zu fd %d error 0x%08x\n", s->domain_id, this->size, this->fd, (unsigned) err); return false; } return true; } bool mmap() { if (this->mapped) { return true; } if (!mmap_to(this->sess)) { return false; } this->mapped = true; return true; } void munmap() { if (!this->mapped) { return; } fastrpc_munmap(this->sess->domain_id, this->fd, this->base, this->size); this->mapped = false; } ggml_backend_hexagon_buffer_context(ggml_hexagon_session * sess, size_t size, bool repack) { size += 4 * 1024; // extra page for padding if (rpcmem_alloc2) { this->base = (uint8_t *) rpcmem_alloc2(RPCMEM_HEAP_ID_SYSTEM, RPCMEM_DEFAULT_FLAGS | RPCMEM_HEAP_NOREG, size); } else { GGML_LOG_INFO("ggml-hex: %s rpcmem_alloc2 not found, falling back to rpcmem_alloc\n", sess->name.c_str()); this->base = (uint8_t *) rpcmem_alloc(RPCMEM_HEAP_ID_SYSTEM, RPCMEM_DEFAULT_FLAGS | RPCMEM_HEAP_NOREG, size); } if (!this->base) { GGML_LOG_ERROR("ggml-hex: %s failed to allocate buffer : size %zu\n", sess->name.c_str(), size); throw std::runtime_error("ggml-hex: rpcmem_alloc failed (see log for details)"); } this->fd = rpcmem_to_fd(this->base); if (this->fd < 0) { GGML_LOG_ERROR("ggml-hex: %s failed to get FD for buffer %p\n", sess->name.c_str(), (void *) this->base); rpcmem_free(this->base); this->base = NULL; throw std::runtime_error("ggml-hex: rpcmem_to_fd failed (see log for details)"); } HEX_VERBOSE("ggml-hex: %s allocated buffer: base %p size %zu fd %d repack %d\n", sess->name.c_str(), (void *) this->base, size, this->fd, (int) repack); this->sess = sess; this->size = size; this->mapped = false; this->repack = repack; } ~ggml_backend_hexagon_buffer_context() { munmap(); if 
(this->base) { rpcmem_free(this->base); this->base = NULL; } } ggml_hexagon_session * sess; // primary session uint8_t * base; size_t size; int fd; bool mapped; // mmap is done bool repack; // repacked buffer }; static ggml_hexagon_session * ggml_backend_hexagon_buffer_get_sess(ggml_backend_buffer_t buffer) { return static_cast(buffer->buft->context)->sess; } static void ggml_backend_hexagon_buffer_free_buffer(ggml_backend_buffer_t buffer) { auto ctx = static_cast(buffer->context); delete ctx; } static void * ggml_backend_hexagon_buffer_get_base(ggml_backend_buffer_t buffer) { auto ctx = static_cast(buffer->context); return ctx->base; } static enum ggml_status ggml_backend_hexagon_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { auto ctx = static_cast(buffer->context); auto sess = ctx->sess; HEX_VERBOSE("ggml-hex: %s init-tensor %s : base %p data %p nbytes %zu usage %d repack %d\n", sess->name.c_str(), tensor->name, (void *) ctx->base, tensor->data, ggml_nbytes(tensor), (int) buffer->usage, (int) ctx->repack); if (tensor->view_src != NULL && tensor->view_offs == 0) { ; // nothing to do for the view } else { if (!ctx->mapped) { ctx->mmap(); } } return GGML_STATUS_SUCCESS; } // ======== Q4x4x2 ==================== struct x2_q4 { int v[2]; }; static x2_q4 unpack_q4(uint8_t v) { x2_q4 x = { (int) (v & 0x0f) - 8, (int) (v >> 4) - 8 }; return x; } static void dump_block_q4_0(const block_q4_0 * b, int i) { HEX_VERBOSE("ggml-hex: repack q4_0 %d: %d %d %d %d ... %d %d %d %d : %.6f\n", i, unpack_q4(b->qs[0]).v[0], unpack_q4(b->qs[1]).v[0], unpack_q4(b->qs[2]).v[0], unpack_q4(b->qs[3]).v[0], unpack_q4(b->qs[12]).v[1], unpack_q4(b->qs[13]).v[1], unpack_q4(b->qs[14]).v[1], unpack_q4(b->qs[15]).v[1], GGML_FP16_TO_FP32(b->d)); } static void dump_packed_block_q4x4x2(const uint8_t * v, unsigned int i, size_t k) { static const int qk = QK_Q4_0x4x2; const int dblk_size = 8 * 2; // 8x __fp16 const int qblk_size = qk / 2; // int4 const int qrow_size = k / 2; // int4 (not padded) const uint8_t * v_q = v + 0; // quants first const uint8_t * v_d = v + qrow_size; // then scales const uint8_t * q = v_q + i * qblk_size; const ggml_half * d = (const ggml_half *) (v_d + i * dblk_size); HEX_VERBOSE("ggml-hex: repack q4x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", i, unpack_q4(q[0]).v[0], unpack_q4(q[1]).v[0], unpack_q4(q[2]).v[0], unpack_q4(q[3]).v[0], unpack_q4(q[60]).v[0], unpack_q4(q[61]).v[0], unpack_q4(q[62]).v[0], unpack_q4(q[63]).v[0], unpack_q4(q[124]).v[0], unpack_q4(q[125]).v[0], unpack_q4(q[126]).v[0], unpack_q4(q[127]).v[0], GGML_FP16_TO_FP32(d[0]), GGML_FP16_TO_FP32(d[1]), GGML_FP16_TO_FP32(d[2]), GGML_FP16_TO_FP32(d[3])); HEX_VERBOSE("ggml-hex: repack q4x4x2-%d: %d %d %d %d ... %d %d %d %d ... 
%d %d %d %d : %.6f %.6f %.6f %.6f\n", i + 1, unpack_q4(q[0]).v[1], unpack_q4(q[1]).v[1], unpack_q4(q[2]).v[1], unpack_q4(q[3]).v[1], unpack_q4(q[60]).v[1], unpack_q4(q[61]).v[1], unpack_q4(q[62]).v[1], unpack_q4(q[63]).v[1], unpack_q4(q[124]).v[1], unpack_q4(q[125]).v[1], unpack_q4(q[126]).v[1], unpack_q4(q[127]).v[1], GGML_FP16_TO_FP32(d[4]), GGML_FP16_TO_FP32(d[5]), GGML_FP16_TO_FP32(d[6]), GGML_FP16_TO_FP32(d[7])); } static void unpack_q4_0_quants(uint8_t * qs, const block_q4_0 * x, unsigned int bi) { static const int qk = QK4_0; for (unsigned int i = 0; i < qk / 2; ++i) { const int x0 = (x->qs[i] & 0x0F); const int x1 = (x->qs[i] >> 4); qs[bi * qk + i + 0] = x0; qs[bi * qk + i + qk / 2] = x1; } } static void pack_q4_0_quants(block_q4_0 * x, const uint8_t * qs, unsigned int bi) { static const int qk = QK4_0; for (unsigned int i = 0; i < qk / 2; ++i) { const uint8_t x0 = qs[bi * qk + i + 0]; const uint8_t x1 = qs[bi * qk + i + qk / 2]; x->qs[i] = x0 | (x1 << 4); } } static void repack_row_q4x4x2(uint8_t * y, const block_q4_0 * x, int64_t k) { static const int qk = QK_Q4_0x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) const int dblk_size = 8 * 2; // 8x __fp16 const int qblk_size = qk / 2; // int4 const int qrow_size = k / 2; // int4 (not padded to blocks) uint8_t * y_q = y + 0; // quants first uint8_t * y_d = y + qrow_size; // then scales if (opt_verbose > 2) { for (int i = 0; i < nb; i++) { dump_block_q4_0(&x[i * 8 + 0], 0); dump_block_q4_0(&x[i * 8 + 1], 1); dump_block_q4_0(&x[i * 8 + 2], 2); dump_block_q4_0(&x[i * 8 + 3], 3); dump_block_q4_0(&x[i * 8 + 4], 4); dump_block_q4_0(&x[i * 8 + 5], 5); dump_block_q4_0(&x[i * 8 + 6], 6); dump_block_q4_0(&x[i * 8 + 7], 7); } } // Repack the quants for (int i = 0; i < nb; i++) { uint8_t qs[QK_Q4_0x4x2]; // unpacked quants unpack_q4_0_quants(qs, &x[i * 8 + 0], 0); unpack_q4_0_quants(qs, &x[i * 8 + 1], 1); unpack_q4_0_quants(qs, &x[i * 8 + 2], 2); unpack_q4_0_quants(qs, &x[i * 8 + 3], 3); unpack_q4_0_quants(qs, &x[i * 8 + 4], 4); unpack_q4_0_quants(qs, &x[i * 8 + 5], 5); unpack_q4_0_quants(qs, &x[i * 8 + 6], 6); unpack_q4_0_quants(qs, &x[i * 8 + 7], 7); uint8_t * q = y_q + (i * qblk_size); for (int j = 0; j < qk / 2; j++) { q[j] = (qs[j + 128] << 4) | qs[j]; } } // Repack the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) // the last block is truncated and overriden by the scales. 
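    /*
     * Packing detail for the loop above: qs[] holds 256 unpacked 4-bit values per super-block;
     * value j of the first half goes into the low nibble and value j + 128 into the high nibble
     * of output byte j, i.e. q[j] = (qs[j + 128] << 4) | qs[j]. A minimal round-trip of that byte
     * layout in plain C++ (helper names are ad hoc):
     *
     *     static uint8_t pack_pair(uint8_t lo, uint8_t hi)              { return (uint8_t) ((hi << 4) | (lo & 0x0F)); }
     *     static void    unpack_pair(uint8_t b, uint8_t & lo, uint8_t & hi) { lo = b & 0x0F; hi = b >> 4; }
     *
     * unpack_row_q4x4x2() below applies exactly the inverse split (& 0xf and >> 4).
     */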
for (int i = 0; i < nb; i++) { // Repack the scales ggml_half * d = (ggml_half *) (y_d + i * dblk_size); d[0] = x[i * 8 + 0].d; d[1] = x[i * 8 + 1].d; d[2] = x[i * 8 + 2].d; d[3] = x[i * 8 + 3].d; d[4] = x[i * 8 + 4].d; d[5] = x[i * 8 + 5].d; d[6] = x[i * 8 + 6].d; d[7] = x[i * 8 + 7].d; } if (opt_verbose > 1) { for (int i = 0; i < nb; i++) { dump_packed_block_q4x4x2(y, i, k); } } } static void unpack_row_q4x4x2(block_q4_0 * x, const uint8_t * y, int64_t k) { static const int qk = QK_Q4_0x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) const int dblk_size = 8 * 2; // 8x __fp16 const int qblk_size = qk / 2; // int4 const int qrow_size = k / 2; // int4 (not padded to blocks) const uint8_t * y_q = y + 0; // quants first const uint8_t * y_d = y + qrow_size; // then scales if (opt_verbose > 1) { for (int i = 0; i < nb; i++) { dump_packed_block_q4x4x2(y, i, k); } } // Unpack the quants for (int i = 0; i < nb; i++) { uint8_t qs[QK_Q4_0x4x2]; // unpacked quants const uint8_t * q = y_q + (i * qblk_size); for (int j = 0; j < qk / 2; j++) { qs[j] = q[j] & 0xf; qs[j + 128] = q[j] >> 4; } pack_q4_0_quants(&x[i * 8 + 0], qs, 0); pack_q4_0_quants(&x[i * 8 + 1], qs, 1); pack_q4_0_quants(&x[i * 8 + 2], qs, 2); pack_q4_0_quants(&x[i * 8 + 3], qs, 3); pack_q4_0_quants(&x[i * 8 + 4], qs, 4); pack_q4_0_quants(&x[i * 8 + 5], qs, 5); pack_q4_0_quants(&x[i * 8 + 6], qs, 6); pack_q4_0_quants(&x[i * 8 + 7], qs, 7); } // Repack the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) // the last block is truncated and overriden by the scales. for (int i = 0; i < nb; i++) { // Unpack the scales const ggml_half * d = (const ggml_half *) (y_d + i * dblk_size); x[i * 8 + 0].d = d[0]; x[i * 8 + 1].d = d[1]; x[i * 8 + 2].d = d[2]; x[i * 8 + 3].d = d[3]; x[i * 8 + 4].d = d[4]; x[i * 8 + 5].d = d[5]; x[i * 8 + 6].d = d[6]; x[i * 8 + 7].d = d[7]; } if (opt_verbose > 2) { for (int i = 0; i < nb; i++) { dump_block_q4_0(&x[i * 8 + 0], 0); dump_block_q4_0(&x[i * 8 + 1], 1); dump_block_q4_0(&x[i * 8 + 2], 2); dump_block_q4_0(&x[i * 8 + 3], 3); dump_block_q4_0(&x[i * 8 + 4], 4); dump_block_q4_0(&x[i * 8 + 5], 5); dump_block_q4_0(&x[i * 8 + 6], 6); dump_block_q4_0(&x[i * 8 + 7], 7); } } } static void init_row_q4x4x2(block_q4_0 * x, int64_t k) { static const int qk = QK_Q4_0x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) // Init the quants such that they unpack into zeros uint8_t qs[QK_Q4_0x4x2]; // unpacked quants memset(qs, 8, sizeof(qs)); for (int i = 0; i < nb; i++) { pack_q4_0_quants(&x[i * 8 + 0], qs, 0); pack_q4_0_quants(&x[i * 8 + 1], qs, 1); pack_q4_0_quants(&x[i * 8 + 2], qs, 2); pack_q4_0_quants(&x[i * 8 + 3], qs, 3); pack_q4_0_quants(&x[i * 8 + 4], qs, 4); pack_q4_0_quants(&x[i * 8 + 5], qs, 5); pack_q4_0_quants(&x[i * 8 + 6], qs, 6); pack_q4_0_quants(&x[i * 8 + 7], qs, 7); } // Init the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) // the last block is truncated and overriden by the scales. 
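    /*
     * Layout reminder for the q4x4x2 rows handled here: each repacked row stores all quants first
     * (k/2 bytes, two 4-bit values per byte) followed by the scales (8 fp16 values per 256-element
     * super-block), which is how y_q/y_d above are derived. The offsets in plain C++ (ad hoc
     * helpers, assuming k elements per row):
     *
     *     size_t q4x4x2_quant_offset(size_t block)           { return block * (256 / 2); }       // 128 bytes of nibbles
     *     size_t q4x4x2_scale_offset(size_t block, size_t k) { return k / 2 + block * 8 * 2; }   // scales start after all quants
     */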
for (int i = 0; i < nb; i++) { // Unpack the scales x[i * 8 + 0].d = 0; x[i * 8 + 1].d = 0; x[i * 8 + 2].d = 0; x[i * 8 + 3].d = 0; x[i * 8 + 4].d = 0; x[i * 8 + 5].d = 0; x[i * 8 + 6].d = 0; x[i * 8 + 7].d = 0; } } // repack q4_0 data into q4x4x2 tensor static void repack_q4_0_q4x4x2(ggml_tensor * t, const void * data, size_t size) { int64_t nrows = ggml_nrows(t); size_t row_size = ggml_row_size(t->type, t->ne[0]); size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_0x4x2)); // extra elements for the pad size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) // Ensure we don't try to read more data than is available in the source buffer 'data' // or write more than the tensor can hold. const size_t total_tensor_size = (size_t)nrows * row_size; const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size; // Calculate how many full rows and how many remaining bytes we need to process. const int64_t n_full_rows = n_bytes_to_copy / row_size; const size_t n_rem_bytes = n_bytes_to_copy % row_size; void * buf_pd = ggml_aligned_malloc(row_size_pd); GGML_ASSERT(buf_pd != NULL); void * buf_rp = ggml_aligned_malloc(row_size_rp); GGML_ASSERT(buf_rp != NULL); HEX_VERBOSE("ggml-hex: repack-q4_0-q4x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, t->ne[0], nrows, row_size); init_row_q4x4x2((block_q4_0 *) buf_pd, t->ne[0]); // init padded buffer to make sure the tail is all zeros // 1. Process all the full rows for (int64_t i = 0; i < n_full_rows; i++) { const uint8_t * src = (const uint8_t *) data + (i * row_size); uint8_t * dst = (uint8_t *) t->data + (i * row_size); memcpy(buf_pd, src, row_size); repack_row_q4x4x2((uint8_t *) buf_rp, (const block_q4_0 *) buf_pd, t->ne[0]); memcpy(dst, buf_rp, row_size); } // 2. Process the final, potentially partial, row if (n_rem_bytes > 0) { const int64_t i = n_full_rows; const uint8_t * src = (const uint8_t *) data + (i * row_size); uint8_t * dst = (uint8_t *) t->data + (i * row_size); // re-init the row because we are potentially copying a partial row init_row_q4x4x2((block_q4_0 *) buf_pd, t->ne[0]); // Copy only the remaining bytes from the source. memcpy(buf_pd, src, n_rem_bytes); // Repack the entire buffer repack_row_q4x4x2((uint8_t *) buf_rp, (const block_q4_0 *) buf_pd, t->ne[0]); // Write only the corresponding remaining bytes to the destination tensor. memcpy(dst, buf_rp, n_rem_bytes); } ggml_aligned_free(buf_pd, row_size_pd); ggml_aligned_free(buf_rp, row_size_rp); } // repack q4x4x2 tensor into q4_0 data static void repack_q4x4x2_q4_0(void * data, const ggml_tensor * t, size_t size) { int64_t nrows = ggml_nrows(t); size_t row_size = ggml_row_size(t->type, t->ne[0]); size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_0x4x2)); // extra elements for the pad size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) // Ensure we don't try to copy more data than the tensor actually contains. const size_t total_tensor_size = (size_t)nrows * row_size; const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size; // Calculate how many full rows and how many remaining bytes we need to process. 
const int64_t n_full_rows = n_bytes_to_copy / row_size; const size_t n_rem_bytes = n_bytes_to_copy % row_size; void * buf_pd = ggml_aligned_malloc(row_size_pd); GGML_ASSERT(buf_pd != NULL); void * buf_rp = ggml_aligned_malloc(row_size_rp); GGML_ASSERT(buf_rp != NULL); HEX_VERBOSE("ggml-hex: repack-q4x4x2-q4_0 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, t->ne[0], nrows, row_size); memset(buf_pd, 0, row_size_pd); // clear-out padded buffer to make sure the tail is all zeros // 1. Process all the full rows for (int64_t i = 0; i < n_full_rows; i++) { const uint8_t * src = (const uint8_t *) t->data + (i * row_size); uint8_t * dst = (uint8_t *) data + (i * row_size); memcpy(buf_pd, src, row_size); unpack_row_q4x4x2((block_q4_0 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); memcpy(dst, buf_rp, row_size); } // 2. Process the final, potentially partial, row if (n_rem_bytes > 0) { const int64_t i = n_full_rows; const uint8_t * src = (const uint8_t *) t->data + (i * row_size); uint8_t * dst = (uint8_t *) data + (i * row_size); // We still need to read and unpack the entire source row because quantization is block-based. memcpy(buf_pd, src, row_size); unpack_row_q4x4x2((block_q4_0 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); // But we only copy the remaining number of bytes to the destination. memcpy(dst, buf_rp, n_rem_bytes); } ggml_aligned_free(buf_pd, row_size_pd); ggml_aligned_free(buf_rp, row_size_rp); } // ======== Q8x4x2 ==================== static void dump_block_q8_0(const block_q8_0 * b, int i) { HEX_VERBOSE("ggml-hex: repack q8_0 %d: %d %d %d %d ... %d %d %d %d : %.6f\n", i, b->qs[0], b->qs[1], b->qs[2], b->qs[3], b->qs[28], b->qs[29], b->qs[30], b->qs[31], GGML_FP16_TO_FP32(b->d)); } static void dump_packed_block_q8x4x2(const uint8_t * v, unsigned int i, size_t k) { static const int qk = QK_Q8_0x4x2; const int dblk_size = 8 * 2; // 8x __fp16 const int qblk_size = qk; // int8 const int qrow_size = k; // int8 (not padded) const uint8_t * v_q = v + 0; // quants first const uint8_t * v_d = v + qrow_size; // then scales const uint8_t * q = v_q + i * qblk_size; const ggml_half * d = (const ggml_half *) (v_d + i * dblk_size); HEX_VERBOSE("ggml-hex: repack q8x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", i, q[0], q[1], q[2], q[3], q[60], q[61], q[62], q[63], q[124], q[125], q[126], q[127], GGML_FP16_TO_FP32(d[0]), GGML_FP16_TO_FP32(d[1]), GGML_FP16_TO_FP32(d[2]), GGML_FP16_TO_FP32(d[3])); HEX_VERBOSE("ggml-hex: repack q8x4x2-%d: %d %d %d %d ... %d %d %d %d ... 
%d %d %d %d : %.6f %.6f %.6f %.6f\n", i + 1, q[128], q[129], q[130], q[131], q[192], q[193], q[194], q[195], q[252], q[253], q[254], q[255], GGML_FP16_TO_FP32(d[4]), GGML_FP16_TO_FP32(d[5]), GGML_FP16_TO_FP32(d[6]), GGML_FP16_TO_FP32(d[7])); } static void unpack_q8_0_quants(uint8_t * qs, const block_q8_0 * x, unsigned int bi) { static const int qk = QK8_0; for (unsigned int i = 0; i < qk; ++i) { qs[bi * qk + i] = x->qs[i]; } } static void pack_q8_0_quants(block_q8_0 * x, const uint8_t * qs, unsigned int bi) { static const int qk = QK8_0; for (unsigned int i = 0; i < qk; ++i) { x->qs[i] = qs[bi * qk + i]; } } static void repack_row_q8x4x2(uint8_t * y, const block_q8_0 * x, int64_t k) { static const int qk = QK_Q8_0x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) const int dblk_size = 8 * 2; // 8x __fp16 const int qblk_size = qk; // int8 const int qrow_size = k; // int8 (not padded to blocks) uint8_t * y_q = y + 0; // quants first uint8_t * y_d = y + qrow_size; // then scales if (opt_verbose > 2) { for (int i = 0; i < nb; i++) { dump_block_q8_0(&x[i * 8 + 0], 0); dump_block_q8_0(&x[i * 8 + 1], 1); dump_block_q8_0(&x[i * 8 + 2], 2); dump_block_q8_0(&x[i * 8 + 3], 3); dump_block_q8_0(&x[i * 8 + 4], 4); dump_block_q8_0(&x[i * 8 + 5], 5); dump_block_q8_0(&x[i * 8 + 6], 6); dump_block_q8_0(&x[i * 8 + 7], 7); } } // Repack the quants for (int i = 0; i < nb; i++) { uint8_t qs[QK_Q8_0x4x2]; // unpacked quants unpack_q8_0_quants(qs, &x[i * 8 + 0], 0); unpack_q8_0_quants(qs, &x[i * 8 + 1], 1); unpack_q8_0_quants(qs, &x[i * 8 + 2], 2); unpack_q8_0_quants(qs, &x[i * 8 + 3], 3); unpack_q8_0_quants(qs, &x[i * 8 + 4], 4); unpack_q8_0_quants(qs, &x[i * 8 + 5], 5); unpack_q8_0_quants(qs, &x[i * 8 + 6], 6); unpack_q8_0_quants(qs, &x[i * 8 + 7], 7); uint8_t * q = y_q + (i * qblk_size); for (int j = 0; j < qk; j++) { q[j] = qs[j]; } } // Repack the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) // the last block is truncated and overriden by the scales. for (int i = 0; i < nb; i++) { // Repack the scales ggml_half * d = (ggml_half *) (y_d + i * dblk_size); d[0] = x[i * 8 + 0].d; d[1] = x[i * 8 + 1].d; d[2] = x[i * 8 + 2].d; d[3] = x[i * 8 + 3].d; d[4] = x[i * 8 + 4].d; d[5] = x[i * 8 + 5].d; d[6] = x[i * 8 + 6].d; d[7] = x[i * 8 + 7].d; } if (opt_verbose > 1) { for (int i = 0; i < nb; i++) { dump_packed_block_q8x4x2(y, i, k); } } } static void unpack_row_q8x4x2(block_q8_0 * x, const uint8_t * y, int64_t k) { static const int qk = QK_Q8_0x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) const int dblk_size = 8 * 2; // 8x __fp16 const int qblk_size = qk; // int8 const int qrow_size = k; // int8 (not padded to blocks) const uint8_t * y_q = y + 0; // quants first const uint8_t * y_d = y + qrow_size; // then scales if (opt_verbose > 1) { for (int i = 0; i < nb; i++) { dump_packed_block_q8x4x2(y, i, k); } } // Unpack the quants for (int i = 0; i < nb; i++) { uint8_t qs[QK_Q4_0x4x2]; // unpacked quants const uint8_t * q = y_q + (i * qblk_size); for (int j = 0; j < qk; j++) { qs[j] = q[j]; } pack_q8_0_quants(&x[i * 8 + 0], qs, 0); pack_q8_0_quants(&x[i * 8 + 1], qs, 1); pack_q8_0_quants(&x[i * 8 + 2], qs, 2); pack_q8_0_quants(&x[i * 8 + 3], qs, 3); pack_q8_0_quants(&x[i * 8 + 4], qs, 4); pack_q8_0_quants(&x[i * 8 + 5], qs, 5); pack_q8_0_quants(&x[i * 8 + 6], qs, 6); pack_q8_0_quants(&x[i * 8 + 7], qs, 7); } // Repack the scales // Note: Do not combine with the loop above. 
For tensor sizes not multiple of 256 (QK_Q4_0x4x2) // the last block is truncated and overriden by the scales. for (int i = 0; i < nb; i++) { // Unpack the scales const ggml_half * d = (const ggml_half *) (y_d + i * dblk_size); x[i * 8 + 0].d = d[0]; x[i * 8 + 1].d = d[1]; x[i * 8 + 2].d = d[2]; x[i * 8 + 3].d = d[3]; x[i * 8 + 4].d = d[4]; x[i * 8 + 5].d = d[5]; x[i * 8 + 6].d = d[6]; x[i * 8 + 7].d = d[7]; } if (opt_verbose > 2) { for (int i = 0; i < nb; i++) { dump_block_q8_0(&x[i * 8 + 0], 0); dump_block_q8_0(&x[i * 8 + 1], 1); dump_block_q8_0(&x[i * 8 + 2], 2); dump_block_q8_0(&x[i * 8 + 3], 3); dump_block_q8_0(&x[i * 8 + 4], 4); dump_block_q8_0(&x[i * 8 + 5], 5); dump_block_q8_0(&x[i * 8 + 6], 6); dump_block_q8_0(&x[i * 8 + 7], 7); } } } static void init_row_q8x4x2(block_q8_0 * x, int64_t k) { static const int qk = QK_Q8_0x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) // Init the quants such that they unpack into zeros uint8_t qs[QK_Q8_0x4x2]; // unpacked quants memset(qs, 0, sizeof(qs)); for (int i = 0; i < nb; i++) { pack_q8_0_quants(&x[i * 8 + 0], qs, 0); pack_q8_0_quants(&x[i * 8 + 1], qs, 1); pack_q8_0_quants(&x[i * 8 + 2], qs, 2); pack_q8_0_quants(&x[i * 8 + 3], qs, 3); pack_q8_0_quants(&x[i * 8 + 4], qs, 4); pack_q8_0_quants(&x[i * 8 + 5], qs, 5); pack_q8_0_quants(&x[i * 8 + 6], qs, 6); pack_q8_0_quants(&x[i * 8 + 7], qs, 7); } // Init the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q8_0x4x2) // the last block is truncated and overriden by the scales. for (int i = 0; i < nb; i++) { // Unpack the scales x[i * 8 + 0].d = 0; x[i * 8 + 1].d = 0; x[i * 8 + 2].d = 0; x[i * 8 + 3].d = 0; x[i * 8 + 4].d = 0; x[i * 8 + 5].d = 0; x[i * 8 + 6].d = 0; x[i * 8 + 7].d = 0; } } // repack q8_0 data into q8x4x2 tensor static void repack_q8_0_q8x4x2(ggml_tensor * t, const void * data, size_t size) { int64_t nrows = ggml_nrows(t); size_t row_size = ggml_row_size(t->type, t->ne[0]); size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q8_0x4x2)); // extra elements for the pad size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) // Ensure we don't try to read more data than is available in the source buffer 'data' // or write more than the tensor can hold. const size_t total_tensor_size = (size_t)nrows * row_size; const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size; // Calculate how many full rows and how many remaining bytes we need to process. const int64_t n_full_rows = n_bytes_to_copy / row_size; const size_t n_rem_bytes = n_bytes_to_copy % row_size; void * buf_pd = ggml_aligned_malloc(row_size_pd); GGML_ASSERT(buf_pd != NULL); void * buf_rp = ggml_aligned_malloc(row_size_rp); GGML_ASSERT(buf_rp != NULL); HEX_VERBOSE("ggml-hex: repack-q8_0-q8x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, t->ne[0], nrows, row_size); init_row_q8x4x2((block_q8_0 *) buf_pd, t->ne[0]); // init padded buffer to make sure the tail is all zeros // 1. Process all the full rows for (int64_t i = 0; i < n_full_rows; i++) { const uint8_t * src = (const uint8_t *) data + (i * row_size); uint8_t * dst = (uint8_t *) t->data + (i * row_size); memcpy(buf_pd, src, row_size); repack_row_q8x4x2((uint8_t *) buf_rp, (const block_q8_0 *) buf_pd, t->ne[0]); memcpy(dst, buf_rp, row_size); } // 2. 
Process the final, potentially partial, row if (n_rem_bytes > 0) { const int64_t i = n_full_rows; const uint8_t * src = (const uint8_t *) data + (i * row_size); uint8_t * dst = (uint8_t *) t->data + (i * row_size); // re-init the row because we are potentially copying a partial row init_row_q8x4x2((block_q8_0 *) buf_pd, t->ne[0]); // Copy only the remaining bytes from the source. memcpy(buf_pd, src, n_rem_bytes); // Repack the entire buffer repack_row_q8x4x2((uint8_t *) buf_rp, (const block_q8_0 *) buf_pd, t->ne[0]); // Write only the corresponding remaining bytes to the destination tensor. memcpy(dst, buf_rp, n_rem_bytes); } ggml_aligned_free(buf_pd, row_size_pd); ggml_aligned_free(buf_rp, row_size_rp); } // repack q8x4x2 tensor into q8_0 data static void repack_q8x4x2_q8_0(void * data, const ggml_tensor * t, size_t size) { int64_t nrows = ggml_nrows(t); size_t row_size = ggml_row_size(t->type, t->ne[0]); size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q8_0x4x2)); // extra elements for the pad size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) // Ensure we don't try to copy more data than the tensor actually contains. const size_t total_tensor_size = (size_t)nrows * row_size; const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size; // Calculate how many full rows and how many remaining bytes we need to process. const int64_t n_full_rows = n_bytes_to_copy / row_size; const size_t n_rem_bytes = n_bytes_to_copy % row_size; void * buf_pd = ggml_aligned_malloc(row_size_pd); GGML_ASSERT(buf_pd != NULL); void * buf_rp = ggml_aligned_malloc(row_size_rp); GGML_ASSERT(buf_rp != NULL); HEX_VERBOSE("ggml-hex: repack-q8x4x2-q8_0 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, t->ne[0], nrows, row_size); memset(buf_pd, 0, row_size_pd); // clear-out padded buffer to make sure the tail is all zeros // 1. Process all the full rows for (int64_t i = 0; i < n_full_rows; i++) { const uint8_t * src = (const uint8_t *) t->data + (i * row_size); uint8_t * dst = (uint8_t *) data + (i * row_size); memcpy(buf_pd, src, row_size); unpack_row_q8x4x2((block_q8_0 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); memcpy(dst, buf_rp, row_size); } // 2. Process the final, potentially partial, row if (n_rem_bytes > 0) { const int64_t i = n_full_rows; const uint8_t * src = (const uint8_t *) t->data + (i * row_size); uint8_t * dst = (uint8_t *) data + (i * row_size); // We still need to read and unpack the entire source row because quantization is block-based. memcpy(buf_pd, src, row_size); unpack_row_q8x4x2((block_q8_0 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); // But we only copy the remaining number of bytes to the destination. memcpy(dst, buf_rp, n_rem_bytes); } ggml_aligned_free(buf_pd, row_size_pd); ggml_aligned_free(buf_rp, row_size_rp); } // ======== MXFP4x4x2 ==================== struct x2_mxfp4 { int v[2]; }; static x2_mxfp4 unpack_mxfp4(uint8_t v) { x2_mxfp4 x; x.v[0] = kvalues_mxfp4[(v & 0x0f)]; x.v[1] = kvalues_mxfp4[(v >> 4)]; return x; } static void dump_block_mxfp4(const block_mxfp4 * b, int i) { HEX_VERBOSE("ggml-hex: repack mxfp4 %d: %d %d %d %d ... 
%d %d %d %d : %.6f\n", i, unpack_mxfp4(b->qs[0]).v[0], unpack_mxfp4(b->qs[1]).v[0], unpack_mxfp4(b->qs[2]).v[0], unpack_mxfp4(b->qs[3]).v[0], unpack_mxfp4(b->qs[12]).v[1], unpack_mxfp4(b->qs[13]).v[1], unpack_mxfp4(b->qs[14]).v[1], unpack_mxfp4(b->qs[15]).v[1], GGML_E8M0_TO_FP32_HALF(b->e)); } static void dump_packed_block_mxfp4x4x2(const uint8_t * v, unsigned int i, size_t k) { static const int qk = QK_MXFP4x4x2; const int eblk_size = 8 * 1; // 8x E8M0 const int qblk_size = qk / 2; // int4 const int qrow_size = k / 2; // int4 (not padded) const uint8_t * v_q = v + 0; // quants first const uint8_t * v_e = v + qrow_size; // then scales const uint8_t * q = v_q + i * qblk_size; const uint8_t * e = (const uint8_t *) (v_e + i * eblk_size); HEX_VERBOSE("ggml-hex: repack mxfp4x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", i, unpack_mxfp4(q[0]).v[0], unpack_mxfp4(q[1]).v[0], unpack_mxfp4(q[2]).v[0], unpack_mxfp4(q[3]).v[0], unpack_mxfp4(q[60]).v[0], unpack_mxfp4(q[61]).v[0], unpack_mxfp4(q[62]).v[0], unpack_mxfp4(q[63]).v[0], unpack_mxfp4(q[124]).v[0], unpack_mxfp4(q[125]).v[0], unpack_mxfp4(q[126]).v[0], unpack_mxfp4(q[127]).v[0], GGML_E8M0_TO_FP32_HALF(e[0]), GGML_E8M0_TO_FP32_HALF(e[1]), GGML_E8M0_TO_FP32_HALF(e[2]), GGML_E8M0_TO_FP32_HALF(e[3])); HEX_VERBOSE("ggml-hex: repack mxfp4x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", i + 1, unpack_mxfp4(q[0]).v[1], unpack_mxfp4(q[1]).v[1], unpack_mxfp4(q[2]).v[1], unpack_mxfp4(q[3]).v[1], unpack_mxfp4(q[60]).v[1], unpack_mxfp4(q[61]).v[1], unpack_mxfp4(q[62]).v[1], unpack_mxfp4(q[63]).v[1], unpack_mxfp4(q[124]).v[1], unpack_mxfp4(q[125]).v[1], unpack_mxfp4(q[126]).v[1], unpack_mxfp4(q[127]).v[1], GGML_E8M0_TO_FP32_HALF(e[4]), GGML_E8M0_TO_FP32_HALF(e[5]), GGML_E8M0_TO_FP32_HALF(e[6]), GGML_E8M0_TO_FP32_HALF(e[7])); } static void unpack_mxfp4_quants(uint8_t * qs, const block_mxfp4 * x, unsigned int bi) { static const int qk = QK_MXFP4; for (unsigned int i = 0; i < qk / 2; ++i) { const uint8_t x0 = (x->qs[i] & 0x0F); const uint8_t x1 = (x->qs[i] >> 4); qs[bi * qk + i + 0] = x0; qs[bi * qk + i + qk / 2] = x1; } } static void pack_mxfp4_quants(block_mxfp4 * x, const uint8_t * qs, unsigned int bi) { static const int qk = QK4_0; for (unsigned int i = 0; i < qk / 2; ++i) { const uint8_t x0 = qs[bi * qk + i + 0]; const uint8_t x1 = qs[bi * qk + i + qk / 2]; x->qs[i] = x0 | (x1 << 4); } } static void repack_row_mxfp4x4x2(uint8_t * y, const block_mxfp4 * x, int64_t k) { static const int qk = QK_MXFP4x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) const int eblk_size = 8 * 1; // 8x E8M0 const int qblk_size = qk / 2; // int4 const int qrow_size = k / 2; // int4 (not padded to blocks) uint8_t * y_q = y + 0; // quants first uint8_t * y_e = y + qrow_size; // then scales if (opt_verbose > 2) { for (int i = 0; i < nb; i++) { dump_block_mxfp4(&x[i * 8 + 0], 0); dump_block_mxfp4(&x[i * 8 + 1], 1); dump_block_mxfp4(&x[i * 8 + 2], 2); dump_block_mxfp4(&x[i * 8 + 3], 3); dump_block_mxfp4(&x[i * 8 + 4], 4); dump_block_mxfp4(&x[i * 8 + 5], 5); dump_block_mxfp4(&x[i * 8 + 6], 6); dump_block_mxfp4(&x[i * 8 + 7], 7); } } // Repack the quants for (int i = 0; i < nb; i++) { uint8_t qs[QK_MXFP4x4x2]; // unpacked quants unpack_mxfp4_quants(qs, &x[i * 8 + 0], 0); unpack_mxfp4_quants(qs, &x[i * 8 + 1], 1); unpack_mxfp4_quants(qs, &x[i * 8 + 2], 2); unpack_mxfp4_quants(qs, &x[i * 8 + 3], 3); unpack_mxfp4_quants(qs, &x[i * 8 + 4], 4); unpack_mxfp4_quants(qs, &x[i * 8 + 5], 5); unpack_mxfp4_quants(qs, 
&x[i * 8 + 6], 6); unpack_mxfp4_quants(qs, &x[i * 8 + 7], 7); uint8_t * q = y_q + (i * qblk_size); for (int j = 0; j < qk / 2; j++) { q[j] = (qs[j + 128] << 4) | qs[j]; } } // Repack the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_MXFP4x4x2) // the last block is truncated and overriden by the scales. for (int i = 0; i < nb; i++) { // Repack the scales uint8_t * e = (uint8_t *) (y_e + i * eblk_size); e[0] = x[i * 8 + 0].e; e[1] = x[i * 8 + 1].e; e[2] = x[i * 8 + 2].e; e[3] = x[i * 8 + 3].e; e[4] = x[i * 8 + 4].e; e[5] = x[i * 8 + 5].e; e[6] = x[i * 8 + 6].e; e[7] = x[i * 8 + 7].e; } if (opt_verbose > 1) { for (int i = 0; i < nb; i++) { dump_packed_block_mxfp4x4x2(y, i, k); } } } static void unpack_row_mxfp4x4x2(block_mxfp4 * x, const uint8_t * y, int64_t k) { static const int qk = QK_MXFP4x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) const int eblk_size = 8 * 1; // 8x E8M0 const int qblk_size = qk / 2; // int4 const int qrow_size = k / 2; // int4 (not padded to blocks) const uint8_t * y_q = y + 0; // quants first const uint8_t * y_e = y + qrow_size; // then scales if (opt_verbose > 1) { for (int i = 0; i < nb; i++) { dump_packed_block_mxfp4x4x2(y, i, k); } } // Unpack the quants for (int i = 0; i < nb; i++) { uint8_t qs[QK_MXFP4x4x2]; // unpacked quants const uint8_t * q = y_q + (i * qblk_size); for (int j = 0; j < qk / 2; j++) { qs[j] = q[j] & 0xf; qs[j + 128] = q[j] >> 4; } pack_mxfp4_quants(&x[i * 8 + 0], qs, 0); pack_mxfp4_quants(&x[i * 8 + 1], qs, 1); pack_mxfp4_quants(&x[i * 8 + 2], qs, 2); pack_mxfp4_quants(&x[i * 8 + 3], qs, 3); pack_mxfp4_quants(&x[i * 8 + 4], qs, 4); pack_mxfp4_quants(&x[i * 8 + 5], qs, 5); pack_mxfp4_quants(&x[i * 8 + 6], qs, 6); pack_mxfp4_quants(&x[i * 8 + 7], qs, 7); } // Repack the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_MXFP4_0x4x2) // the last block is truncated and overriden by the scales. for (int i = 0; i < nb; i++) { // Unpack the scales const uint8_t * e = (const uint8_t *) (y_e + i * eblk_size); x[i * 8 + 0].e = e[0]; x[i * 8 + 1].e = e[1]; x[i * 8 + 2].e = e[2]; x[i * 8 + 3].e = e[3]; x[i * 8 + 4].e = e[4]; x[i * 8 + 5].e = e[5]; x[i * 8 + 6].e = e[6]; x[i * 8 + 7].e = e[7]; } if (opt_verbose > 2) { for (int i = 0; i < nb; i++) { dump_block_mxfp4(&x[i * 8 + 0], 0); dump_block_mxfp4(&x[i * 8 + 1], 1); dump_block_mxfp4(&x[i * 8 + 2], 2); dump_block_mxfp4(&x[i * 8 + 3], 3); dump_block_mxfp4(&x[i * 8 + 4], 4); dump_block_mxfp4(&x[i * 8 + 5], 5); dump_block_mxfp4(&x[i * 8 + 6], 6); dump_block_mxfp4(&x[i * 8 + 7], 7); } } } static void init_row_mxfp4x4x2(block_mxfp4 * x, int64_t k) { static const int qk = QK_MXFP4x4x2; const int nb = (k + qk - 1) / qk; // number of blocks (padded) // Init the quants such that they unpack into zeros uint8_t qs[QK_MXFP4x4x2]; // unpacked quants memset(qs, 0, sizeof(qs)); for (int i = 0; i < nb; i++) { pack_mxfp4_quants(&x[i * 8 + 0], qs, 0); pack_mxfp4_quants(&x[i * 8 + 1], qs, 1); pack_mxfp4_quants(&x[i * 8 + 2], qs, 2); pack_mxfp4_quants(&x[i * 8 + 3], qs, 3); pack_mxfp4_quants(&x[i * 8 + 4], qs, 4); pack_mxfp4_quants(&x[i * 8 + 5], qs, 5); pack_mxfp4_quants(&x[i * 8 + 6], qs, 6); pack_mxfp4_quants(&x[i * 8 + 7], qs, 7); } // Init the scales // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_MXFP4x4x2) // the last block is truncated and overriden by the scales. 
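    // Note (added for clarity): with the quants packed from an all-zero buffer above,
    // every nibble already decodes to kvalues_mxfp4[0] == 0. Zeroing the E8M0 exponents
    // below is belt-and-braces so that the padded tail of a short row dequantizes to a
    // deterministic 0.0f regardless of what the scale bytes previously contained.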
for (int i = 0; i < nb; i++) { // Unpack the scales x[i * 8 + 0].e = 0; x[i * 8 + 1].e = 0; x[i * 8 + 2].e = 0; x[i * 8 + 3].e = 0; x[i * 8 + 4].e = 0; x[i * 8 + 5].e = 0; x[i * 8 + 6].e = 0; x[i * 8 + 7].e = 0; } } // repack mxfp4 data into mxfp4x4x2 tensor static void repack_mxfp4_mxfp4x4x2(ggml_tensor * t, const void * data, size_t size) { int64_t nrows = ggml_nrows(t); size_t row_size = ggml_row_size(t->type, t->ne[0]); size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_MXFP4x4x2)); // extra elements for the pad size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) // Ensure we don't try to read more data than is available in the source buffer 'data' // or write more than the tensor can hold. const size_t total_tensor_size = (size_t)nrows * row_size; const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size; // Calculate how many full rows and how many remaining bytes we need to process. const int64_t n_full_rows = n_bytes_to_copy / row_size; const size_t n_rem_bytes = n_bytes_to_copy % row_size; void * buf_pd = ggml_aligned_malloc(row_size_pd); GGML_ASSERT(buf_pd != NULL); void * buf_rp = ggml_aligned_malloc(row_size_rp); GGML_ASSERT(buf_rp != NULL); HEX_VERBOSE("ggml-hex: repack-mxfp4-mxfp4x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, t->ne[0], nrows, row_size); init_row_mxfp4x4x2((block_mxfp4 *) buf_pd, t->ne[0]); // init padded buffer to make sure the tail is all zeros // 1. Process all the full rows for (int64_t i = 0; i < n_full_rows; i++) { const uint8_t * src = (const uint8_t *) data + (i * row_size); uint8_t * dst = (uint8_t *) t->data + (i * row_size); memcpy(buf_pd, src, row_size); repack_row_mxfp4x4x2((uint8_t *) buf_rp, (const block_mxfp4 *) buf_pd, t->ne[0]); memcpy(dst, buf_rp, row_size); } // 2. Process the final, potentially partial, row if (n_rem_bytes > 0) { const int64_t i = n_full_rows; const uint8_t * src = (const uint8_t *) data + (i * row_size); uint8_t * dst = (uint8_t *) t->data + (i * row_size); // re-init the row because we are potentially copying a partial row init_row_mxfp4x4x2((block_mxfp4 *) buf_pd, t->ne[0]); // Copy only the remaining bytes from the source. memcpy(buf_pd, src, n_rem_bytes); // Repack the entire buffer (partial data + zero padding). repack_row_mxfp4x4x2((uint8_t *) buf_rp, (const block_mxfp4 *) buf_pd, t->ne[0]); // Write only the corresponding remaining bytes to the destination tensor. memcpy(dst, buf_rp, n_rem_bytes); } ggml_aligned_free(buf_pd, row_size_pd); ggml_aligned_free(buf_rp, row_size_rp); } // repack mxfp4x4x2 tensor into mxfp4 data static void repack_mxfp4x4x2_mxfp4(void * data, const ggml_tensor * t, size_t size) { int64_t nrows = ggml_nrows(t); size_t row_size = ggml_row_size(t->type, t->ne[0]); size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_MXFP4x4x2)); // extra elements for the pad size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) // Ensure we don't try to copy more data than the tensor actually contains. const size_t total_tensor_size = (size_t)nrows * row_size; const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size; // Calculate how many full rows and how many remaining bytes we need to process. 
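    // For illustration (hypothetical sizes): with ne[0] = 4096 the mxfp4 row is
    // 128 blocks * 17 bytes = 2176 bytes; a requested size of 5000 bytes then gives
    // n_full_rows = 2 and n_rem_bytes = 648, i.e. two rows are converted in full and
    // only the first 648 bytes of the third converted row are copied out.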
const int64_t n_full_rows = n_bytes_to_copy / row_size; const size_t n_rem_bytes = n_bytes_to_copy % row_size; void * buf_pd = ggml_aligned_malloc(row_size_pd); GGML_ASSERT(buf_pd != NULL); void * buf_rp = ggml_aligned_malloc(row_size_rp); GGML_ASSERT(buf_rp != NULL); HEX_VERBOSE("ggml-hex: repack-mxfp4x4x2-mxfp4 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, t->ne[0], nrows, row_size); memset(buf_pd, 0, row_size_pd); // clear-out padded buffer to make sure the tail is all zeros // 1. Process all the full rows for (int64_t i = 0; i < n_full_rows; i++) { const uint8_t * src = (const uint8_t *) t->data + (i * row_size); uint8_t * dst = (uint8_t *) data + (i * row_size); memcpy(buf_pd, src, row_size); unpack_row_mxfp4x4x2((block_mxfp4 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); memcpy(dst, buf_rp, row_size); } // 2. Process the final, potentially partial, row if (n_rem_bytes > 0) { const int64_t i = n_full_rows; const uint8_t * src = (const uint8_t *) t->data + (i * row_size); uint8_t * dst = (uint8_t *) data + (i * row_size); // We still need to read and unpack the entire source row because the format is block-based. memcpy(buf_pd, src, row_size); unpack_row_mxfp4x4x2((block_mxfp4 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); // But we only copy the remaining number of bytes to the destination to respect the size limit. memcpy(dst, buf_rp, n_rem_bytes); } ggml_aligned_free(buf_pd, row_size_pd); ggml_aligned_free(buf_rp, row_size_rp); } static void ggml_backend_hexagon_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { auto ctx = (ggml_backend_hexagon_buffer_context *) buffer->context; auto sess = ctx->sess; HEX_VERBOSE("ggml-hex: %s set-tensor %s : data %p offset %zu size %zu\n", sess->name.c_str(), tensor->name, data, offset, size); switch (tensor->type) { case GGML_TYPE_Q4_0: GGML_ASSERT(offset == 0); GGML_ASSERT(offset + size <= ggml_nbytes(tensor)); repack_q4_0_q4x4x2(tensor, data, size); break; case GGML_TYPE_Q8_0: GGML_ASSERT(offset == 0); GGML_ASSERT(offset + size <= ggml_nbytes(tensor)); repack_q8_0_q8x4x2(tensor, data, size); break; case GGML_TYPE_MXFP4: GGML_ASSERT(offset == 0); GGML_ASSERT(offset + size <= ggml_nbytes(tensor)); repack_mxfp4_mxfp4x4x2(tensor, data, size); break; default: memcpy((char *) tensor->data + offset, data, size); break; } } static void ggml_backend_hexagon_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { auto ctx = (ggml_backend_hexagon_buffer_context *) buffer->context; auto sess = ctx->sess; HEX_VERBOSE("ggml-hex: %s get-tensor %s : data %p offset %zu size %zu\n", sess->name.c_str(), tensor->name, data, offset, size); switch (tensor->type) { case GGML_TYPE_Q4_0: GGML_ASSERT(offset == 0); GGML_ASSERT(offset + size <= ggml_nbytes(tensor)); repack_q4x4x2_q4_0(data, tensor, size); break; case GGML_TYPE_Q8_0: GGML_ASSERT(offset == 0); GGML_ASSERT(offset + size <= ggml_nbytes(tensor)); repack_q8x4x2_q8_0(data, tensor, size); break; case GGML_TYPE_MXFP4: GGML_ASSERT(offset == 0); GGML_ASSERT(offset + size <= ggml_nbytes(tensor)); repack_mxfp4x4x2_mxfp4(data, tensor, size); break; default: memcpy(data, (const char *) tensor->data + offset, size); break; } } static bool ggml_backend_hexagon_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { GGML_UNUSED(buffer); GGML_UNUSED(src); GGML_UNUSED(dst); // we might optimize this later, for now 
take the slow path (ie get/set_tensor) return false; } static void ggml_backend_hexagon_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { auto ctx = (ggml_backend_hexagon_buffer_context *) buffer->context; auto sess = ctx->sess; HEX_VERBOSE("ggml-hex: %s clear-buff base %p size %zu\n", sess->name.c_str(), (void *) ctx->base, ctx->size); memset(ctx->base, value, ctx->size); } static ggml_backend_buffer_i ggml_backend_hexagon_buffer_interface = { /* .free_buffer = */ ggml_backend_hexagon_buffer_free_buffer, /* .get_base = */ ggml_backend_hexagon_buffer_get_base, /* .init_tensor = */ ggml_backend_hexagon_buffer_init_tensor, /* .memset_tensor = */ NULL, /* .set_tensor = */ ggml_backend_hexagon_buffer_set_tensor, /* .get_tensor = */ ggml_backend_hexagon_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_hexagon_buffer_cpy_tensor, /* .clear = */ ggml_backend_hexagon_buffer_clear, /* .reset = */ NULL, }; // ** backend buffer type static const char * ggml_backend_hexagon_buffer_type_name(ggml_backend_buffer_type_t buffer_type) { return static_cast(buffer_type->context)->name.c_str(); } static ggml_backend_buffer_t ggml_backend_hexagon_buffer_type_alloc_buffer( ggml_backend_buffer_type_t buffer_type, size_t size) { auto sess = static_cast(buffer_type->context)->sess; try { ggml_backend_hexagon_buffer_context * ctx = new ggml_backend_hexagon_buffer_context(sess, size, false /*repack*/); return ggml_backend_buffer_init(buffer_type, ggml_backend_hexagon_buffer_interface, ctx, size); } catch (const std::exception & exc) { GGML_LOG_ERROR("ggml-hex: %s failed to allocate buffer context: %s\n", sess->name.c_str(), exc.what()); return nullptr; } } static ggml_backend_buffer_t ggml_backend_hexagon_repack_buffer_type_alloc_buffer( ggml_backend_buffer_type_t buffer_type, size_t size) { auto sess = static_cast(buffer_type->context)->sess; try { ggml_backend_hexagon_buffer_context * ctx = new ggml_backend_hexagon_buffer_context(sess, size, true /*repack*/); return ggml_backend_buffer_init(buffer_type, ggml_backend_hexagon_buffer_interface, ctx, size); } catch (const std::exception & exc) { GGML_LOG_ERROR("ggml-hex: %s failed to allocate buffer context: %s\n", sess->name.c_str(), exc.what()); return nullptr; } } static size_t ggml_backend_hexagon_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) { return 128; // HVX alignment GGML_UNUSED(buffer_type); } static size_t ggml_backend_hexagon_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * t) { return ggml_nbytes(t); } static size_t ggml_backend_hexagon_buffer_type_get_max_size(ggml_backend_buffer_type_t buffer_type) { return 1 * 1024 * 1024 * 1024; // 1GB per buffer GGML_UNUSED(buffer_type); } static bool ggml_backend_hexagon_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return opt_hostbuf; GGML_UNUSED(buft); } static bool ggml_backend_hexagon_repack_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } static ggml_backend_buffer_type_i ggml_backend_hexagon_buffer_type_interface = { /* .get_name = */ ggml_backend_hexagon_buffer_type_name, /* .alloc_buffer = */ ggml_backend_hexagon_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_hexagon_buffer_type_get_alignment, /* .get_max_size = */ ggml_backend_hexagon_buffer_type_get_max_size, /* .get_alloc_size = */ ggml_backend_hexagon_buffer_type_get_alloc_size, /* .is_host = */ ggml_backend_hexagon_buffer_type_is_host, }; static ggml_backend_buffer_type_i 
ggml_backend_hexagon_repack_buffer_type_interface = { /* .get_name = */ ggml_backend_hexagon_buffer_type_name, /* .alloc_buffer = */ ggml_backend_hexagon_repack_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_hexagon_buffer_type_get_alignment, /* .get_max_size = */ ggml_backend_hexagon_buffer_type_get_max_size, /* .get_alloc_size = */ ggml_backend_hexagon_buffer_type_get_alloc_size, /* .is_host = */ ggml_backend_hexagon_repack_buffer_type_is_host, }; void ggml_hexagon_session::allocate(int dev_id) noexcept(false) { this->valid_session = false; this->valid_handle = false; this->valid_queue = false; this->valid_iface = false; this->domain_id = 3; // Default for CDSP, updated after the session is created this->session_id = 0; // Default for CDSP, updated after the session is created this->dev_id = dev_id; this->name = std::string("HTP") + std::to_string(dev_id); this->op_pending = 0; this->prof_usecs = 0; this->prof_cycles = 0; this->prof_pkts = 0; GGML_LOG_INFO("ggml-hex: allocating new session: %s\n", this->name.c_str()); domain * my_domain = get_domain(this->domain_id); if (my_domain == NULL) { GGML_LOG_ERROR("ggml-hex: unable to get domain struct for CDSP\n"); throw std::runtime_error("ggml-hex: failed to get CDSP domain (see log for details)"); } // Create new session if (dev_id != 0) { struct remote_rpc_reserve_new_session n; n.domain_name_len = strlen(CDSP_DOMAIN_NAME); n.domain_name = const_cast(CDSP_DOMAIN_NAME); n.session_name = const_cast(this->name.c_str()); n.session_name_len = this->name.size(); int err = remote_session_control(FASTRPC_RESERVE_NEW_SESSION, (void *) &n, sizeof(n)); if (err != AEE_SUCCESS) { GGML_LOG_ERROR("ggml-hex: failed to reserve new session %d : error 0x%x\n", dev_id, err); throw std::runtime_error("ggml-hex: remote_session_control(new-sess) failed (see log for details)"); } // Save the IDs this->session_id = n.session_id; this->domain_id = n.effective_domain_id; this->valid_session = true; } // Get session URI char session_uri[256]; { char htp_uri[256]; snprintf(htp_uri, sizeof(htp_uri), "file:///libggml-htp-v%u.so?htp_iface_skel_handle_invoke&_modver=1.0", opt_arch); struct remote_rpc_get_uri u = {}; u.session_id = this->session_id; u.domain_name = const_cast(CDSP_DOMAIN_NAME); u.domain_name_len = strlen(CDSP_DOMAIN_NAME); u.module_uri = const_cast(htp_uri); u.module_uri_len = strlen(htp_uri); u.uri = session_uri; u.uri_len = sizeof(session_uri); int err = remote_session_control(FASTRPC_GET_URI, (void *) &u, sizeof(u)); if (err != AEE_SUCCESS) { // fallback to single session uris int htp_URI_domain_len = strlen(htp_uri) + MAX_DOMAIN_NAMELEN; snprintf(session_uri, htp_URI_domain_len, "%s%s", htp_uri, my_domain->uri); GGML_LOG_WARN("ggml-hex: failed to get URI for session %d : error 0x%x. 
Falling back to single session URI: %s\n", dev_id, err, session_uri); } } // Enable Unsigned PD { struct remote_rpc_control_unsigned_module u; u.domain = this->domain_id; u.enable = 1; int err = remote_session_control(DSPRPC_CONTROL_UNSIGNED_MODULE, (void *) &u, sizeof(u)); if (err != AEE_SUCCESS) { GGML_LOG_ERROR("ggml-hex: failed to enable unsigned PD for session %d : error 0x%x\n", dev_id, err); throw std::runtime_error("ggml-hex: remote_session_control(unsign) failed (see log for details)"); } } // Open session int err = htp_iface_open(session_uri, &this->handle); if (err != AEE_SUCCESS) { GGML_LOG_ERROR("ggml-hex: failed to open session %d : error 0x%x\n", dev_id, err); throw std::runtime_error("ggml-hex: failed to open session (see log for details)"); } this->valid_handle = true; GGML_LOG_INFO("ggml-hex: new session: %s : session-id %d domain-id %d uri %s handle 0x%lx\n", this->name.c_str(), this->session_id, this->domain_id, session_uri, (unsigned long) this->handle); // Enable FastRPC QoS mode { struct remote_rpc_control_latency l; l.enable = 1; int err = remote_handle64_control(this->handle, DSPRPC_CONTROL_LATENCY, (void *) &l, sizeof(l)); if (err != 0) { GGML_LOG_WARN("ggml-hex: failed to enable fastrpc QOS mode: 0x%08x\n", (unsigned) err); } } // Now let's setup the DSP queue err = dspqueue_create(this->domain_id, 0, // Flags 128 * 1024, // Request queue size (in bytes) 64 * 1024, // Response queue size (in bytes) nullptr, // Read packet callback (we handle reads explicitly) nullptr, // Error callback (we handle errors during reads) (void *) this, // Callback context &queue); if (err != 0) { GGML_LOG_ERROR("ggml-hex: %s dspqueue_create failed: 0x%08x\n", this->name.c_str(), (unsigned) err); throw std::runtime_error("ggml-hex: failed to create dspqueue (see log for details)"); } this->valid_queue = true; // Export queue for use on the DSP err = dspqueue_export(queue, &this->queue_id); if (err != 0) { GGML_LOG_ERROR("ggml-hex: dspqueue_export failed: 0x%08x\n", (unsigned) err); throw std::runtime_error("ggml-hex: dspqueue export failed (see log for details)"); } if (opt_etm) { err = htp_iface_enable_etm(this->handle); if (err != 0) { GGML_LOG_ERROR("ggml-hex: failed to enable ETM tracing: 0x%08x\n", (unsigned) err); } } // Start the DSP-side service. We need to pass the queue ID to the // DSP in a FastRPC call; the DSP side will import the queue and start // listening for packets in a callback. 
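    // Added note: htp_iface_start() below forwards dev_id, the exported queue_id and the
    // requested number of HVX threads (opt_nhvx) in a single FastRPC call. valid_iface is
    // only set after it succeeds, so a failure here leaves release() free to skip
    // htp_iface_stop() for a session that never started.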
err = htp_iface_start(this->handle, dev_id, this->queue_id, opt_nhvx); if (err != 0) { GGML_LOG_ERROR("ggml-hex: failed to start session: 0x%08x\n", (unsigned) err); throw std::runtime_error("ggml-hex: iface start failed (see log for details)"); } this->valid_iface = true; } void ggml_hexagon_session::release() noexcept(true) { GGML_LOG_INFO("ggml-hex: releasing session: %s\n", this->name.c_str()); int err; // Stop the DSP-side service and close the queue if (this->valid_iface) { err = htp_iface_stop(this->handle); if (err != 0) { GGML_ABORT("ggml-hex: htp_iface_stop failed: 0x%08x\n", (unsigned) err); } } if (opt_etm) { err = htp_iface_disable_etm(this->handle); if (err != 0) { GGML_LOG_ERROR("ggml-hex: warn : failed to disable ETM tracing: 0x%08x\n", (unsigned) err); } } if (this->valid_queue) { err = dspqueue_close(queue); if (err != 0) { GGML_ABORT("ggml-hex: dspqueue_close failed: 0x%08x\n", (unsigned) err); } } if (this->valid_handle) { htp_iface_close(this->handle); } } ggml_hexagon_session::ggml_hexagon_session(int dev_id, ggml_backend_dev_t dev) noexcept(false) { buffer_type.device = dev; repack_buffer_type.device = dev; try { allocate(dev_id); buffer_type.iface = ggml_backend_hexagon_buffer_type_interface; buffer_type.context = new ggml_backend_hexagon_buffer_type_context(this->name, this); repack_buffer_type.iface = ggml_backend_hexagon_repack_buffer_type_interface; repack_buffer_type.context = new ggml_backend_hexagon_buffer_type_context(this->name + "-REPACK", this); } catch (const std::exception & exc) { release(); throw; } } ggml_hexagon_session::~ggml_hexagon_session() noexcept(true) { release(); delete static_cast(buffer_type.context); delete static_cast(repack_buffer_type.context); } // ** backend interface static bool ggml_backend_buffer_is_hexagon(const struct ggml_backend_buffer * b) { return b->buft->iface.get_alignment == ggml_backend_hexagon_buffer_type_get_alignment; } static inline bool ggml_backend_buffer_is_hexagon_repack(const struct ggml_backend_buffer * b) { return b->buft->iface.alloc_buffer == ggml_backend_hexagon_repack_buffer_type_alloc_buffer; } static bool hex_supported_dims2(const struct ggml_tensor * x, const struct ggml_tensor * y) { if (x->ne[0] != y->ne[0]) { return false; } if (x->ne[1] != y->ne[1]) { return false; } if (x->ne[2] != y->ne[2]) { return false; } if (x->ne[3] != y->ne[3]) { return false; } return true; } static bool hex_supported_src0_type(ggml_type t) { return t == GGML_TYPE_F32; } static bool hex_supported_src1_type(ggml_type t) { return t == GGML_TYPE_F32; } static bool hex_supported_src2_type(ggml_type t) { return t == GGML_TYPE_F32; } static bool hex_supported_src1_type2(ggml_type t) { return t == GGML_TYPE_F16; } static bool hex_supported_src1_type3(ggml_type t) { return t == GGML_TYPE_I32; } static bool hex_supported_dst_type(ggml_type t) { return t == GGML_TYPE_F32; } static bool hex_supported_dims(const struct ggml_tensor * x, const struct ggml_tensor * y) { // TODO: support broadcast for ne[2 and 3] if (x->ne[0] != y->ne[0]) { return false; } if (x->ne[2] != y->ne[2]) { return false; } if (x->ne[3] != y->ne[3]) { return false; } return true; } static bool ggml_hexagon_supported_mul_mat(const struct ggml_hexagon_session * sess, const struct ggml_tensor * dst) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; if (src1->type != GGML_TYPE_F32 || dst->type != GGML_TYPE_F32) { return false; } // TODO: add support for non-cont tensors if (!ggml_is_contiguous(src1) || 
!ggml_is_contiguous(dst)) { return false; } switch (src0->type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: if (src0->ne[0] % 32) { return false; } if (src0->ne[1] > 16 * 1024) { return false; // typically the lm-head which would be too large for VTCM } // if ((src0->ne[2] != src1->ne[2] || src0->ne[3] != src1->ne[3])) return false; if ((src1->ne[2] != 1 || src1->ne[3] != 1)) { return false; } // src0 (weights) must be repacked if (src0->buffer && !ggml_backend_buffer_is_hexagon_repack(src0->buffer)) { return false; } break; case GGML_TYPE_F16: if (src0->nb[1] < src0->nb[0]) { GGML_LOG_DEBUG("ggml_hexagon_supported_mul_mat: permuted F16 src0 not supported\n"); return false; } break; default: return false; } return true; } static bool ggml_hexagon_supported_mul_mat_id(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const struct ggml_tensor * src2 = op->src[2]; const struct ggml_tensor * dst = op; if (src1->type != GGML_TYPE_F32 || dst->type != GGML_TYPE_F32 || src2->type != GGML_TYPE_I32) { return false; } switch (src0->type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: if ((src0->ne[0] % 32)) { return false; } // src0 (weights) must be repacked if (src0->buffer && !ggml_backend_buffer_is_hexagon_repack(src0->buffer)) { return false; } break; case GGML_TYPE_F16: if (!opt_experimental) { return false; } break; default: return false; } // TODO: add support for non-cont tensors if (!ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { return false; } return true; } static bool ggml_hexagon_supported_binary(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const struct ggml_tensor * dst = op; if (!hex_supported_src0_type(src0->type)) { return false; } if (!hex_supported_src1_type(src1->type)) { return false; } if (!hex_supported_dst_type(dst->type)) { return false; } if (!hex_supported_dims2(src0, dst)) { return false; } if (!ggml_can_repeat(src1, src0)) { return false; } // TODO: add support for non-contigiuos tensors if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { return false; } return true; } static bool ggml_hexagon_supported_add_id(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const struct ggml_tensor * dst = op; if (!hex_supported_src0_type(src0->type)) { return false; } if (!hex_supported_src1_type(src1->type)) { return false; } if (!hex_supported_dst_type(dst->type)) { return false; } if (!hex_supported_dims2(src0, dst)) { return false; } // REVISIT: add support for non-contigiuos tensors if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { return false; } return true; } static bool ggml_hexagon_supported_unary(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * dst = op; if (!hex_supported_src0_type(src0->type)) { return false; } if (!hex_supported_dst_type(dst->type)) { return false; } if (!hex_supported_dims2(src0, dst)) { return false; } // TODO: add support for non-contigiuos tensors if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(dst)) { return false; } return true; } static bool 
ggml_hexagon_supported_activations(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const struct ggml_tensor * dst = op; if (!hex_supported_src0_type(src0->type)) { return false; } if (!hex_supported_dst_type(dst->type)) { return false; } if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(dst)) { return false; } if (src1) { if (!hex_supported_src1_type(src1->type)) { return false; } if (!hex_supported_dims2(src0, src1)) { return false; } if (!ggml_is_contiguous(src1)) { return false; } } return true; } static bool ggml_hexagon_supported_softmax(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const struct ggml_tensor * src2 = op->src[2]; const struct ggml_tensor * dst = op; if (src2) { return false; // FIXME: add support for sinks } if (!hex_supported_src0_type(src0->type)) { return false; } if (!hex_supported_dst_type(dst->type)) { return false; } if (src1) { if (!hex_supported_src1_type(src1->type) && !hex_supported_src1_type2(src1->type)) { return false; } if (src0->ne[0] != src1->ne[0]) { return false; } if (src1->ne[1] < src0->ne[1]) { return false; } if (src0->ne[2] % src1->ne[2] != 0) { return false; } if (src0->ne[3] % src1->ne[3] != 0) { return false; } } if (src1) { if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { return false; } } else { if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(dst)) { return false; } } return true; } static bool ggml_hexagon_supported_rope(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { const int32_t * op_params = &op->op_params[0]; int mode = op_params[2]; if ((mode & GGML_ROPE_TYPE_MROPE) || (mode & GGML_ROPE_TYPE_VISION)) { return false; } if (mode & 1) { return false; } const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; const struct ggml_tensor * src2 = op->src[2]; const struct ggml_tensor * dst = op; if (!hex_supported_src0_type(src0->type)) { return false; // FIXME: add support for GGML_TYPE_F16 for src0 } if (!hex_supported_dst_type(dst->type)) { return false; } if (!hex_supported_src1_type3(src1->type)) { return false; } if (src2) { if (!hex_supported_src2_type(src2->type)) { return false; } int n_dims = op_params[1]; if (src2->ne[0] < (n_dims / 2)) { return false; } } if (src2) { if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(src2) || !ggml_is_contiguous(dst)) { return false; } } else { if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { return false; } } return true; } enum dspqbuf_type { DSPQBUF_TYPE_DSP_WRITE_CPU_READ = 0, DSPQBUF_TYPE_CPU_WRITE_DSP_READ, DSPQBUF_TYPE_CONSTANT, }; static void dspqbuf_dump(dspqueue_buffer * d, const struct ggml_tensor * t, dspqbuf_type type) { if (opt_verbose < 2) return; auto buf = static_cast(t->buffer->context); auto sess = buf->sess; GGML_LOG_DEBUG("ggml-hex: %s dspqbuf : %s base-addr %p base-size %zu data %p offset %u size %u\n", sess->name.c_str(), t->name, (void *) buf->base, buf->size, (void *) d->ptr, (unsigned int) d->offset, (unsigned int) d->size); } // Init hexagon tensor from GGML tensor and Hexagon buffer static void htp_req_tensor_init(htp_tensor * h, const ggml_tensor * t) { h->data = 0; // updated by the receiver h->type = t->type; h->ne[0] = t->ne[0]; h->ne[1] = t->ne[1]; 
h->ne[2] = t->ne[2]; h->ne[3] = t->ne[3]; h->nb[0] = t->nb[0]; h->nb[1] = t->nb[1]; h->nb[2] = t->nb[2]; h->nb[3] = t->nb[3]; } static size_t htp_req_buff_init(htp_tensor *h, dspqueue_buffer * d, const ggml_tensor * t, dspqbuf_type type) { if (!t) { return 0; } auto buf = static_cast(t->buffer->context); memset(d, 0, sizeof(*d)); d->fd = buf->fd; d->ptr = t->data; d->offset = (uint8_t *) t->data - buf->base; d->size = ggml_nbytes(t); switch (type) { case DSPQBUF_TYPE_DSP_WRITE_CPU_READ: // Flush CPU d->flags = DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER; break; case DSPQBUF_TYPE_CPU_WRITE_DSP_READ: // Flush CPU, Invalidate DSP d->flags = DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT; break; default: // Constant buffer, no cache maintenance d->flags = 0; break; } htp_req_tensor_init(h, t); dspqbuf_dump(d, t, type); return 1; } typedef size_t (*htp_req_init_func_t)(htp_general_req * req, dspqueue_buffer * bufs, const ggml_tensor * op); template static inline void ggml_hexagon_dispatch_op(ggml_hexagon_session *sess, const struct ggml_tensor * op, uint32_t flags) { uint64_t t = ggml_time_us(); // Construct HTP request htp_general_req req; memset(&req, 0, sizeof(req)); req.flags = flags; if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; } if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; } ggml_hexagon_dump_op_exec(sess->name, op, req.flags); if ((opt_opmask & HTP_OPMASK_QUEUE)) { dspqueue_buffer bufs[HTP_MAX_PACKET_BUFFERS]; size_t n_bufs = _init_req_func(&req, bufs, op); sess->enqueue(req, bufs, n_bufs, opt_opsync); } t = ggml_time_us() - t; ggml_hexagon_dump_op_prof(sess->name, op, sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, t); } template static inline size_t init_binary_req(htp_general_req * req, dspqueue_buffer * bufs, const ggml_tensor * t) { switch (t->op) { case GGML_OP_MUL_MAT: req->op = HTP_OP_MUL_MAT; break; case GGML_OP_MUL: req->op = HTP_OP_MUL; break; case GGML_OP_ADD: req->op = HTP_OP_ADD; break; case GGML_OP_SUB: req->op = HTP_OP_SUB; break; default: GGML_ABORT("ggml-hex: binary : unsupported op: %d\n", t->op); break; } // src0: Weights (mulmat) or First Operand (binary op). // If constant (e.g. weights), no cache management is needed. // src1: Input Activations (mulmat) or Second Operand (binary op). size_t n_bufs = 0; n_bufs += htp_req_buff_init(&req->src0, &bufs[n_bufs], t->src[0], _is_src0_constant ? DSPQBUF_TYPE_CONSTANT : DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->src1, &bufs[n_bufs], t->src[1], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->dst, &bufs[n_bufs], t, DSPQBUF_TYPE_DSP_WRITE_CPU_READ); return n_bufs; } template static inline size_t init_binary_id_req(htp_general_req * req, dspqueue_buffer * bufs, const ggml_tensor * t) { switch (t->op) { case GGML_OP_MUL_MAT_ID: req->op = HTP_OP_MUL_MAT_ID; break; case GGML_OP_ADD_ID: req->op = HTP_OP_ADD_ID; break; default: GGML_ABORT("ggml-hex: unsupported op: %d\n", t->op); } // src0: Weights (mulmat) or Input Activations (other op). // If constant, no cache management is needed. // src1: Input Activations (mulmat) or Second Operand (binary op). // src2: Expert IDs (mulmat) or Activated Experts (other op). size_t n_bufs = 0; n_bufs += htp_req_buff_init(&req->src0, &bufs[n_bufs], t->src[0], _is_src0_constant ? 
DSPQBUF_TYPE_CONSTANT : DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->src1, &bufs[n_bufs], t->src[1], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->src2, &bufs[n_bufs], t->src[2], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->dst, &bufs[n_bufs], t, DSPQBUF_TYPE_DSP_WRITE_CPU_READ); return n_bufs; } static inline size_t init_unary_req(htp_general_req * req, dspqueue_buffer * bufs, const ggml_tensor * t) { memcpy(&req->op_params, &t->op_params, sizeof(t->op_params)); bool supported = false; switch (t->op) { case GGML_OP_RMS_NORM: req->op = HTP_OP_RMS_NORM; supported = true; break; case GGML_OP_UNARY: if (ggml_get_unary_op(t) == GGML_UNARY_OP_SILU) { req->op = HTP_OP_UNARY_SILU; supported = true; } else if (ggml_get_unary_op(t) == GGML_UNARY_OP_GELU) { req->op = HTP_OP_UNARY_GELU; supported = true; } break; case GGML_OP_GLU: if (ggml_get_glu_op(t) == GGML_GLU_OP_SWIGLU) { req->op = HTP_OP_GLU_SWIGLU; supported = true; } else if (ggml_get_glu_op(t) == GGML_GLU_OP_SWIGLU_OAI) { req->op = HTP_OP_GLU_SWIGLU_OAI; supported = true; } break; case GGML_OP_SOFT_MAX: req->op = HTP_OP_SOFTMAX; supported = true; break; default: break; } if (!supported) { GGML_ABORT("ggml-hex: unary : unsupported op: %d\n", t->op); } size_t n_bufs = 0; n_bufs += htp_req_buff_init(&req->src0, &bufs[n_bufs], t->src[0], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->src1, &bufs[n_bufs], t->src[1], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->dst, &bufs[n_bufs], t, DSPQBUF_TYPE_DSP_WRITE_CPU_READ); return n_bufs; } static inline size_t init_rope_req(htp_general_req * req, dspqueue_buffer * bufs, const ggml_tensor * t) { memcpy(&req->op_params, &t->op_params, sizeof(t->op_params)); req->op = HTP_OP_ROPE; size_t n_bufs = 0; n_bufs += htp_req_buff_init(&req->src0, &bufs[n_bufs], t->src[0], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->src1, &bufs[n_bufs], t->src[1], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->src2, &bufs[n_bufs], t->src[2], DSPQBUF_TYPE_CPU_WRITE_DSP_READ); n_bufs += htp_req_buff_init(&req->dst, &bufs[n_bufs], t, DSPQBUF_TYPE_DSP_WRITE_CPU_READ); return n_bufs; } static const char * ggml_backend_hexagon_name(ggml_backend_t backend) { auto sess = static_cast(backend->context); return sess->name.c_str(); } static void ggml_backend_hexagon_free(ggml_backend_t backend) { // we just need to delete the backend here // the sessions are allocated & freed as part of the registry delete backend; } static inline bool op_reuse_src1(const ggml_tensor * op1, const ggml_tensor * op0) { return (op0 && op0->src[1] == op1->src[1] && ggml_is_quantized(op0->src[0]->type) && ggml_is_quantized(op1->src[1]->type)); } static inline bool is_compute_op(ggml_tensor *node) { return !(ggml_op_is_empty(node->op) || ggml_is_empty(node)); } // scan the graph and figure out last compute op index static inline int last_compute_op(ggml_cgraph * graph) { int last = 0; for (int i = 0; i < graph->n_nodes; ++i) { if (is_compute_op(graph->nodes[i])) { last = i; } } return last; } static ggml_status ggml_backend_hexagon_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) { auto sess = static_cast(backend->context); HEX_VERBOSE("ggml-hex: %s graph-compute n_nodes %d\n", sess->name.c_str(), graph->n_nodes); const int last = last_compute_op(graph); const struct ggml_tensor * prev_quant_op = nullptr; // prev executed op with quantizer for (int i = 0; i < graph->n_nodes; ++i) { ggml_tensor * node 
= graph->nodes[i]; if (!is_compute_op(node)) { continue; } uint32_t flags = 0; // skip quantizer if src1 is reused if (op_reuse_src1(node, prev_quant_op)) { flags |= HTP_OPFLAGS_SKIP_QUANTIZE; } // ask for early notification for the last Op if (i == last) { flags |= HTP_OPFLAGS_EARLY_WAKEUP; } switch (node->op) { case GGML_OP_MUL_MAT: if (ggml_is_quantized(node->src[0]->type)) { ggml_hexagon_dispatch_op>(sess, node, flags); } else { ggml_hexagon_dispatch_op>(sess, node, flags); } prev_quant_op = node; break; case GGML_OP_MUL_MAT_ID: if (ggml_is_quantized(node->src[0]->type)) { ggml_hexagon_dispatch_op>(sess, node, flags); } else { ggml_hexagon_dispatch_op>(sess, node, flags); } prev_quant_op = node; break; case GGML_OP_MUL: case GGML_OP_ADD: case GGML_OP_SUB: ggml_hexagon_dispatch_op>(sess, node, flags); break; case GGML_OP_ADD_ID: ggml_hexagon_dispatch_op>(sess, node, flags); break; case GGML_OP_RMS_NORM: ggml_hexagon_dispatch_op(sess, node, flags); break; case GGML_OP_UNARY: if ((ggml_get_unary_op(node) == GGML_UNARY_OP_SILU) || (ggml_get_unary_op(node) == GGML_UNARY_OP_GELU)) { ggml_hexagon_dispatch_op(sess, node, flags); } break; case GGML_OP_GLU: if ((ggml_get_glu_op(node) == GGML_GLU_OP_SWIGLU) || (ggml_get_glu_op(node) == GGML_GLU_OP_SWIGLU_OAI)) { ggml_hexagon_dispatch_op(sess, node, flags); } break; case GGML_OP_SOFT_MAX: ggml_hexagon_dispatch_op(sess, node, flags); break; case GGML_OP_ROPE: ggml_hexagon_dispatch_op(sess, node, flags); break; default: GGML_ABORT("\nggml-hex: graph-compute %s is not supported\n", ggml_op_desc(node)); } } // Wait until all pending ops complete sess->flush(); return GGML_STATUS_SUCCESS; } static void ggml_backend_hexagon_synchronize(ggml_backend_t backend) { auto sess = static_cast(backend->context); HEX_VERBOSE("ggml-hex: %s synchronize\n", sess->name.c_str()); // Wait until all pending ops complete sess->flush(); } struct node_info { ggml_tensor * node; std::vector fused; ggml_op op() const { return node->op; } const ggml_tensor * dst() const { return fused.empty() ? node : fused.back(); } const ggml_tensor * src0() const { return node->src[0]; } const ggml_tensor * src1() const { return node->src[1]; } bool is_empty() const { return ggml_op_is_empty(node->op); } void add_fused(ggml_tensor * t) { fused.push_back(t); } bool stackable() const { switch (this->op()) { case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: return ggml_is_quantized(this->src0()->type); default: return false; } } bool same_input(const node_info& n) const { return n.src1() == this->src1(); } }; static std::vector ggml_hexagon_graph_optimize_reorder(const std::vector & nodes) { const int n = nodes.size(); std::vector res; res.reserve(n); std::vector used(n, false); // The main goal here is to stack the MUL_MAT ops with the same src1 input. // This allows use to reuse dynamically quantized src1 in VTCM. // TODO: the current version might do incorrect reodering in cases where quantized src0 // input is an output of another Op. 
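    // Illustrative (hypothetical) example: for nodes [MM(A,x), ADD, MM(B,x), MM(C,y)]
    // with quantized weights A/B/C, the loop below emits [MM(A,x), MM(B,x), ADD, MM(C,y)]:
    // MM(B,x) is pulled forward (within N_FORWARD nodes) because it shares src1 == x with
    // MM(A,x), letting the dynamically quantized x stay resident in VTCM across both.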
for (int i0 = 0; i0 < n; i0++) { if (used[i0]) { continue; } res.push_back(i0); const auto & node0 = nodes[i0]; if (!node0.stackable()) { continue; } // that many nodes forward to search for stackable nodes that can reuse VTCM constexpr int N_FORWARD = 8; for (int i1 = i0 + 1; i1 < i0 + N_FORWARD && i1 < n; i1++) { if (used[i1]) { continue; } const auto & node1 = nodes[i1]; if (node1.stackable() && node1.same_input(node0)) { res.push_back(i1); used[i1] = true; } } } return res; } static void ggml_backend_hexagon_graph_optimize(ggml_backend_t backend, ggml_cgraph * gf) { const int n = gf->n_nodes; constexpr int MAX_FUSE = 16; enum ggml_op ops[MAX_FUSE]; std::vector nodes; nodes.reserve(gf->n_nodes); // fuse nodes: // we don't want to make reorders that break fusing, so we first pack all fusable tensors // and perform the reorder over the fused nodes. after the reorder is done, we unfuse for (int i = 0; i < n; i++) { node_info node = { /*.node =*/gf->nodes[i], /*.fused =*/{}, }; // fuse only ops that start with these operations // can be expanded when needed if (node.op() == GGML_OP_ADD || node.op() == GGML_OP_NORM || node.op() == GGML_OP_RMS_NORM) { ops[0] = node.op(); int f = i + 1; while (f < n && f < i + MAX_FUSE) { // conservatively allow fusing only these ops // can be expanded when needed if (gf->nodes[f]->op != GGML_OP_ADD && gf->nodes[f]->op != GGML_OP_MUL && gf->nodes[f]->op != GGML_OP_NORM && gf->nodes[f]->op != GGML_OP_RMS_NORM) { break; } ops[f - i] = gf->nodes[f]->op; f++; } f -= i; for (; f > 1; f--) { if (ggml_can_fuse(gf, i, ops, f)) { break; } } // add the fused tensors into the node info so we can unfuse them later for (int k = 1; k < f; k++) { ++i; // the .dst() becomes the last fused tensor node.add_fused(gf->nodes[i]); } } nodes.push_back(std::move(node)); } const auto order = ggml_hexagon_graph_optimize_reorder(nodes); // unfuse { int j = 0; for (const auto i : order) { const auto & node = nodes[i]; gf->nodes[j++] = node.node; for (auto * fused : node.fused) { gf->nodes[j++] = fused; } } } } static struct ggml_backend_i hexagon_backend_i = { /* .get_name = */ ggml_backend_hexagon_name, /* .free = */ ggml_backend_hexagon_free, /* .set_tensor_async = */ NULL, /* .get_tensor_async = */ NULL, /* .cpy_tensor_async = */ NULL, /* .synchronize = */ ggml_backend_hexagon_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_hexagon_graph_compute, /* .event_record = */ NULL, /* .event_wait = */ NULL, /* .graph_optimize = */ ggml_backend_hexagon_graph_optimize, }; static ggml_guid_t ggml_backend_hexagon_guid() { static ggml_guid guid = { 0x7b, 0x57, 0xdc, 0xaf, 0xde, 0x12, 0x1d, 0x49, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 }; return &guid; } bool ggml_backend_is_hexagon(ggml_backend_t backend) { return backend && backend->iface.get_name == ggml_backend_hexagon_name; } // device interface static ggml_backend_t ggml_backend_hexagon_device_init(ggml_backend_dev_t dev, const char * params) { auto sess = static_cast(dev->context); return new ggml_backend{ /* .guid = */ ggml_backend_hexagon_guid(), /* .interface = */ hexagon_backend_i, /* .device = */ dev, /* .context = */ sess, }; GGML_UNUSED(params); } static const char * ggml_backend_hexagon_device_get_name(ggml_backend_dev_t dev) { auto sess = static_cast(dev->context); return sess->name.c_str(); GGML_UNUSED(dev); } static const char * ggml_backend_hexagon_device_get_description(ggml_backend_dev_t 
dev) { return "Hexagon"; GGML_UNUSED(dev); } static void ggml_backend_hexagon_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { // ~2GB per session for now *free = 2ULL * 1024 * 1024 * 1024; *total = *free; GGML_UNUSED(dev); } static enum ggml_backend_dev_type ggml_backend_hexagon_device_get_type(ggml_backend_dev_t dev) { return GGML_BACKEND_DEVICE_TYPE_GPU; GGML_UNUSED(dev); } static void ggml_backend_hexagon_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_hexagon_device_get_name(dev); props->description = ggml_backend_hexagon_device_get_description(dev); props->type = ggml_backend_hexagon_device_get_type(dev); ggml_backend_hexagon_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = { /* .async = */ true, /* .host_buffer = */ (bool) opt_hostbuf, /* .buffer_from_host_ptr = */ false, /* .events = */ false, }; } static ggml_backend_buffer_type_t ggml_backend_hexagon_device_get_buffer_type(ggml_backend_dev_t dev) { auto sess = static_cast(dev->context); return &sess->buffer_type; } static ggml_backend_buffer_type_t ggml_backend_hexagon_device_get_repack_buffer_type(ggml_backend_dev_t dev) { auto sess = static_cast(dev->context); return &sess->repack_buffer_type; } static bool ggml_hexagon_supported_buffer(ggml_hexagon_session *sess, const struct ggml_tensor * t) { if (t && t->buffer) { if (ggml_backend_buffer_is_hexagon(t->buffer) == false) return false; // not our buffer if (ggml_backend_hexagon_buffer_get_sess(t->buffer) != sess) return false; // wrong session } return true; } static bool ggml_hexagon_supported_buffers(ggml_hexagon_session *sess, const struct ggml_tensor * t) { // all srcs & dsts must be mapped to the same session if (!ggml_hexagon_supported_buffer(sess, t)) { return false; } for (int i = 0; i < GGML_MAX_SRC; i++) { if (!ggml_hexagon_supported_buffer(sess, t->src[i])) { return false; } } return true; } static bool ggml_backend_hexagon_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { auto sess = static_cast(dev->context); // all srcs & dsts must be mapped to the same session if (!ggml_hexagon_supported_buffers(sess, op)) { ggml_hexagon_dump_op_supp(sess->name, op, false); return false; } bool supp = false; switch (op->op) { case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: supp = true; break; case GGML_OP_MUL_MAT: supp = ggml_hexagon_supported_mul_mat(sess, op); break; case GGML_OP_MUL_MAT_ID: supp = ggml_hexagon_supported_mul_mat_id(sess, op); break; case GGML_OP_MUL: case GGML_OP_ADD: case GGML_OP_SUB: supp = ggml_hexagon_supported_binary(sess, op); break; case GGML_OP_ADD_ID: supp = ggml_hexagon_supported_add_id(sess, op); break; case GGML_OP_RMS_NORM: supp = ggml_hexagon_supported_unary(sess, op); break; case GGML_OP_SOFT_MAX: supp = ggml_hexagon_supported_softmax(sess, op); break; case GGML_OP_UNARY: { const auto unary_op = ggml_get_unary_op(op); if (unary_op == GGML_UNARY_OP_SILU || unary_op == GGML_UNARY_OP_GELU) { supp = ggml_hexagon_supported_activations(sess, op); } break; } case GGML_OP_GLU: { const auto glu_op = ggml_get_glu_op(op); if ((glu_op == GGML_GLU_OP_SWIGLU) || (glu_op == GGML_GLU_OP_SWIGLU_OAI)) { supp = ggml_hexagon_supported_activations(sess, op); } break; } case GGML_OP_ROPE: supp = ggml_hexagon_supported_rope(sess, op); break; default: break; } ggml_hexagon_dump_op_supp(sess->name, op, supp); return supp; } static bool 
ggml_backend_hexagon_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { if (buft->iface.get_alignment != ggml_backend_hexagon_buffer_type_get_alignment) { return false; } auto s0 = static_cast(dev->context); auto s1 = static_cast(buft->context)->sess; // Need session/domain-id for buffers to be compatible bool supp = (s0->session_id == s1->session_id); HEX_VERBOSE("ggml-hex: %s device-supports-buft %s (%d)\n", s0->name.c_str(), s1->name.c_str(), (int) supp); return supp; } static ggml_backend_buffer_type_t * ggml_backend_hexagon_device_get_extra_buffers_type(ggml_backend_dev_t dev) { auto s0 = static_cast(dev->context); HEX_VERBOSE("ggml-hex: device-get-extra-buft : %s \n", s0->name.c_str()); static ggml_backend_buffer_type_t bufts[2]; bufts[0] = ggml_backend_hexagon_device_get_repack_buffer_type(dev); bufts[1] = NULL; return bufts; } static const struct ggml_backend_device_i ggml_backend_hexagon_device_i = { /* .get_name = */ ggml_backend_hexagon_device_get_name, /* .get_description = */ ggml_backend_hexagon_device_get_description, /* .get_memory = */ ggml_backend_hexagon_device_get_memory, /* .get_type = */ ggml_backend_hexagon_device_get_type, /* .get_props = */ ggml_backend_hexagon_device_get_props, /* .init_backend = */ ggml_backend_hexagon_device_init, /* .get_buffer_type = */ ggml_backend_hexagon_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, // ggml_backend_hexagon_device_get_host_buffer_type, /* .buffer_from_host_ptr = */ NULL, // ggml_backend_hexagon_device_buffer_from_ptr, /* .supports_op = */ ggml_backend_hexagon_device_supports_op, /* .supports_buft = */ ggml_backend_hexagon_device_supports_buft, /* .offload_op = */ NULL, // ggml_backend_hexagon_device_offload_op, /* .event_new = */ NULL, /* .event_free = */ NULL, /* .event_synchronize = */ NULL, }; //** backend registry #define GGML_HEXAGON_MAX_SESSIONS 16 struct ggml_hexagon_registry { ggml_hexagon_registry(ggml_backend_reg_t reg); ~ggml_hexagon_registry(); ggml_backend_device devices[GGML_HEXAGON_MAX_SESSIONS]; }; ggml_hexagon_registry::ggml_hexagon_registry(ggml_backend_reg_t reg) { GGML_LOG_INFO("ggml-hex: Hexagon backend (experimental) : allocating new registry : ndev %zu\n", opt_ndev); if (!opt_arch) { int err = get_hex_arch_ver(CDSP_DOMAIN_ID, &opt_arch); if (err != 0) { GGML_LOG_ERROR("ggml-hex: failed to query HTP version (err %d) defaulting to v73\n", err); opt_arch = 73; } } if (opt_arch < 75) { opt_ndev = 1; GGML_LOG_WARN("ggml-hex: forcing ndev to 1 for SoCs archs lower than v75.\n"); } GGML_LOG_INFO("ggml-hex: Hexagon Arch version v%d\n", opt_arch); // Create devices / sessions for (size_t i = 0; i < opt_ndev; i++) { devices[i].iface = ggml_backend_hexagon_device_i; devices[i].reg = reg; try { devices[i].context = new ggml_hexagon_session(i, &devices[i]); } catch (const std::exception & exc) { GGML_LOG_ERROR("ggml-hex: failed to create device/session %zu\n", i); devices[i].context = nullptr; } } } ggml_hexagon_registry::~ggml_hexagon_registry() { GGML_LOG_INFO("ggml-hex: releasing registry\n"); // Release devices / sessions for (size_t i = 0; i < opt_ndev; i++) { auto sess = static_cast(devices[i].context); delete sess; } } static const char * ggml_backend_hexagon_reg_get_name(ggml_backend_reg_t reg) { return "HTP"; GGML_UNUSED(reg); } static size_t ggml_backend_hexagon_reg_get_device_count(ggml_backend_reg_t reg) { return opt_ndev; GGML_UNUSED(reg); } static ggml_backend_dev_t ggml_backend_hexagon_reg_get_device(ggml_backend_reg_t reg, size_t index) { auto hreg = 
static_cast(reg->context); if (index >= opt_ndev || !hreg->devices[index].context) { return nullptr; } return &hreg->devices[index]; } static void * ggml_backend_hexagon_get_proc_address(ggml_backend_reg_t reg, const char * name) { if (strcmp(name, "ggml_backend_dev_get_extra_bufts") == 0) { ggml_backend_dev_get_extra_bufts_t fct = ggml_backend_hexagon_device_get_extra_buffers_type; return (void *) fct; } return NULL; } static void ggml_hexagon_init(ggml_backend_reg * reg) { // Basic sanity checks to make sure definitions match static_assert((unsigned int) HTP_TYPE_Q4_0 == (unsigned int) GGML_TYPE_Q4_0, "please update hexagon_type to match ggml_type"); static_assert((unsigned int) HTP_TYPE_Q8_0 == (unsigned int) GGML_TYPE_Q8_0, "please update hexagon_type to match ggml_type"); static_assert((unsigned int) HTP_TYPE_MXFP4 == (unsigned int) GGML_TYPE_MXFP4, "please update hexagon_type to match ggml_type"); const char * str_verbose = getenv("GGML_HEXAGON_VERBOSE"); const char * str_hostbuf = getenv("GGML_HEXAGON_HOSTBUF"); opt_verbose = str_verbose ? atoi(str_verbose) : 0; opt_profile = getenv("GGML_HEXAGON_PROFILE") != nullptr; opt_etm = getenv("GGML_HEXAGON_ETM") != nullptr; opt_experimental = getenv("GGML_HEXAGON_EXPERIMENTAL") != nullptr; const char * str_opmask = getenv("GGML_HEXAGON_OPMASK"); if (str_opmask != nullptr) { opt_opmask = strtoul(str_opmask, NULL, 0); } opt_opsync = getenv("GGML_HEXAGON_OPSYNC") != nullptr; const char * str_ndev = getenv("GGML_HEXAGON_NDEV"); if (str_ndev) { opt_ndev = strtoul(str_ndev, NULL, 0); if (opt_ndev > GGML_HEXAGON_MAX_SESSIONS) { opt_ndev = GGML_HEXAGON_MAX_SESSIONS; } } const char * str_nhvx = getenv("GGML_HEXAGON_NHVX"); if (str_nhvx) { opt_nhvx = strtoul(str_nhvx, NULL, 0); } const char * str_arch = getenv("GGML_HEXAGON_ARCH"); if (str_arch) { if (str_arch[0] == 'v') { str_arch++; } opt_arch = strtoul(str_arch, NULL, 0); } opt_hostbuf = str_hostbuf ? 
atoi(str_hostbuf) : 1; reg->context = new ggml_hexagon_registry(reg); HEX_VERBOSE("ggml-hex: size-of-general-req %zu size-of-general-rsp %zu\n", sizeof(struct htp_general_req), sizeof(struct htp_general_rsp)); } static const struct ggml_backend_reg_i ggml_backend_hexagon_reg_i = { /* .get_name = */ ggml_backend_hexagon_reg_get_name, /* .get_device_count = */ ggml_backend_hexagon_reg_get_device_count, /* .get_device = */ ggml_backend_hexagon_reg_get_device, /* .get_proc_address = */ ggml_backend_hexagon_get_proc_address, }; ggml_backend_reg_t ggml_backend_hexagon_reg(void) { static bool initialized = false; static ggml_backend_reg reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_hexagon_reg_i, /* .context = */ NULL }; { static std::mutex mutex; std::lock_guard lock(mutex); if (!initialized) { ggml_hexagon_init(®); } initialized = true; } return ® } GGML_BACKEND_DL_IMPL(ggml_backend_hexagon_reg) ggml-org-ggml-3678254/src/ggml-hexagon/htp-utils.c000066400000000000000000000406771512524704700216430ustar00rootroot00000000000000 #pragma clang diagnostic ignored "-Wgnu-anonymous-struct" #pragma clang diagnostic ignored "-Wmissing-prototypes" #pragma clang diagnostic ignored "-Wsign-compare" #define GGML_COMMON_IMPL_C #include "ggml-backend-impl.h" #include "ggml-common.h" #include "ggml-hexagon.h" #include "ggml-impl.h" #include "htp-utils.h" #include #include #include #include #include #include #include domain * get_domain(int domain_id) { int i = 0; int size = sizeof(supported_domains) / sizeof(domain); for (i = 0; i < size; i++) { if (supported_domains[i].id == domain_id) { return &supported_domains[i]; } } return NULL; } bool is_valid_domain_id(int domain_id, int compute_only) { int i = 0; int size = sizeof(supported_domains) / sizeof(domain); if (compute_only) { return is_CDSP(domain_id); } for (i = 0; i < size; i++) { if (supported_domains[i].id == domain_id) { return true; } } return false; } int get_domains_info(char * domain_type, int * num_domains, fastrpc_domain ** domains_info) { int nErr = AEE_SUCCESS; int ss_info = 0; if (domain_type != NULL) { if (strcmp(domain_type, "LPASS") == 0) { ss_info = FASTRPC_LPASS; } else if (strcmp(domain_type, "HPASS") == 0) { ss_info = FASTRPC_HPASS; } else { ss_info = FASTRPC_NSP; } } system_req_payload req = { 0 }; req.id = FASTRPC_GET_DOMAINS; req.sys.domains = NULL; fastrpc_domain * domain = NULL; if (ss_info != 0) { req.sys.flags = DOMAINS_LIST_FLAGS_SET_TYPE(req.sys.flags, ss_info); } else { req.sys.flags = 0; } #ifdef _WIN32 nErr = AEE_EUNSUPPORTED; goto bail; #endif if (remote_system_request) { nErr = remote_system_request(&req); if (nErr != AEE_SUCCESS) { GGML_LOG_ERROR("Failure in remote_system_request call: %d.\n", nErr); goto bail; } // Allocate memory for domain-info array req.sys.max_domains = req.sys.num_domains; if ((req.sys.domains = calloc(req.sys.num_domains, sizeof(fastrpc_domain))) == NULL) { nErr = AEE_ENOMEMORY; GGML_LOG_ERROR("Unable to allocate memory for req.sys.domains"); goto bail; } nErr = remote_system_request(&req); if (nErr != AEE_SUCCESS) { GGML_LOG_ERROR("Failure in remote_system_request call: %d.\n", nErr); goto bail; } for (int i = 0; i < req.sys.num_domains; i++) { // Verify that only requested type domains were returned domain = &req.sys.domains[i]; if (domain->type != ss_info && domain_type != NULL) { nErr = -1; GGML_LOG_ERROR("Incorrect data received from remote_system_request.\n"); goto bail; } } *domains_info = req.sys.domains; *num_domains = req.sys.num_domains; } else { nErr = 
AEE_EUNSUPPORTED; goto bail; } bail: if (nErr && !req.sys.domains) { free(req.sys.domains); } return nErr; } int get_effective_domain_id(char * domain_name, int session_id, int * effec_domain_id) { int err = 0; remote_rpc_effective_domain_id_t sess = { 0 }; sess.domain_name = domain_name; sess.domain_name_len = strlen(domain_name); sess.session_id = session_id; err = remote_session_control(FASTRPC_GET_EFFECTIVE_DOMAIN_ID, &sess, sizeof(sess)); if (err) { GGML_LOG_ERROR("Error 0x%x: failed to get effective domain id for %s, session id %d\n", err, sess.domain_name, session_id); return err; } *effec_domain_id = sess.effective_domain_id; return err; } int get_dsp_support(int * domain) { int nErr = AEE_SUCCESS; *domain = CDSP_DOMAIN_ID; // DSP domain default value is CDSP_DOMAIN_ID if (remote_handle_control) { struct remote_dsp_capability dsp_capability_domain = { CDSP_DOMAIN_ID, DOMAIN_SUPPORT, 0 }; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_domain, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); goto bail; } if (dsp_capability_domain.capability == 0) { dsp_capability_domain.domain = ADSP_DOMAIN_ID; // Check for ADSP support. dsp_capability_domain.attribute_ID = DOMAIN_SUPPORT; dsp_capability_domain.capability = 0; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_domain, sizeof(struct remote_dsp_capability)); if (dsp_capability_domain.capability) { *domain = ADSP_DOMAIN_ID; // For targets like Agatti (not having cDSP), domain is ADSP_DOMAIN_ID } } if (nErr != AEE_SUCCESS) { GGML_LOG_ERROR("\nget_dsp_support failed with Error 0x%x\n", nErr); goto bail; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); } bail: return nErr; } int get_vtcm_info(int domain, uint32_t * capability, uint32_t attr) { int nErr = AEE_SUCCESS; *capability = 0; if (attr == VTCM_PAGE || attr == VTCM_COUNT) { } else { nErr = AEE_EBADPARM; GGML_LOG_ERROR("Unsupported attr. 
Only VTCM_PAGE and VTCM_COUNT supported\n"); goto bail; } if (remote_handle_control) { if (domain == ADSP_DOMAIN_ID || domain == CDSP_DOMAIN_ID) { /* * Query the DSP for VTCM information * Since the ADSP does not have a dedicated VTCM, we expect the output to be 0 */ struct remote_dsp_capability dsp_capability_vtcm_dsp; dsp_capability_vtcm_dsp.domain = (uint32_t) domain; dsp_capability_vtcm_dsp.attribute_ID = attr; dsp_capability_vtcm_dsp.capability = (uint32_t) 0; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_vtcm_dsp, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); GGML_LOG_ERROR("Running the usecase without checking the capability\n"); nErr = AEE_SUCCESS; goto bail; } else if (nErr == AEE_SUCCESS) { *capability = dsp_capability_vtcm_dsp.capability; } else { GGML_LOG_ERROR("\nget_vtcm_info failed with Error 0x%x\n", nErr); goto bail; } } else { nErr = AEE_EUNSUPPORTED; GGML_LOG_ERROR("Unsupported domain %d\n", domain); goto bail; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); } bail: return nErr; } bool is_unsignedpd_supported(int domain_id) { int nErr = AEE_SUCCESS; if (remote_handle_control) { struct remote_dsp_capability dsp_capability_domain = { domain_id, UNSIGNED_PD_SUPPORT, 0 }; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_domain, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device. Falling back to signed pd.\n"); return false; } if (nErr) { GGML_LOG_ERROR("\nERROR 0x%x: FastRPC Capability API failed. Falling back to signed pd.", nErr); return false; } if (dsp_capability_domain.capability == 1) { return true; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device. 
Falling back to signed pd.\n"); return false; } return false; } bool get_unsignedpd_support(void) { return is_unsignedpd_supported(CDSP_DOMAIN_ID); } bool is_async_fastrpc_supported(int domain) { int nErr = AEE_SUCCESS; if (remote_handle_control) { if (domain == CDSP_DOMAIN_ID) { /* * Query the DSP for ASYNC_FASTRPC_SUPPORT information * Async fastrpc is supported only on CDSP */ struct remote_dsp_capability dsp_capability_async_support; dsp_capability_async_support.domain = (uint32_t) domain; dsp_capability_async_support.attribute_ID = ASYNC_FASTRPC_SUPPORT; dsp_capability_async_support.capability = (uint32_t) 0; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_async_support, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); GGML_LOG_ERROR("Running the usecase without checking the capability\n"); nErr = AEE_SUCCESS; goto bail; } else if (dsp_capability_async_support.capability == 1) { return true; } if (nErr != AEE_SUCCESS) { GGML_LOG_ERROR("\nis_async_fastrpc_supported failed with Error 0x%x\n", nErr); goto bail; } } else { nErr = AEE_EUNSUPPORTED; GGML_LOG_ERROR("Async fastrpc is not supported on domain %d\n", domain); goto bail; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); } bail: return false; } bool is_status_notification_supported(int domain) { int nErr = AEE_SUCCESS; if (remote_handle_control) { /* * Query the DSP for STATUS_NOTIFICATION_SUPPORT information * DSP User PD status notification Support */ struct remote_dsp_capability dsp_capability_status_notification_support; dsp_capability_status_notification_support.domain = (uint32_t) domain; dsp_capability_status_notification_support.attribute_ID = STATUS_NOTIFICATION_SUPPORT; dsp_capability_status_notification_support.capability = (uint32_t) 0; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_status_notification_support, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); GGML_LOG_ERROR("Running the usecase without checking the capability\n"); nErr = AEE_SUCCESS; goto bail; } else if (dsp_capability_status_notification_support.capability == 1) { return true; } if (nErr != AEE_SUCCESS) { GGML_LOG_ERROR("\nis_status_notification_supported failed with Error 0x%x\n", nErr); goto bail; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); } bail: return false; } int get_hmx_support_info(int domain, uint32_t * capability, uint32_t attr) { int nErr = AEE_SUCCESS; *capability = 0; if (attr != HMX_SUPPORT_SPATIAL && attr != HMX_SUPPORT_DEPTH) { nErr = AEE_EBADPARM; GGML_LOG_ERROR("Unsupported attr. 
Only HMX_SUPPORT_SPATIAL and HMX_SUPPORT_DEPTH supported\n"); goto bail; } if (remote_handle_control) { if (domain == CDSP_DOMAIN_ID) { /* * Query the DSP for HMX SUPPORT information * HMX is supported on CDSP only */ struct remote_dsp_capability dsp_capability_hmx_dsp; dsp_capability_hmx_dsp.domain = (uint32_t) domain; dsp_capability_hmx_dsp.attribute_ID = attr; dsp_capability_hmx_dsp.capability = (uint32_t) 0; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_hmx_dsp, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); GGML_LOG_ERROR("Running the usecase without checking the capability\n"); nErr = AEE_SUCCESS; goto bail; } else if (nErr == AEE_SUCCESS) { *capability = dsp_capability_hmx_dsp.capability; } else { GGML_LOG_ERROR("\nget_hmx_support_info failed with Error 0x%x\n", nErr); goto bail; } } else { nErr = AEE_EUNSUPPORTED; GGML_LOG_ERROR("HMX support is not there for domain %d\n", domain); goto bail; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); } bail: return nErr; } int get_hex_arch_ver(int domain, int * arch) { if (!remote_handle_control) { GGML_LOG_ERROR("ggml-hex: remote_handle_control is not supported on this device\n"); return AEE_EUNSUPPORTEDAPI; } struct remote_dsp_capability arch_ver; arch_ver.domain = (uint32_t) domain; arch_ver.attribute_ID = ARCH_VER; arch_ver.capability = (uint32_t) 0; int err = remote_handle_control(DSPRPC_GET_DSP_INFO, &arch_ver, sizeof(arch_ver)); if ((err & 0xff) == (AEE_EUNSUPPORTEDAPI & 0xff)) { GGML_LOG_ERROR("ggml-hex: FastRPC capability API is not supported on this device\n"); return AEE_EUNSUPPORTEDAPI; } if (err != AEE_SUCCESS) { GGML_LOG_ERROR("ggml-hex: FastRPC capability query failed (err %d)\n", err); return err; } switch (arch_ver.capability & 0xff) { case 0x68: *arch = 68; return 0; case 0x69: *arch = 69; return 0; case 0x73: *arch = 73; return 0; case 0x75: *arch = 75; return 0; case 0x79: *arch = 79; return 0; case 0x81: *arch = 81; return 0; } return -1; } int get_hvx_support_info(int domain, uint32_t * capability, uint32_t attr) { int nErr = AEE_SUCCESS; *capability = 0; if (remote_handle_control) { if (domain == CDSP_DOMAIN_ID) { /* * Query the DSP for HVX SUPPORT information * HVX is supported on CDSP only */ struct remote_dsp_capability dsp_capability_hvx_dsp; dsp_capability_hvx_dsp.domain = (uint32_t) domain; dsp_capability_hvx_dsp.attribute_ID = attr; dsp_capability_hvx_dsp.capability = (uint32_t) 0; nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_hvx_dsp, sizeof(struct remote_dsp_capability)); if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); GGML_LOG_ERROR("Running the usecase without checking the capability\n"); nErr = AEE_SUCCESS; goto bail; } else if (nErr == AEE_SUCCESS) { *capability = dsp_capability_hvx_dsp.capability; } else { GGML_LOG_ERROR("\nget_hvx_support_info failed with Error 0x%x\n", nErr); goto bail; } } else { nErr = AEE_EUNSUPPORTED; GGML_LOG_ERROR("HVX support is not available on domain %d\n", domain); goto bail; } } else { nErr = AEE_EUNSUPPORTEDAPI; GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); } bail: return nErr; } ggml-org-ggml-3678254/src/ggml-hexagon/htp-utils.h000066400000000000000000000153071512524704700216400ustar00rootroot00000000000000#ifndef 
HTP_UTILS_H #define HTP_UTILS_H #ifdef __cplusplus extern "C" { #endif #include #include #include #include #include /* Offset to differentiate HLOS and Hexagon error codes. Stores the value of AEE_EOFFSET for Hexagon. */ #ifndef DSP_OFFSET # define DSP_OFFSET 0x80000400 #endif /* Errno for connection reset by peer. */ #ifndef ECONNRESET # ifdef __hexagon__ # define ECONNRESET 104 # endif #endif /* Abstraction of different OS specific sleep APIs. SLEEP accepts input in seconds. */ #ifndef SLEEP # ifdef __hexagon__ # define SLEEP(x) \ { /* Do nothing for simulator. */ \ } # else # ifdef _WINDOWS # define SLEEP(x) Sleep(1000 * x) /* Sleep accepts input in milliseconds. */ # else # define SLEEP(x) sleep(x) /* sleep accepts input in seconds. */ # endif # endif #endif /* Include windows specific header files. */ #ifdef _WINDOWS # include # include # define _CRT_SECURE_NO_WARNINGS 1 # define _WINSOCK_DEPRECATED_NO_WARNINGS 1 /* Including this file for custom implementation of getopt function. */ # include "getopt_custom.h" #endif /* Includes and defines for all HLOS except windows */ #if !defined(__hexagon__) && !defined(_WINDOWS) # include "unistd.h" # include #endif /* Includes and defines for Hexagon and all HLOS except Windows. */ #if !defined(_WINDOWS) /* Weak reference to remote symbol for compilation. */ # pragma weak remote_session_control # pragma weak remote_handle_control # pragma weak remote_handle64_control # pragma weak fastrpc_mmap # pragma weak fastrpc_munmap # pragma weak rpcmem_alloc2 #endif #if !defined(_WINDOWS) # pragma weak remote_system_request #endif /** * Wrapper for FastRPC Capability API: query DSP support. * * @param[out] domain pointer to supported domain. * @return 0 if query is successful. * non-zero if error, return value points to the error. */ int get_dsp_support(int * domain); /** * Wrapper for FastRPC Capability API: query VTCM information. * * @param[in] domain value of domain in the queried. * @param[out] capability capability value of the attribute queried. * @param[in] attr value of the attribute to the queried. * @return 0 if query is successful. * non-zero if error, return value points to the error. */ int get_vtcm_info(int domain, uint32_t * capability, uint32_t attr); /** * Wrapper for FastRPC Capability API: query unsigned pd support on CDSP domain. * * @return true if unsigned pd is supported. * false if unsigned pd is not supported, capability query failed. */ bool get_unsignedpd_support(void); /** * Wrapper for FastRPC Capability API: query unsigned pd support. * * @param[in] domain value of domain in the queried. * @return true if unsigned pd is supported. * false if unsigned pd is not supported, capability query failed. */ bool is_unsignedpd_supported(int domain_id); /** * is_valid_domain_id API: query a domain id is valid. * * @param[in] domain value of domain in the queried. * @param[in] compute_only value of domain is only compared with CDSP domains supported by the target when enabled. * @return true if value of domain is valid. * false if value of domain is not valid. */ bool is_valid_domain_id(int domain_id, int compute_only); /** * get_domain API: get domain struct from domain value. * * @param[in] domain value of a domain * @return Returns domain struct of the domain if it is supported or else * returns NULL. 
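 *
 * Illustrative usage sketch (added; not part of the original sources). It only
 * assumes CDSP_DOMAIN_ID and the domain struct, both provided by the FastRPC
 * headers already used alongside this file:
 *
 *     domain * d = get_domain(CDSP_DOMAIN_ID);
 *     if (d == NULL) {
 *         // domain id is not present in supported_domains
 *     }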
* */ domain * get_domain(int domain_id); /** * get_domains_info API: get information for all the domains available on the device * * @param[in] domain_type pointer to domain type * @param[in] num_domains pointer to number of domains * @param[in] domains_info pointer to save discovered domains information. * @return 0 if query is successful. * non-zero if error, return value points to the error. * * It is user's responsibility to free the memory used to store the domains info whose address is present in domains_info before closing the application. * */ int get_domains_info(char * domain_type, int * num_domains, fastrpc_domain ** domains_info); /** * get_effective_domain_id API: get effective domain id for given session id * * @param[in] domain_name pointer to domain name * @param[in] session_id * @param[in] effec_domain_id pointer to save obtained effective domain id. * @return 0 if query is successful. * non-zero if error, return value points to the error. * */ int get_effective_domain_id(char * domain_name, int session_id, int * effec_domain_id); /** * is_async_fastrpc_supported API: query a domain id has async fastrpc supported or not * * @param[in] domain_id value of a domain * @return Returns true or false stating support of Async FastRPC * */ bool is_async_fastrpc_supported(int domain_id); /** * is_status_notification_supported API: query the DSP for STATUS_NOTIFICATION_SUPPORT information * * @param[in] domain_id value of a domain * @return Returns true or false stating status notification support information * */ bool is_status_notification_supported(int domain_id); /** * get_hmx_support_info API: query the DSP for HMX SUPPORT information * * @param[in] domain_id value of a domain * @param[out] capability capability value of the attribute queried. * @param[in] attr value of the attribute to the queried. * @return 0 if query is successful. * non-zero if error, return value points to the error. * */ int get_hmx_support_info(int domain, uint32_t * capability, uint32_t attr); /** * get_hex_arch_ver API: query the Hexagon processor architecture version information * * @param[in] domain_id value of a domain * @param[out] Arch version (73, 75, ...) * @return 0 if query is successful. * non-zero if error, return value points to the error. * */ int get_hex_arch_ver(int domain, int * arch); /** * get_hvx_support_info API: query the DSP for HVX SUPPORT information * * @param[in] domain_id value of a domain * @param[out] capability capability value of the attribute queried. * @param[in] attr value of the attribute to the queried. * @return 0 if query is successful. * non-zero if error, return value points to the error. * */ int get_hvx_support_info(int domain, uint32_t * capability, uint32_t attr); #ifdef __cplusplus } #endif #endif //DSP_CAPABILITIES_UTILS_H ggml-org-ggml-3678254/src/ggml-hexagon/htp/000077500000000000000000000000001512524704700203235ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-hexagon/htp/CMakeLists.txt000066400000000000000000000017071512524704700230700ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.22.2) project(ggml-htp C CXX ASM) include(${HEXAGON_SDK_ROOT}/build/cmake/hexagon_fun.cmake) include_directories( ${HEXAGON_SDK_ROOT}/incs ${HEXAGON_SDK_ROOT}/incs/stddef ${CMAKE_CURRENT_SOURCE_DIR}/../.. ${CMAKE_CURRENT_SOURCE_DIR}/.. 
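    # (added note) .. and ../.. expose the host-side ggml-hexagon sources and the
    # shared ggml headers (e.g. ggml-common.h) to the DSP-side build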
${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) set(HTP_LIB ggml-htp-${DSP_VERSION}) add_library(${HTP_LIB} SHARED main.c htp_iface_skel.c worker-pool.c htp-dma.c hvx-sigmoid.c hvx-inverse.c hvx-exp.c hvx-utils.c matmul-ops.c binary-ops.c unary-ops.c softmax-ops.c act-ops.c rope-ops.c ) target_compile_definitions(${HTP_LIB} PRIVATE $,HTP_DEBUG=1,NDEBUG=1> FP32_QUANTIZE_GROUP_SIZE=${GGML_HEXAGON_FP32_QUANTIZE_GROUP_SIZE}) build_idl(htp_iface.idl ${HTP_LIB}) set_target_properties(${HTP_LIB} PROPERTIES EXPORT_COMPILE_COMMANDS ON) install(TARGETS ${HTP_LIB}) ggml-org-ggml-3678254/src/ggml-hexagon/htp/act-ops.c000066400000000000000000000576171512524704700220550ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" #define htp_act_preamble3 \ const uint32_t ne00 = src0->ne[0]; \ const uint32_t ne01 = src0->ne[1]; \ const uint32_t ne02 = src0->ne[2]; \ const uint32_t ne03 = src0->ne[3]; \ \ const uint32_t ne10 = src1->ne[0]; \ const uint32_t ne11 = src1->ne[1]; \ const uint32_t ne12 = src1->ne[2]; \ const uint32_t ne13 = src1->ne[3]; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb00 = src0->nb[0]; \ const uint32_t nb01 = src0->nb[1]; \ const uint32_t nb02 = src0->nb[2]; \ const uint32_t nb03 = src0->nb[3]; \ \ const uint32_t nb10 = src1->nb[0]; \ const uint32_t nb11 = src1->nb[1]; \ const uint32_t nb12 = src1->nb[2]; \ const uint32_t nb13 = src1->nb[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; #define htp_act_preamble2 \ const uint32_t ne00 = src0->ne[0]; \ const uint32_t ne01 = src0->ne[1]; \ const uint32_t ne02 = src0->ne[2]; \ const uint32_t ne03 = src0->ne[3]; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb00 = src0->nb[0]; \ const uint32_t nb01 = src0->nb[1]; \ const uint32_t nb02 = src0->nb[2]; \ const uint32_t nb03 = src0->nb[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; static void glu_swiglu_fp32_per_thread(const struct htp_tensor * src0, const struct htp_tensor * src1, struct htp_tensor * dst, const int32_t * op_params, struct htp_spad * src0_spad, struct htp_spad * src1_spad, struct htp_spad * dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread) { htp_act_preamble3; size_t src0_row_size = nb01; size_t src1_row_size = nb11; size_t dst_row_size = nb1; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); int is_aligned = 1; if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { 
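        // Note (added): VLEN is the HVX vector register width in bytes. When src0/dst
        // are not VLEN-aligned (or nb01 is not a multiple of VLEN) the loop below takes
        // the generic hvx_exp/add_scalar/inverse/mul path instead of the *_opt fast
        // path, so this is a performance warning rather than an error.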
is_aligned = 0; FARF(HIGH, "swiglu-f32: unaligned addresses in elementwise op, possibly slower execution\n"); } const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; uint8_t * restrict data_dst = (uint8_t *) dst->data; const bool src1_valid = src1->ne[0]; const int nc = (src1_valid) ? ne00 : ne00 / 2; if (!src1_valid) { const int32_t swapped = op_params[1]; data_src1 = data_src0; src1_row_size = src0_row_size; const size_t nc_in_bytes = nc * SIZEOF_FP32; data_src0 += swapped ? nc_in_bytes : 0; data_src1 += swapped ? 0 : nc_in_bytes; } uint8_t * restrict src0_spad_data = src0_spad->data + (ith * src0_row_size); uint8_t * restrict src1_spad_data = src1_spad->data + (ith * src1_row_size); uint8_t * restrict dst_spad_data = dst_spad->data + (ith * dst_row_size); const bool opt_path = ((1 == is_aligned) && !(nb01 & (VLEN - 1))); for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { const float * restrict src0 = (float *) (data_src0 + (ir * src0_row_size)); const float * restrict src1 = (float *) (data_src1 + (ir * src1_row_size)); float * restrict dst = (float *) (data_dst + (ir * dst_row_size)); if (ir + 1 < src0_end_row) { htp_l2fetch(src0 + src0_row_size, 1, src0_row_size, src0_row_size); } if (opt_path) { hvx_fast_sigmoid_f32((const uint8_t *) src0, (uint8_t *) src0_spad_data, nc); hvx_mul_mul_f32_opt((const uint8_t *) src0, (const uint8_t *) src0_spad_data, (const uint8_t *) src1, (uint8_t *) dst, nc); } else { hvx_exp_f32((const uint8_t *) src0, src0_spad_data, nc, true); hvx_add_scalar_f32(src0_spad_data, 1.0, src1_spad_data, nc); hvx_inverse_f32(src1_spad_data, src0_spad_data, nc); hvx_mul_f32((const uint8_t *) src0, src0_spad_data, dst_spad_data, nc); hvx_mul_f32(dst_spad_data, (const uint8_t *) src1, (uint8_t *) dst, nc); } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "swiglu-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void glu_swiglu_oai_fp32_per_thread(const struct htp_tensor * src0, const struct htp_tensor * src1, struct htp_tensor * dst, const int32_t * op_params, struct htp_spad * src0_spad, struct htp_spad * src1_spad, struct htp_spad * dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread) { htp_act_preamble3; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); const size_t src0_row_size = nb01; const size_t src1_row_size = nb11; const size_t dst_row_size = nb1; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { FARF(HIGH, "act-f32: unaligned addresses in activations op, possibly slower execution\n"); } const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; uint8_t * restrict data_dst = (uint8_t *) dst->data; bool src1_valid = src1->ne[0]; if (!src1_valid) { data_src1 = data_src0; } uint8_t * restrict src0_spad_data = src0_spad->data + (ith * src0_row_size); uint8_t * restrict src1_spad_data = src1_spad->data + (ith * src1_row_size); uint8_t * restrict dst_spad_data = 
dst_spad->data + (ith * dst_row_size); const int32_t swapped = op_params[1]; const float alpha = ((const float *) (op_params))[2]; const float limit = ((const float *) (op_params))[3]; const int nc = (src1_valid) ? ne00 : ne00 / 2; for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { const float * restrict src0 = (float *) (data_src0 + (ir * src0_row_size)); const float * restrict src1 = (float *) (data_src1 + (ir * src1_row_size)); float * restrict dst = (float *) (data_dst + (ir * dst_row_size)); if (ir + 1 < src0_end_row) { htp_l2fetch(src0 + src0_row_size, 1, src0_row_size, src0_row_size); } if (!src1) { src0 += swapped ? nc : 0; src1 += swapped ? 0 : nc; } // x (src0_spad_data) = std::min(src0_p[k], limit); hvx_min_scalar_f32((const uint8_t *) src0, limit, src0_spad_data, nc); // y1 (src1_spad_data) = std::clamp(src1_p[k], -limit, limit); hvx_clamp_scalar_f32((const uint8_t *) src1, -limit, limit, src1_spad_data, nc); // y (src1_spad_data) = y1 + 1.f hvx_add_scalar_f32(src1_spad_data, 1.0, src1_spad_data, nc); // x1 (dst_spad_data) = alpha * (x) hvx_mul_scalar_f32(src0_spad_data, alpha, dst_spad_data, nc); // x2 (dst_spad_data) = expf(-x1) hvx_exp_f32(dst_spad_data, dst_spad_data, nc, true); // x3 (dst_spad_data) = x2 + 1.f hvx_add_scalar_f32(dst_spad_data, 1.0, dst_spad_data, nc); // x4 (dst_spad_data) = 1 / x3 hvx_inverse_f32(dst_spad_data, dst_spad_data, nc); // out_glu(dst_spad_data) = x * x4 hvx_mul_f32(src0_spad_data, dst_spad_data, dst_spad_data, nc); // out = out_glu * (y + 1.f); hvx_mul_f32(dst_spad_data, src1_spad_data, (uint8_t *) dst, nc); } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "swiglu-f32 %d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void unary_gelu_fp32_per_thread(const struct htp_tensor * src0, struct htp_tensor * dst, const int32_t * op_params, struct htp_spad * src0_spad, struct htp_spad * dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread, dma_queue * dma_queue) { htp_act_preamble2; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); const size_t src0_row_size = nb01; const size_t dst_row_size = nb1; const size_t src0_row_size_aligned = htp_round_up(src0_row_size, VLEN); const size_t dst_row_size_aligned = htp_round_up(dst_row_size, VLEN); const uint32_t src0_nrows = ne01 * ne02 * ne03; const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } const uint8_t * data_src0 = (const uint8_t *) src0->data; uint8_t * data_dst = (uint8_t *) dst->data; uint8_t * src0_spad_data = src0_spad->data + (ith * src0_spad->size_per_thread); uint8_t * dst_spad_data = dst_spad->data + (ith * dst_spad->size_per_thread); // While given src0_spad->size_per_thread, divide it to two ping-pong buffer for src0 size_t src0_spad_half_size = src0_spad->size_per_thread / 2; size_t dst_spad_half_size = dst_spad->size_per_thread / 2; // In gelu = x*sigmoid(x*1.702) const int BLOCK = src0_spad_half_size / src0_row_size_aligned; // How many rows can we process in one block if (BLOCK == 0) { FARF(ERROR, "gelu-f32 : current VTCM reservation %zu is too small for even 1 row per thread, needed at least %zu\n", src0_spad->size_per_thread, 
src0_row_size_aligned); return; } // See discussion: https://github.com/ggml-org/llama.cpp/pull/18151#issuecomment-3678235379 for (uint32_t ir = src0_start_row, spad_idx = 0; ir < src0_end_row && spad_idx < 2; ir += BLOCK, spad_idx++) { const uint32_t block_size = MIN(BLOCK, src0_end_row - ir); // Dummy DMA transation for sequencing (interleaving dst,src,dst,...) dma_queue_push_vtcm_to_ddr(dma_queue, dma_make_ptr(data_dst, dst_spad_data + (spad_idx * dst_spad_half_size)), dst_row_size, dst_row_size_aligned, 0); dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(src0_spad_data + (spad_idx * src0_spad_half_size), data_src0 + (ir * src0_row_size)), src0_row_size_aligned, src0_row_size, block_size); } for (uint32_t ir = src0_start_row; ir < src0_end_row; ir += BLOCK) { const uint32_t block_size = MIN(BLOCK, src0_end_row - ir); float* dst_spad = (float *) dma_queue_pop(dma_queue).src; float* src0_spad = (float *) dma_queue_pop(dma_queue).dst; for (uint32_t ib = 0; ib < block_size; ib++) { const float* src0_spad_ptr = src0_spad + ib * (src0_row_size_aligned / sizeof(float)); float* dst_spad_ptr = dst_spad + ib * (dst_row_size_aligned / sizeof(float)); // gelu = x * sigmoid(1.702 * x) // current implementation hvx_mul_scalar_f32((const uint8_t *) src0_spad_ptr, (float) 1.702, (uint8_t *) dst_spad_ptr, ne0); hvx_fast_sigmoid_f32((const uint8_t *) dst_spad_ptr, (uint8_t *) dst_spad_ptr, ne0); hvx_mul_f32_opt((const uint8_t *) src0_spad_ptr, (uint8_t *) dst_spad_ptr, (uint8_t *) dst_spad_ptr, ne0); } dma_queue_push_vtcm_to_ddr(dma_queue, dma_make_ptr(data_dst + (ir * dst_row_size), dst_spad), dst_row_size, dst_row_size_aligned, block_size); // prefetch N+2 loop iteration if any const uint32_t pref_block = (ir + BLOCK * 2); if (pref_block < src0_end_row) { const uint32_t pref_block_size = MIN(BLOCK, src0_end_row - pref_block); dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(src0_spad, data_src0 + (pref_block * src0_row_size)), src0_row_size_aligned, src0_row_size, pref_block_size); } } dma_queue_flush(dma_queue); t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "gelu-f32 %d/%d: %ux%ux%ux%u (%u:%u) -> %ux%ux%ux%u usec %u\n", ith, nth, ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void unary_gelu_fp32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = (struct htp_ops_context *) data; unary_gelu_fp32_per_thread(&octx->src0, &octx->dst, octx->op_params, &octx->src0_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void unary_silu_fp32_per_thread(const struct htp_tensor * src0, struct htp_tensor * dst, const int32_t * op_params, struct htp_spad * src0_spad, struct htp_spad * dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread) { htp_act_preamble2; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); const size_t src0_row_size = nb01; const size_t dst_row_size = nb1; const uint32_t src0_nrows = ne01 * ne02 * ne03; const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } int is_aligned = 1; int opt_path = 0; if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { is_aligned = 0; FARF(HIGH, "silu-f32: unaligned addresses in elementwise op, possibly slower execution\n"); } if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { opt_path = 1; } const 
uint8_t * restrict data_src0 = (const uint8_t *) src0->data; uint8_t * restrict data_dst = (uint8_t *) dst->data; uint8_t * restrict src0_spad_data = src0_spad->data + (ith * src0_row_size); uint8_t * restrict dst_spad_data = dst_spad->data + (ith * dst_row_size); for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { const float * restrict src0 = (float *) (data_src0 + (ir * src0_row_size)); float * restrict dst = (float *) (data_dst + (ir * dst_row_size)); if (ir + 1 < src0_end_row) { htp_l2fetch(src0 + src0_row_size, 1, src0_row_size, src0_row_size); } if (1 == opt_path) { hvx_fast_sigmoid_f32((const uint8_t *) src0, (uint8_t *) src0_spad_data, ne0); hvx_mul_f32_opt((const uint8_t *) src0, src0_spad_data, (uint8_t *) dst, ne0); } else { hvx_exp_f32((const uint8_t *) src0, src0_spad_data, ne0, true); hvx_add_scalar_f32(src0_spad_data, 1.0, dst_spad_data, ne0); hvx_inverse_f32(dst_spad_data, src0_spad_data, ne0); hvx_mul_f32((const uint8_t *) src0, src0_spad_data, (uint8_t *) dst, ne0); } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "silu-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void unary_silu_fp32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = (struct htp_ops_context *) data; unary_silu_fp32_per_thread(&octx->src0, &octx->dst, octx->op_params, &octx->src0_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread); } static void glu_swiglu_fp32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = (struct htp_ops_context *) data; glu_swiglu_fp32_per_thread(&octx->src0, &octx->src1, &octx->dst, octx->op_params, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread); } static void glu_swiglu_oai_fp32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = (struct htp_ops_context *) data; glu_swiglu_oai_fp32_per_thread(&octx->src0, &octx->src1, &octx->dst, octx->op_params, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread); } static int execute_op_activations_fp32(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; struct htp_tensor * dst = &octx->dst; if (((src0->ne[0] * SIZEOF_FP32) != src0->nb[1]) || ((dst->ne[0] * SIZEOF_FP32) != dst->nb[1])) { FARF(ERROR, "Non-contiguous tensors are not supported at this time \n"); return HTP_STATUS_NO_SUPPORT; } worker_callback_t act_op_func; const char * op_type = NULL; switch (octx->op) { case HTP_OP_UNARY_SILU: act_op_func = unary_silu_fp32; op_type = "silu-f32"; break; case HTP_OP_GLU_SWIGLU: act_op_func = glu_swiglu_fp32; op_type = "swiglu-f32"; break; case HTP_OP_GLU_SWIGLU_OAI: act_op_func = glu_swiglu_oai_fp32; op_type = "swiglu-oai-f32"; break; case HTP_OP_UNARY_GELU: act_op_func = unary_gelu_fp32; op_type = "gelu-f32"; break; default: FARF(ERROR, "Unsupported activations Op %u\n", octx->op); return HTP_STATUS_NO_SUPPORT; } const uint32_t n_threads = octx->n_threads; const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; size_t src0_row_size = src0->nb[1]; size_t src1_row_size = src1->nb[1]; // zero bytes if src1 is not used size_t dst_row_size = dst->nb[1]; const bool src1_valid = src1->ne[0]; if (!src1_valid) { src1_row_size = src0_row_size; } const size_t src0_row_size_aligned = 
htp_round_up(src0_row_size, VLEN); const size_t src1_row_size_aligned = htp_round_up(src1_row_size, VLEN); const size_t dst_row_size_aligned = htp_round_up(dst_row_size, VLEN); // VTCM scratchpads for all tensors // N rows per thread, padded to HVX vector size size_t spad_size_per_row = (src0_row_size_aligned + src1_row_size_aligned) + dst_row_size_aligned; size_t vtcm_row_per_thread = (octx->ctx->vtcm_size)/ (n_threads* spad_size_per_row); // Make sure the reserved vtcm size is sufficient if(vtcm_row_per_thread ==0){ FARF(ERROR, "act-%s : current VTCM reservation %zu is too small for even 1 row per thread, needed at least %zu\n", op_type, octx->ctx->vtcm_size, spad_size_per_row * n_threads); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.size_per_thread = src0_row_size_aligned * vtcm_row_per_thread; octx->src1_spad.size_per_thread = src1_row_size_aligned * vtcm_row_per_thread; octx->dst_spad.size_per_thread = dst_row_size_aligned * vtcm_row_per_thread; octx->dst_spad.size = n_threads* octx->dst_spad.size_per_thread; octx->src0_spad.size = n_threads* octx->src0_spad.size_per_thread; octx->src1_spad.size = n_threads* octx->src1_spad.size_per_thread; octx->src0_spad.data = octx->ctx->vtcm_base; octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; if (src1->ne[0]) { FARF(HIGH, "%s: %ux%ux%ux%u x %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); } else { FARF(HIGH, "%s: %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); } if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { uint32_t n_jobs = MIN(n_threads, src0_nrows); octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; worker_pool_run_func(octx->ctx->worker_pool, act_op_func, octx, n_jobs); } return err; } int op_activations(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; switch (octx->src0.type) { case HTP_TYPE_F32: err = execute_op_activations_fp32(octx); break; default: err = HTP_STATUS_NO_SUPPORT; break; } return err; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/binary-ops.c000066400000000000000000000333161512524704700225600ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" typedef void (*hvx_elemwise_f32_func)(const uint8_t * src0, const uint8_t * src1, uint8_t * data_dst, const int num_elems); static hvx_elemwise_f32_func func_table_HVX[] = { hvx_mul_f32, hvx_add_f32, hvx_sub_f32 }; static hvx_elemwise_f32_func func_table_HVX_opt[] = { hvx_mul_f32_opt, hvx_add_f32_opt, hvx_sub_f32_opt }; #define htp_binary_preamble \ const struct htp_tensor * src0 = &octx->src0; \ const struct htp_tensor * src1 = &octx->src1; \ const struct 
htp_tensor * src2 = &octx->src2; \ struct htp_tensor * dst = &octx->dst; \ \ const uint32_t ne00 = src0->ne[0]; \ const uint32_t ne01 = src0->ne[1]; \ const uint32_t ne02 = src0->ne[2]; \ const uint32_t ne03 = src0->ne[3]; \ \ const uint32_t ne10 = src1->ne[0]; \ const uint32_t ne11 = src1->ne[1]; \ const uint32_t ne12 = src1->ne[2]; \ const uint32_t ne13 = src1->ne[3]; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb00 = src0->nb[0]; \ const uint32_t nb01 = src0->nb[1]; \ const uint32_t nb02 = src0->nb[2]; \ const uint32_t nb03 = src0->nb[3]; \ \ const uint32_t nb10 = src1->nb[0]; \ const uint32_t nb11 = src1->nb[1]; \ const uint32_t nb12 = src1->nb[2]; \ const uint32_t nb13 = src1->nb[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; \ \ const uint32_t src0_nrows_per_thread = octx->src0_nrows_per_thread; static void binary_job_f32_per_thread(struct htp_ops_context * octx, uint8_t * spad_data, uint32_t nth, uint32_t ith, enum htp_op op) { htp_binary_preamble; const size_t src0_row_size = nb01; const size_t src1_row_size = nb11; const size_t dst_row_size = nb1; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src1_nrows = ne11 * ne12 * ne13; // src1 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); int is_aligned = 1; int opt_path = 0; if ((0 == htp_is_aligned((void *) src0->data, VLEN)) || (0 == htp_is_aligned((void *) src1->data, VLEN)) || (0 == htp_is_aligned((void *) dst->data, VLEN))) { FARF(HIGH, "binary-f32: unaligned addresses in elementwise op, possibly slower execution\n"); is_aligned = 0; } if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { opt_path = 1; } hvx_elemwise_f32_func func_HVX = (1 == opt_path) ? 
func_table_HVX_opt[op] : func_table_HVX[op]; uint8_t * restrict spad_data_th = spad_data + (ith * src0_row_size); const uint8_t * restrict src0_ptr = (const uint8_t *) src0->data + (src0_start_row * src0_row_size); uint8_t * restrict dst_ptr = (uint8_t *) dst->data + (src0_start_row * dst_row_size); const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; const uint32_t ne02_ne01 = ne02 * ne01; for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { const uint32_t i03 = fastdiv(ir, &octx->src0_div21); const uint32_t i02 = fastdiv(ir - i03 * ne02_ne01, &octx->src0_div1); const uint32_t i01 = (ir - i03 * ne02_ne01 - i02 * ne01); const uint32_t i13 = fastmodulo(i03, ne13, &octx->src1_div3); const uint32_t i12 = fastmodulo(i02, ne12, &octx->src1_div2); const uint32_t i11 = fastmodulo(i01, ne11, &octx->src1_div1); const uint8_t * restrict src1_ptr = data_src1 + i13 * nb13 + i12 * nb12 + i11 * src1_row_size; if (ir + 1 < src0_end_row) { htp_l2fetch(src0_ptr + ne00, 1, src0_row_size, src0_row_size); if (src1_row_size == src0_row_size) { htp_l2fetch(src1_ptr, 1, src1_row_size, src1_row_size); } } const uint32_t nr0 = ne00 / ne10; if (nr0 > 1) { if ((1 == is_aligned) && (nr0 == ne00)) { hvx_bcast_fp32_a(spad_data_th, *(float *) src1_ptr, nr0); } else { for (uint32_t r = 0; r < nr0; r++) { memcpy(spad_data_th + r * nb11, (const uint8_t *) src1_ptr, nb11); } } func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) spad_data_th, (uint8_t *) dst_ptr, ne00); } else { func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) src1_ptr, (uint8_t *) dst_ptr, ne00); } src0_ptr += src0_row_size; dst_ptr += dst_row_size; } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "binary-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void binary_add_id_job_f32_per_thread(struct htp_ops_context * octx, uint8_t * spad_data, uint32_t nth, uint32_t ith, hvx_elemwise_f32_func func_HVX) { htp_binary_preamble; const size_t src0_row_size = nb01; const size_t src1_row_size = nb11; const size_t dst_row_size = nb1; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); if ((0 == htp_is_aligned((void *) src0->data, VLEN)) || (0 == htp_is_aligned((void *) src1->data, VLEN)) || (0 == htp_is_aligned((void *) dst->data, VLEN))) { FARF(HIGH, "add-id-f32: unaligned addresses, possibly slower execution\n"); } const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; uint8_t * restrict data_dst = (uint8_t *) dst->data; const uint32_t ne02_ne01 = ne02 * ne01; for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { // src0 indices const uint32_t i03 = fastdiv(ir, &octx->src0_div21); const uint32_t i02 = fastdiv(ir - i03 * ne02_ne01, &octx->src0_div1); const uint32_t i01 = (ir - i03 * ne02_ne01 - i02 * ne01); // src1 indices const int i11 = *(int32_t *) ((char *) src2->data + i01 * src2->nb[0] + i02 * src2->nb[1]); assert(i11 >= 0 && i11 < ne11); float * restrict dst_ptr = (float *) (data_dst + i03 * nb3 + i02 * nb2 + i01 * nb1); const float * restrict src0_ptr = (const float 
*) (data_src0 + i03 * nb03 + i02 * nb02 + i01 * nb01); const float * restrict src1_ptr = (const float *) (data_src1 + 0 + 0 + i11 * nb11); if (ir + 1 < src0_end_row) { htp_l2fetch(src0_ptr + ne00, 1, src0_row_size, src0_row_size); if (src1_row_size == src0_row_size) { htp_l2fetch(src1_ptr + ne10, 1, src1_row_size, src1_row_size); } } const uint32_t nr0 = ne00 / ne10; if (nr0 > 1) { for (uint32_t r = 0; r < nr0; r++) { memcpy(spad_data + r * nb10, (const uint8_t *) src1_ptr, nb10); } func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) spad_data, (uint8_t *) dst_ptr, ne00); } else { func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) src1_ptr, (uint8_t *) dst_ptr, ne00); } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "add-id-f32 %d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void binary_job_dispatcher_f32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = (struct htp_ops_context *) data; switch (octx->op) { case HTP_OP_MUL: case HTP_OP_ADD: case HTP_OP_SUB: binary_job_f32_per_thread(octx, octx->src1_spad.data, n, i, octx->op); break; case HTP_OP_ADD_ID: binary_add_id_job_f32_per_thread(octx, octx->src0_spad.data, n, i, hvx_add_f32); break; default: FARF(ERROR, "Unknown Binary Op %u", octx->op); break; } } static int execute_op_binary_f32(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; struct htp_tensor * dst = &octx->dst; worker_callback_t binary_op_func; const char * op_type = NULL; switch (octx->op) { case HTP_OP_MUL: binary_op_func = binary_job_dispatcher_f32; op_type = "mul-f32"; break; case HTP_OP_ADD: binary_op_func = binary_job_dispatcher_f32; op_type = "add-f32"; break; case HTP_OP_SUB: binary_op_func = binary_job_dispatcher_f32; op_type = "sub-f32"; break; case HTP_OP_ADD_ID: binary_op_func = binary_job_dispatcher_f32; op_type = "add-id-f32"; break; default: FARF(ERROR, "Unsupported binary-Op %u\n", octx->op); return HTP_STATUS_NO_SUPPORT; } const int n_threads = octx->n_threads; const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; const size_t src0_row_size = src0->nb[1]; const size_t src1_row_size = src1->nb[1]; const size_t dst_row_size = dst->nb[1]; // VTCM scratchpads for all tensors octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; octx->src1_spad.size = htp_round_up(src1_row_size, 128) * n_threads; size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; FARF(HIGH, "%s: (%ux%ux%ux%u) * (%ux%ux%ux%u) -> (%ux%ux%ux%u) : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); // Make sure the reserved vtcm size is sufficient if (octx->ctx->vtcm_size < spad_size) { FARF(ERROR, "binary-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, spad_size); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.data = 
octx->ctx->vtcm_base; octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { uint32_t n_jobs = MIN(n_threads, src0_nrows); octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; octx->src0_div21 = init_fastdiv_values(src0->ne[2] * src0->ne[1]); octx->src0_div3 = init_fastdiv_values(src0->ne[3]); octx->src0_div2 = init_fastdiv_values(src0->ne[2]); octx->src0_div1 = init_fastdiv_values(src0->ne[1]); octx->src1_div21 = init_fastdiv_values(src1->ne[2] * src1->ne[1]); octx->src1_div3 = init_fastdiv_values(src1->ne[3]); octx->src1_div2 = init_fastdiv_values(src1->ne[2]); octx->src1_div1 = init_fastdiv_values(src1->ne[1]); worker_pool_run_func(octx->ctx->worker_pool, binary_op_func, octx, n_jobs); } return err; } int op_binary(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; switch (octx->src0.type) { case HTP_TYPE_F32: err = execute_op_binary_f32(octx); break; default: err = HTP_STATUS_NO_SUPPORT; break; } return err; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/cmake-toolchain.cmake000066400000000000000000000127051512524704700243700ustar00rootroot00000000000000if (HEXAGON_TOOLCHAIN_INCLUDED) return() endif() set(HEXAGON_TOOLCHAIN_INCLUDED true) #Cross Compiling for Hexagon set(HEXAGON TRUE) set(CMAKE_SYSTEM_NAME QURT) set(CMAKE_SYSTEM_PROCESSOR Hexagon) set(CMAKE_SYSTEM_VERSION "1") #${HEXAGON_PLATFORM_LEVEL}) set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) set(CUSTOM_RUNELF_PATH "") #To fix backward compatibility with EAI addon. if (NOT HEXAGON_SDK_ROOT) set(HEXAGON_SDK_ROOT $ENV{HEXAGON_SDK_ROOT}) endif() if (NOT HEXAGON_TOOLS_ROOT) if (DEFINED ENV{HEXAGON_TOOLS_ROOT}) set(HEXAGON_TOOLS_ROOT $ENV{HEXAGON_TOOLS_ROOT}) endif() if(NOT HEXAGON_TOOLS_ROOT) set(HEXAGON_TOOLS_ROOT $ENV{DEFAULT_HEXAGON_TOOLS_ROOT}) endif() endif() file(TO_CMAKE_PATH "${HEXAGON_TOOLS_ROOT}" HEXAGON_TOOLS_ROOT) file(TO_CMAKE_PATH "${HEXAGON_SDK_ROOT}" HEXAGON_SDK_ROOT) #Get the Binary extension of the Hexagon Toolchain if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) set(HEXAGON_TOOLCHAIN_SUFFIX .exe) endif() message(DEBUG "CMAKE_HOST_SYSTEM_NAME:${CMAKE_HOST_SYSTEM_NAME}") include(${HEXAGON_SDK_ROOT}/build/cmake/hexagon_arch.cmake) set(HEXAGON_TOOLCHAIN ${HEXAGON_TOOLS_ROOT}) set(HEXAGON_LIB_DIR "${HEXAGON_TOOLCHAIN}/Tools/target/hexagon/lib") set(HEXAGON_ISS_DIR ${HEXAGON_TOOLCHAIN}/Tools/lib/iss) set(CMAKE_TRY_COMPILE_PLATFORM_VARIABLES HEXAGON_SDK_ROOT HEXAGON_TOOLS_ROOT ) #QURT Related includes and linker flags set(V_ARCH ${HEXAGON_ARCH}) set(_QURT_INSTALL_DIR "${HEXAGON_SDK_ROOT}/rtos/qurt/ADSP${V_ARCH}MP${V_ARCH_EXTN}") set(_QURT_INSTALL_DIR "${HEXAGON_SDK_ROOT}/rtos/qurt/compute${V_ARCH}${V_ARCH_EXTN}") if( ${TREE} MATCHES PAKMAN ) set(_QURT_INSTALL_DIR "${QURT_IMAGE_DIR}/compute${V_ARCH}${V_ARCH_EXTN}") endif() message(DEBUG "_QURT_INSTALL_DIR:${_QURT_INSTALL_DIR}") set(RTOS_DIR ${_QURT_INSTALL_DIR}) set(QCC_DIR "${HEXAGON_QCC_DIR}/${V_ARCH}/G0") set(TARGET_DIR "${HEXAGON_LIB_DIR}/${V_ARCH}/G0") include_directories( ${_QURT_INSTALL_DIR}/include ${_QURT_INSTALL_DIR}/include/qurt ${_QURT_INSTALL_DIR}/include/posix ) set(QURT_START_LINK_LIBS) set(QURT_START_LINK_LIBS "${TARGET_DIR}/init.o" "${RTOS_DIR}/lib/crt1.o" "${RTOS_DIR}/lib/debugmon.o" "${RTOS_DIR}/lib/libqurt.a" "${TARGET_DIR}/libc.a" "${TARGET_DIR}/libqcc.a" "${TARGET_DIR}/libhexagon.a" 
"${RTOS_DIR}/lib/libqurtcfs.a" "${RTOS_DIR}/lib/libtimer_island.a" "${RTOS_DIR}/lib/libtimer_main.a" "${RTOS_DIR}/lib/libposix.a" ) STRING(REPLACE ";" " " QURT_START_LINK_LIBS "${QURT_START_LINK_LIBS}") set(QURT_END_LINK_LIBS ${TARGET_DIR}/fini.o ) #Non QURT related includes and linker flags set(TARGET_DIR_NOOS "${HEXAGON_TOOLCHAIN}/Tools/target/hexagon/lib/${HEXAGON_ARCH}") if (NOT NO_WRAP_MEM_API) set(WRAP_MALLOC -Wl,--wrap=malloc) set(WRAP_CALLOC -Wl,--wrap=calloc) set(WRAP_FREE -Wl,--wrap=free) set(WRAP_REALLOC -Wl,--wrap=realloc) set(WRAP_MEMALIGN -Wl,--wrap=memalign) endif() set(PIC_SHARED_LD_FLAGS -mcpu=${V_ARCH} -m${V_ARCH} -mhvx=${V_ARCH} -G0 -fpic -Wl,-Bsymbolic -Wl,-L${TARGET_DIR_NOOS}/G0/pic -Wl,-L${HEXAGON_TOOLCHAIN}/Tools/target/hexagon/lib/ -Wl,--no-threads ${WRAP_MALLOC} ${WRAP_CALLOC} ${WRAP_FREE} ${WRAP_REALLOC} ${WRAP_MEMALIGN} -shared "-o " "" -Wl,--start-group "" "" -Wl,--end-group -lc ) STRING(REPLACE ";" " " PIC_SHARED_LD_FLAGS "${PIC_SHARED_LD_FLAGS}") set(HEXAGON_PIC_SHARED_LINK_OPTIONS "${PIC_SHARED_LD_FLAGS}") #System include paths include_directories(SYSTEM ${HEXAGON_SDK_ROOT}/incs) include_directories(SYSTEM ${HEXAGON_SDK_ROOT}/incs/stddef) include_directories(SYSTEM ${HEXAGON_SDK_ROOT}/ipc/fastrpc/incs) #LLVM toolchain setup #Compiler paths, options and architecture set(CMAKE_C_COMPILER ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-clang${HEXAGON_TOOLCHAIN_SUFFIX}) set(CMAKE_CXX_COMPILER ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-clang++${HEXAGON_TOOLCHAIN_SUFFIX}) set(CMAKE_AR ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-ar${HEXAGON_TOOLCHAIN_SUFFIX}) set(CMAKE_ASM_COMPILER ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-clang++${HEXAGON_TOOLCHAIN_SUFFIX}) set(HEXAGON_LINKER ${CMAKE_C_COMPILER}) set(CMAKE_PREFIX_PATH ${HEXAGON_TOOLCHAIN}/Tools/target/hexagon) set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-Wl,-soname,") set(CMAKE_SHARED_LIBRARY_SONAME_CXX_FLAG "-Wl,-soname,") #Compiler Options set(COMMON_FLAGS "-mcpu=hexagon${V_ARCH} -m${V_ARCH} -mhvx=${V_ARCH} -fvectorize -Wall -Werror -fno-zero-initialized-in-bss -G0 -fdata-sections -fpic ${XQF_ARGS}") set(CMAKE_CXX_FLAGS_DEBUG "${COMMON_FLAGS} -O0 -D_DEBUG -g") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${COMMON_FLAGS} -O3 -g") set(CMAKE_CXX_FLAGS_RELEASE "${COMMON_FLAGS} -O3") set(CMAKE_C_FLAGS_DEBUG "${COMMON_FLAGS} -O0 -D_DEBUG -g") set(CMAKE_C_FLAGS_RELWITHDEBINFO "${COMMON_FLAGS} -O3 -g") set(CMAKE_C_FLAGS_RELEASE "${COMMON_FLAGS} -O3") set(CMAKE_ASM_FLAGS_DEBUG "${COMMON_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") set(CMAKE_ASM_FLAGS_RELEASE "${COMMON_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}") set(CMAKE_ASM_FLAGS_RELWITHDEBINFO "${COMMON_FLAGS} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" ) #Linker Options set(CMAKE_C_CREATE_SHARED_LIBRARY "${HEXAGON_LINKER} ${HEXAGON_PIC_SHARED_LINK_OPTIONS}") set(CMAKE_CXX_CREATE_SHARED_LIBRARY "${HEXAGON_LINKER} ${HEXAGON_PIC_SHARED_LINK_OPTIONS}") ggml-org-ggml-3678254/src/ggml-hexagon/htp/htp-ctx.h000066400000000000000000000014431512524704700220650ustar00rootroot00000000000000#ifndef HTP_CTX_H #define HTP_CTX_H #include "htp-dma.h" #include "worker-pool.h" #include #include #include #include #define HTP_MAX_NTHREADS 10 // FIXME: move these into matmul-ops #define HTP_SPAD_SRC0_NROWS 16 #define HTP_SPAD_SRC1_NROWS 16 #define HTP_SPAD_DST_NROWS 2 // Main context for htp DSP backend struct htp_context { dspqueue_t queue; dma_queue * dma[HTP_MAX_NTHREADS]; worker_pool_context_t worker_pool; uint32_t n_threads; int thread_id; int thread_prio; uint8_t * vtcm_base; size_t vtcm_size; uint32_t vtcm_rctx; atomic_bool vtcm_valid; 
atomic_bool vtcm_inuse; atomic_bool vtcm_needs_release; uint32_t opmask; }; #endif /* HTP_CTX_H */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/htp-dma.c000066400000000000000000000026641512524704700220310ustar00rootroot00000000000000#include "htp-dma.h" #include #include #include #pragma clang diagnostic ignored "-Wunused-function" static inline uint32_t pow2_ceil(uint32_t x) { if (x <= 1) { return 1; } int p = 2; x--; while (x >>= 1) { p <<= 1; } return p; } dma_queue * dma_queue_create(size_t capacity) { dma_queue * q = (dma_queue *) memalign(32, sizeof(dma_queue)); if (q == NULL) { FARF(ERROR, "%s: failed to allocate DMA queue\n", __FUNCTION__); return NULL; } capacity = pow2_ceil(capacity); memset(q, 0, sizeof(dma_queue)); q->capacity = capacity; q->idx_mask = capacity - 1; q->desc = (hexagon_udma_descriptor_type1_t *) memalign(64, capacity * sizeof(hexagon_udma_descriptor_type1_t)); memset(q->desc, 0, capacity * sizeof(hexagon_udma_descriptor_type1_t)); q->dptr = (dma_ptr *) memalign(4, capacity * sizeof(dma_ptr)); memset(q->dptr, 0, capacity * sizeof(dma_ptr)); q->tail = &q->desc[capacity - 1]; if (!q->desc && !q->dptr) { FARF(ERROR, "%s: failed to allocate DMA queue items\n", __FUNCTION__); return NULL; } FARF(HIGH, "dma-queue: capacity %u\n", capacity); return q; } void dma_queue_delete(dma_queue * q) { if (!q) { return; } free(q->desc); free(q->dptr); free(q); } void dma_queue_flush(dma_queue * q) { while (dma_queue_pop(q).dst != NULL) ; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/htp-dma.h000066400000000000000000000113371512524704700220330ustar00rootroot00000000000000#ifndef HTP_DMA_H #define HTP_DMA_H #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif typedef struct { void *dst; const void *src; } dma_ptr; typedef struct { hexagon_udma_descriptor_type1_t * desc; // descriptor pointers hexagon_udma_descriptor_type1_t * tail; // tail pointer dma_ptr * dptr; // dst/src pointers uint32_t push_idx; uint32_t pop_idx; uint32_t capacity; uint32_t idx_mask; } dma_queue; dma_queue * dma_queue_create(size_t capacity); void dma_queue_delete(dma_queue * q); void dma_queue_flush(dma_queue * q); // TODO: technically we don't need these and could use Q6_dmstart/wait/etc instead // but those do not seem to always compiler properly. static inline void dmstart(void * next) { asm volatile(" release(%0):at" : : "r"(next)); asm volatile(" dmstart(%0)" : : "r"(next)); } static inline void dmlink(void * cur, void * next) { asm volatile(" release(%0):at" : : "r"(next)); asm volatile(" dmlink(%0, %1)" : : "r"(cur), "r"(next)); } static inline unsigned int dmpoll(void) { unsigned int ret = 0; asm volatile(" %0 = dmpoll" : "=r"(ret) : : "memory"); return ret; } static inline unsigned int dmwait(void) { unsigned int ret = 0; asm volatile(" %0 = dmwait" : "=r"(ret) : : "memory"); return ret; } static inline dma_ptr dma_make_ptr(void *dst, const void *src) { dma_ptr p = { dst, src }; return p; } static inline bool dma_queue_push(dma_queue * q, dma_ptr dptr, size_t dst_row_size, size_t src_row_size, size_t width, // width in bytes. 
number of bytes to transfer per row size_t nrows) { if (((q->push_idx + 1) & q->idx_mask) == q->pop_idx) { FARF(ERROR, "dma-push: queue full\n"); return false; } hexagon_udma_descriptor_type1_t * desc = &q->desc[q->push_idx]; desc->next = NULL; desc->length = 0; desc->desctype = HEXAGON_UDMA_DESC_DESCTYPE_TYPE1; desc->dstbypass = 1; desc->srcbypass = 1; #if __HVX_ARCH__ >= 73 desc->dstbypass = 1; desc->srcbypass = 1; #else desc->dstbypass = 0; desc->srcbypass = 1; #endif desc->order = 0; desc->dstate = HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE; desc->src = (void *) dptr.src; desc->dst = (void *) dptr.dst; desc->allocation = 0; desc->padding = 0; desc->roiwidth = width; desc->roiheight = nrows; desc->srcstride = src_row_size; desc->dststride = dst_row_size; desc->srcwidthoffset = 0; desc->dstwidthoffset = 0; q->dptr[q->push_idx] = dptr; dmlink(q->tail, desc); q->tail = desc; // FARF(ERROR, "dma-push: i %u len %u dst %p src %p\n", q->push_idx, len, dst, src); q->push_idx = (q->push_idx + 1) & q->idx_mask; return true; } static inline bool dma_queue_push_ddr_to_vtcm(dma_queue * q, dma_ptr dptr, size_t dst_row_size, size_t src_row_size, size_t nrows) { return dma_queue_push(q, dptr, dst_row_size, src_row_size, src_row_size, nrows); } static inline bool dma_queue_push_vtcm_to_ddr(dma_queue * q, dma_ptr dptr, size_t dst_row_size, size_t src_row_size, size_t nrows) { return dma_queue_push(q, dptr, dst_row_size, src_row_size, dst_row_size, nrows); } static inline dma_ptr dma_queue_pop(dma_queue * q) { dma_ptr dptr = { NULL }; if (q->push_idx == q->pop_idx) { return dptr; } hexagon_udma_descriptor_type1_t * desc = &q->desc[q->pop_idx]; // Wait for desc to complete while (1) { dmpoll(); if (desc->dstate == HEXAGON_UDMA_DESC_DSTATE_COMPLETE) { break; } // FARF(ERROR, "dma-pop: waiting for DMA : %u\n", q->pop_idx); } dptr = q->dptr[q->pop_idx]; // FARF(ERROR, "dma-pop: i %u dst %p\n", q->pop_idx, dst); q->pop_idx = (q->pop_idx + 1) & q->idx_mask; return dptr; } #ifdef __cplusplus } // extern "C" #endif #endif /* HTP_DMA_H */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/htp-msg.h000066400000000000000000000110201512524704700220450ustar00rootroot00000000000000#ifndef HTP_MSG_H #define HTP_MSG_H #include // ggml-common.h must be included prio to this header // Mask to enable various stages of the Ops. // Used for debugging and profiling. enum { HTP_OPMASK_QUEUE = (1 << 0), // Enable Queueing (ie calls into the DSP) HTP_OPMASK_QUANTIZE = (1 << 1), // Enable Quantize HTP_OPMASK_COMPUTE = (1 << 2), // Enable Compute }; // Op flags enum { HTP_OPFLAGS_SKIP_QUANTIZE = (1 << 0), // Skip dynamic quantization (reuse quantized tensors) HTP_OPFLAGS_SKIP_COMPUTE = (1 << 1), // Skip actual computation (used for profiling) HTP_OPFLAGS_EARLY_WAKEUP = (1 << 2) // Send early wakeup notification }; enum htp_status { HTP_STATUS_OK = 1, HTP_STATUS_INTERNAL_ERR = 2, HTP_STATUS_NO_SUPPORT = 3, HTP_STATUS_INVAL_PARAMS = 4, HTP_STATUS_VTCM_TOO_SMALL = 5, }; // The values must match the ggml_type. // Duplicated here because we can't include full ggml.h in the htp build. // We have some static_asserts in the cpp code to ensure things are in sync. enum htp_data_type { HTP_TYPE_F32 = 0, HTP_TYPE_F16 = 1, HTP_TYPE_Q4_0 = 2, HTP_TYPE_Q8_0 = 8, HTP_TYPE_MXFP4 = 39, HTP_TYPE_COUNT }; // These values are manually translated over to HTP // !!!! DO NOT ALTER THE ORDER OF THE FIRST FOUR ENUMS !!!! 
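// Illustrative guard only (not part of the original header): a compile-time
// check of this shape on either side of the interface would catch an
// accidental reorder of the four binary ops below, assuming both sides keep
// MUL..DIV contiguous starting at 0:
//
//   _Static_assert(HTP_OP_ADD == HTP_OP_MUL + 1 &&
//                  HTP_OP_SUB == HTP_OP_MUL + 2 &&
//                  HTP_OP_DIV == HTP_OP_MUL + 3, "binary op order changed");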
enum htp_op { HTP_OP_MUL = 0, HTP_OP_ADD = 1, HTP_OP_SUB = 2, HTP_OP_DIV = 3, HTP_OP_MUL_MAT = 4, HTP_OP_MUL_MAT_ID = 5, HTP_OP_RMS_NORM = 6, HTP_OP_UNARY_SILU = 7, HTP_OP_UNARY_GELU = 8, HTP_OP_GLU_SWIGLU = 9, HTP_OP_GLU_SWIGLU_OAI = 10, HTP_OP_SOFTMAX = 11, HTP_OP_ADD_ID = 12, HTP_OP_ROPE = 13, INVALID }; static inline size_t htp_type_block_size(uint32_t t) { switch (t) { case HTP_TYPE_F32: return 1; case HTP_TYPE_F16: return 1; case HTP_TYPE_Q4_0: return QK4_0; case HTP_TYPE_Q8_0: return QK8_0; case HTP_TYPE_MXFP4: return QK_MXFP4; default: assert(0 && "unsupported HTP data type"); } return 0; } static inline size_t htp_type_nbytes(uint32_t t) { switch (t) { case HTP_TYPE_F32: return 4; case HTP_TYPE_F16: return 2; case HTP_TYPE_Q4_0: return sizeof(block_q4_0); case HTP_TYPE_Q8_0: return sizeof(block_q8_0); case HTP_TYPE_MXFP4: return sizeof(block_mxfp4); default: assert(0 && "unsupported HTP data type"); } return 0; } static const char * htp_type_name(uint32_t t) { switch (t) { case HTP_TYPE_F32: return "fp32"; case HTP_TYPE_F16: return "fp16"; case HTP_TYPE_Q4_0: return "q4_0"; case HTP_TYPE_Q8_0: return "q8_0"; case HTP_TYPE_MXFP4: return "mxfp4"; } return 0; } // Internal types #define QK_Q4_0x4x2 256 // 4x Q4_0 blocks packed with next 4x Q4_0 blocks (size in bytes 128) #define QK_Q8_0x4x2 256 // 4x Q8_0 blocks concat with next 4x Q8_0 blocks #define QK_MXFP4x4x2 256 // 4x MXFP4 blocks concat with next 4x MXFP4 blocks #define HTP_MAX_DIMS 4 struct htp_tensor { uint32_t data; // Buffer offset in the messages, and data pointer on the NSP uint32_t type; // Data type uint32_t ne[HTP_MAX_DIMS]; // Number of elements uint32_t nb[HTP_MAX_DIMS]; // Stride in bytes (see ggml.h ggml_tensor) }; #define HTP_MAX_OP_PARAMS 64 struct htp_general_req { uint32_t op; // GGML/HTP Op int32_t op_params[HTP_MAX_OP_PARAMS / sizeof(int32_t)]; // Params for the op, e.g. epsilon of RMS norm uint32_t flags; // Request flags struct htp_tensor src0; // Input0 tensor struct htp_tensor src1; // Input1 tensor struct htp_tensor src2; // Input2 tensor struct htp_tensor dst; // Output tensor // should be multiple of 64 bytes (cacheline) }; struct htp_general_rsp { uint32_t op; // GGML/HTP Op uint32_t status; // HTP_STATUS_... 
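// Profiling counters reported back by the DSP for this request: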
uint32_t prof_usecs; // Number of usec per request uint32_t prof_cycles; // Number of cycles per request uint32_t prof_pkts; // Number of instruction packets per request uint8_t unused[44]; // Pad to 64 bytes }; #define HTP_MAX_MESSAGE_SIZE sizeof(struct htp_general_req) #define HTP_MAX_PACKET_BUFFERS 4 #endif /* HTP_MSG_H */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/htp-ops.h000066400000000000000000000034431512524704700220720ustar00rootroot00000000000000#ifndef HTP_OPS_H #define HTP_OPS_H #include "htp-ctx.h" #include "htp-msg.h" #include "worker-pool.h" #include "ops-utils.h" #include #include // ggml-common.h must be included prior to this header struct htp_spad { uint8_t * data; size_t size; size_t size_per_thread; }; struct htp_ops_context { struct htp_context * ctx; enum htp_op op; int32_t op_params[HTP_MAX_OP_PARAMS / sizeof(int32_t)]; struct htp_tensor src0; struct htp_tensor src1; struct htp_tensor src2; struct htp_tensor dst; struct htp_spad src0_spad; struct htp_spad src1_spad; struct htp_spad src2_spad; struct htp_spad dst_spad; worker_pool_context_t * wpool; // worker pool uint32_t n_threads; // num threads uint32_t src0_nrows_per_thread; uint32_t src1_nrows_per_thread; struct fastdiv_values src0_div1; // fastdiv values for ne1 struct fastdiv_values src0_div2; // fastdiv values for ne2 struct fastdiv_values src0_div3; // fastdiv values for ne3 struct fastdiv_values src0_div21; // fastdiv values for ne2 * ne1 struct fastdiv_values src1_div1; // fastdiv values for ne1 struct fastdiv_values src1_div2; // fastdiv values for ne2 struct fastdiv_values src1_div3; // fastdiv values for ne3 struct fastdiv_values src1_div21; // fastdiv values for ne2 * ne1 uint32_t flags; }; int op_matmul(struct htp_ops_context * octx); int op_matmul_id(struct htp_ops_context * octx); int op_binary(struct htp_ops_context * octx); int op_unary(struct htp_ops_context * octx); int op_activations(struct htp_ops_context * octx); int op_softmax(struct htp_ops_context * octx); int op_add_id(struct htp_ops_context * octx); int op_rope(struct htp_ops_context * octx); #endif /* HTP_OPS_H */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/htp_iface.idl000066400000000000000000000005311512524704700227360ustar00rootroot00000000000000// FastRPC IDL interface for GGML HTP #ifndef HTP_IDL #define HTP_IDL #include "AEEStdDef.idl" #include "remote.idl" interface htp_iface : remote_handle64 { AEEResult start(in uint32 sess_id, in uint64 dsp_queue_id, in uint32 n_hvx); AEEResult stop(); AEEResult enable_etm(); AEEResult disable_etm(); }; #endif /* HTP_IDL */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/hvx-exp.c000066400000000000000000000064231512524704700220730ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" static inline HVX_Vector hvx_vec_exp_fp32_guard(HVX_Vector in_vec, HVX_Vector max_exp, HVX_Vector inf) { const HVX_VectorPred pred0 = Q6_Q_vcmp_gt_VsfVsf(in_vec, max_exp); HVX_Vector out = hvx_vec_exp_fp32(in_vec); return Q6_V_vmux_QVV(pred0, inf, out); } void hvx_exp_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, bool negate) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr 
= 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_exp_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } // assert((0 == unaligned_addr) || (0 == num_elems_whole)); if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_exp_f32: unaligned loop in hvx op, possibly slower execution\n"); } HVX_Vector vec_out = Q6_V_vzero(); static const float kInf = INFINITY; static const float kMaxExp = 88.02f; // log(INF) const HVX_Vector max_exp = hvx_vec_splat_fp32(kMaxExp); const HVX_Vector inf = hvx_vec_splat_fp32(kInf); if (0 == unaligned_loop) { HVX_Vector * p_vec_in1 = (HVX_Vector *) src; HVX_Vector * p_vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { if (true == negate) { HVX_Vector neg_vec_in = hvx_vec_neg_fp32(*p_vec_in1++); *p_vec_out++ = hvx_vec_exp_fp32_guard(neg_vec_in, max_exp, inf); } else { *p_vec_out++ = hvx_vec_exp_fp32_guard(*p_vec_in1++, max_exp, inf); } } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); if (true == negate) { HVX_Vector neg_vec_in = hvx_vec_neg_fp32(in); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = hvx_vec_exp_fp32_guard(neg_vec_in, max_exp, inf); } else { *(HVX_UVector *) (dst + i * SIZEOF_FP32) = hvx_vec_exp_fp32_guard(in, max_exp, inf); } } } if (left_over > 0) { const float * srcf = (float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; if (true == negate) { HVX_Vector neg_vec_in = hvx_vec_neg_fp32(in); vec_out = hvx_vec_exp_fp32_guard(neg_vec_in, max_exp, inf); } else { vec_out = hvx_vec_exp_fp32_guard(in, max_exp, inf); } hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, vec_out); } } ggml-org-ggml-3678254/src/ggml-hexagon/htp/hvx-inverse.c000066400000000000000000000051271512524704700227520ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" static inline HVX_Vector hvx_vec_inverse_fp32_guard(HVX_Vector v_sf, HVX_Vector nan_inf_mask) { HVX_Vector out = hvx_vec_inverse_fp32(v_sf); HVX_Vector masked_out = Q6_V_vand_VV(out, nan_inf_mask); const HVX_VectorPred pred = Q6_Q_vcmp_eq_VwVw(nan_inf_mask, masked_out); return Q6_V_vmux_QVV(pred, Q6_V_vzero(), out); } void hvx_inverse_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_inverse_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } // assert((0 == unaligned_addr) || (0 == num_elems_whole)); if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_inverse_f32: unaligned loop in hvx op, possibly slower execution\n"); } static const uint32_t kNanInfMask = 0x7f800000; const HVX_Vector nan_inf_mask = Q6_V_vsplat_R(kNanInfMask); if (0 == unaligned_loop) { 
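// Aligned fast path: src and dst are both VLEN (128-byte) aligned, or there
// are no whole vectors to process, so full HVX vectors can be loaded and
// stored directly.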
HVX_Vector * p_vec_in = (HVX_Vector *) src; HVX_Vector * p_vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { *p_vec_out++ = hvx_vec_inverse_fp32_guard(*p_vec_in++, nan_inf_mask); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = hvx_vec_inverse_fp32_guard(in, nan_inf_mask); } } if (left_over > 0) { const float * srcf = (float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; HVX_Vector out = hvx_vec_inverse_fp32_guard(in, nan_inf_mask); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, out); } } ggml-org-ggml-3678254/src/ggml-hexagon/htp/hvx-sigmoid.c000066400000000000000000000022721512524704700227300ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" #if 0 // Reference algo used in hvx-utils static void fast_sigmoid_f32(const float* restrict src, float* restrict dst, const int num_elems) { const float c1 = 0.03138777; const float c2 = 0.276281267; const float c_log2f = 1.442695022; int32_t store_ints[32]; float store_floats[3][32]; for (int i = 0; i < num_elems; i++) { float v = src0[i]; v *= c_log2f*0.5; int intPart = (int)v; float x = (v - intPart); float xx = x * x; float v1 = c_log2f + c2 * xx; float v2 = x + xx * c1 * x; float v3 = (v2 + v1); *((int*)&v3) += intPart << 24; float v4 = v2 - v1; float v5 = v3 - v4; float res = v3 / v5; dst[i] = res; } } #endif ggml-org-ggml-3678254/src/ggml-hexagon/htp/hvx-utils.c000066400000000000000000001131341512524704700224350ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "hvx-utils.h" #define htp_binary_ops_preamble \ int step_of_4 = num_elems >> 7; \ int step_of_2 = (num_elems - step_of_4 * VLEN_FP32 * 4) >> 6; \ int step_of_1 = (num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2) >> 5; \ int remaining = num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32; \ \ const uint8_t * restrict src0_curr = src0; \ const uint8_t * restrict src1_curr = src1; \ uint8_t * restrict dst_curr = dst; void hvx_mul_f32(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_mul_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_mul_f32: unaligned loop in hvx op, possibly slower execution\n"); } bool handled_leftover = 
false; if (0 == unaligned_loop) { HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, *vec_in2++); *vec_out++ = Q6_Vsf_equals_Vqf32(v); } } else { int step_of_1 = num_elems_whole >> 5; // divby 32, because 32 float = 128 bytes per HVX vector int leftover_size = left_over * sizeof(float); HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; HVX_UVector * restrict vec_out = (HVX_UVector *) dst; HVX_Vector slinep; HVX_Vector slinec; HVX_Vector sline; HVX_Vector sline2p; HVX_Vector sline2c; HVX_Vector sline2; slinep = *vec_in1++; sline2p = *vec_in2++; #pragma unroll(4) for (int i = step_of_1 - 1; i > 0; i--) { slinec = *vec_in1++; sline2c = *vec_in2++; sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0); sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1); *((HVX_UVector *) (vec_out++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, sline2)); slinep = slinec; sline2p = sline2c; } if (step_of_1 > 1) { slinec = htp_is_aligned(vec_in1, VLEN) && left_over == 0 ? slinep : *vec_in1++; sline2c = htp_is_aligned(vec_in2, VLEN) && left_over == 0 ? sline2p : *vec_in2++; sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0); sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1); *((HVX_UVector *) (vec_out++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, sline2)); slinep = slinec; sline2p = sline2c; } if (left_over > 0) { slinec = (is_in_one_chunk(vec_in1, leftover_size, VLEN) ? slinep : *vec_in1++); sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0); sline2c = (is_in_one_chunk(vec_in2, leftover_size, VLEN) ? 
sline2p : *vec_in2++); sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1); HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(sline, sline2); hvx_vec_store_u(vec_out, leftover_size, Q6_Vsf_equals_Vqf32(out)); handled_leftover = true; } } if (left_over > 0 && !handled_leftover) { const float * src0f = (const float *) src0 + num_elems_whole; const float * src1f = (const float *) src1 + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in1 = *(HVX_UVector *) src0f; HVX_Vector in2 = *(HVX_UVector *) src1f; HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in1, in2); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); } } void hvx_mul_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems) { htp_binary_ops_preamble; for (int i = 0; i < step_of_4; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b = *(HVX_Vector *) src1_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN); HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN); HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN); src0_curr += 4 * VLEN; HVX_Vector v3 = Q6_Vqf32_vmpy_VsfVsf(v3a, v3b); *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN); *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3); HVX_Vector v4 = Q6_Vqf32_vmpy_VsfVsf(v4a, v4b); src1_curr += 4 * VLEN; *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4); dst_curr += 4 * VLEN; } for (int i = 0; i < step_of_2; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b = *(HVX_Vector *) src1_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); src0_curr += 2 * VLEN; HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b); src1_curr += 2 * VLEN; *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); dst_curr += 2 * VLEN; } for (int i = 0; i < step_of_1; i++) { HVX_Vector va = *(HVX_Vector *) src0_curr; src0_curr += VLEN; HVX_Vector vb = *(HVX_Vector *) src1_curr; src1_curr += VLEN; HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(va, vb); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v); dst_curr += VLEN; } if (remaining > 0) { HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v)); } } void hvx_mul_mul_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, const uint8_t * restrict src2, uint8_t * restrict dst, const int num_elems) { const uint8_t * restrict src0_curr = src0; const uint8_t * restrict src1_curr = src1; const uint8_t * restrict src2_curr = src2; uint8_t * restrict dst_curr = dst; int step_of_2 = num_elems >> 6; int step_of_1 = (num_elems - step_of_2 * VLEN_FP32 * 2) >> 5; int remaining = num_elems - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32; for (int i = 0; i < step_of_2; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b = *(HVX_Vector *) src1_curr; HVX_Vector v1c = *(HVX_Vector *) src2_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1_ = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b); 
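// Chain the second multiply: v1 = (src0 * src1) * src2, converting the qf32
// intermediate back to IEEE sf before folding in the third operand.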
HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1_), v1c); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); HVX_Vector v2c = *(HVX_Vector *) (src2_curr + VLEN); src0_curr += 2 * VLEN; HVX_Vector v2_ = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b); HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v2_), v2c); src1_curr += 2 * VLEN; src2_curr += 2 * VLEN; *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); dst_curr += 2 * VLEN; } for (int i = 0; i < step_of_1; i++) { HVX_Vector va = *(HVX_Vector *) src0_curr; src0_curr += VLEN; HVX_Vector vb = *(HVX_Vector *) src1_curr; src1_curr += VLEN; HVX_Vector vc = *(HVX_Vector *) src2_curr; src2_curr += VLEN; HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(va, vb); HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), vc); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v2); dst_curr += VLEN; } if (remaining > 0) { HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), *(HVX_Vector *) src2_curr); hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v2)); } } void hvx_add_f32(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_add_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_add_f32: unaligned loop in hvx op, possibly slower execution\n"); } if (0 == unaligned_loop) { HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*vec_in1++, *vec_in2++); *vec_out++ = Q6_Vsf_equals_Vqf32(v); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32); HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32); HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); } } if (left_over > 0) { const float * src0f = (const float *) src0 + num_elems_whole; const float * src1f = (const float *) src1 + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in1 = *(HVX_UVector *) src0f; HVX_Vector in2 = *(HVX_UVector *) src1f; HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); } } void hvx_add_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems) { htp_binary_ops_preamble; for (int i = 0; i < step_of_4; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b = *(HVX_Vector *) src1_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN); HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b); *(HVX_Vector *) dst_curr = 
Q6_Vsf_equals_Vqf32(v1); HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN); HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN); src0_curr += 4 * VLEN; HVX_Vector v3 = Q6_Vqf32_vadd_VsfVsf(v3a, v3b); *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN); *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3); HVX_Vector v4 = Q6_Vqf32_vadd_VsfVsf(v4a, v4b); src1_curr += 4 * VLEN; *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4); dst_curr += 4 * VLEN; } for (int i = 0; i < step_of_2; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b = *(HVX_Vector *) src1_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); src0_curr += 2 * VLEN; HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b); src1_curr += 2 * VLEN; *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); dst_curr += 2 * VLEN; } for (int i = 0; i < step_of_1; i++) { HVX_Vector va = *(HVX_Vector *) src0_curr; src0_curr += VLEN; HVX_Vector vb = *(HVX_Vector *) src1_curr; src1_curr += VLEN; HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(va, vb); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v); dst_curr += VLEN; } if (remaining > 0) { HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v)); } } void hvx_add_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { size_t left_over = num_elems & (VLEN_FP32 - 1); size_t num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_add_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_add_scalar_f32: unaligned loop in hvx op, possibly slower execution\n"); } static const float kInf = INFINITY; const HVX_Vector inf = hvx_vec_splat_fp32(kInf); HVX_Vector val_vec = hvx_vec_splat_fp32(val); if (0 == unaligned_loop) { HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *vec_in1++; const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in); HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(in, val_vec); v = Q6_Vsf_equals_Vqf32(v); v = Q6_V_vmux_QVV(pred_inf, inf, v); *vec_out++ = v; } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in); HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec); out = Q6_Vsf_equals_Vqf32(out); out = Q6_V_vmux_QVV(pred_inf, inf, out); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = out; } } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in); HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec); out = Q6_Vsf_equals_Vqf32(out); out = Q6_V_vmux_QVV(pred_inf, inf, out); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, out); } } void 
hvx_mul_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { size_t left_over = num_elems & (VLEN_FP32 - 1); size_t num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_mul_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_mul_scalar_f32: unaligned loop in hvx op, possibly slower execution\n"); } HVX_Vector val_vec = hvx_vec_splat_fp32(val); bool handled_leftover = false; if (0 == unaligned_loop) { HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, val_vec); *vec_out++ = Q6_Vsf_equals_Vqf32(v); } } else { int step_of_1 = num_elems >> 5; // divby 32, because 32 float = 128 bytes per HVX vector int leftover_size = left_over * sizeof(float); HVX_Vector * input_v_ptr = (HVX_Vector *) src; HVX_UVector * output_v_ptr = (HVX_UVector *) dst; HVX_Vector slinep; HVX_Vector slinec; HVX_Vector sline; slinep = *input_v_ptr++; #pragma unroll(4) for (int i = step_of_1 - 1; i > 0; i--) { slinec = *input_v_ptr++; sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src); *((HVX_UVector *) (output_v_ptr++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec)); /* Prepare slinep for next iteration */ slinep = slinec; } if (step_of_1 > 0) { slinec = htp_is_aligned(input_v_ptr, VLEN) && left_over == 0 ? slinep : *input_v_ptr++; sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src); *((HVX_UVector *) (output_v_ptr++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec)); slinep = slinec; } if (leftover_size > 0) { slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) ? 
slinep : *input_v_ptr++); sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src); HVX_Vector sout = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec)); hvx_vec_store_u(output_v_ptr, leftover_size, sout); handled_leftover = true; } } if (left_over > 0 && !handled_leftover) { const float * srcf = (const float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, val_vec); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); } } void hvx_sub_f32(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems) { size_t left_over = num_elems & (VLEN_FP32 - 1); size_t num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_sub_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_sub_f32: unaligned loop in hvx op, possibly slower execution\n"); } if (0 == unaligned_loop) { HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, *vec_in2++); *vec_out++ = Q6_Vsf_equals_Vqf32(v); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32); HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32); HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); } } if (left_over > 0) { const float * src0f = (const float *) src0 + num_elems_whole; const float * src1f = (const float *) src1 + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in1 = *(HVX_UVector *) src0f; HVX_Vector in2 = *(HVX_UVector *) src1f; HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); } } void hvx_sub_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems) { htp_binary_ops_preamble; for (int i = 0; i < step_of_4; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b = *(HVX_Vector *) src1_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN); HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN); HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN); src0_curr += 4 * VLEN; HVX_Vector v3 = Q6_Vqf32_vsub_VsfVsf(v3a, v3b); *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN); *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3); HVX_Vector v4 = Q6_Vqf32_vsub_VsfVsf(v4a, v4b); src1_curr += 4 * VLEN; *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4); dst_curr += 4 * VLEN; } for (int i = 0; i < step_of_2; i++) { HVX_Vector v1a = *(HVX_Vector *) src0_curr; HVX_Vector v1b 
= *(HVX_Vector *) src1_curr; HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b); HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); src0_curr += 2 * VLEN; HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b); src1_curr += 2 * VLEN; *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); dst_curr += 2 * VLEN; } for (int i = 0; i < step_of_1; i++) { HVX_Vector va = *(HVX_Vector *) src0_curr; src0_curr += VLEN; HVX_Vector vb = *(HVX_Vector *) src1_curr; src1_curr += VLEN; HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(va, vb); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v); dst_curr += VLEN; } if (remaining > 0) { HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v)); } } void hvx_sub_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { size_t left_over = num_elems & (VLEN_FP32 - 1); size_t num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_sub_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_sub_scalar_f32: unaligned loop in hvx op, possibly slower execution\n"); } HVX_Vector val_vec = hvx_vec_splat_fp32(val); if (0 == unaligned_loop) { HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, val_vec); *vec_out++ = Q6_Vsf_equals_Vqf32(v); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); } } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); } } float hvx_sum_of_squares_f32(const uint8_t * restrict src, const int num_elems) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; if (0 == htp_is_aligned((void *) src, VLEN)) { FARF(HIGH, "hvx_sum_of_squares_f32: unaligned address in hvx op, possibly slower execution\n"); } assert((1 == htp_is_aligned((void *) src, VLEN)) || (0 == num_elems_whole)); HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; HVX_Vector sum_vec_acc = Q6_V_vsplat_R(0x00000000); HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000); #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1, *vec_in1); sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, v); vec_in1++; } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; HVX_Vector vec_left = *(HVX_UVector *) srcf; HVX_Vector vec_left_sq = Q6_Vqf32_vmpy_VsfVsf(vec_left, vec_left); HVX_Vector vec_tmp = Q6_V_valign_VVR(vec_left_sq, zero_vec, left_over * SIZEOF_FP32); sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, vec_tmp); } HVX_Vector v = 
hvx_vec_qf32_reduce_sum(sum_vec_acc); return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v)); } float hvx_self_sum_f32(const uint8_t * restrict src, const int num_elems) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if (0 == htp_is_aligned((void *) src, VLEN)) { FARF(HIGH, "hvx_self_sum_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_self_sum_f32: unaligned loop in hvx op, possibly slower execution\n"); } HVX_Vector sum_vec = Q6_V_vsplat_R(0x00000000); HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000); if (0 == unaligned_loop) { HVX_Vector * vec_in = (HVX_Vector *) src; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, *vec_in++); sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), *vec_in++); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), in); } } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; HVX_Vector vec_left = *(HVX_UVector *) srcf; HVX_Vector vec_tmp = Q6_V_valign_VVR(vec_left, zero_vec, left_over * SIZEOF_FP32); // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, vec_tmp); sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), vec_tmp); } HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec); return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v)); } void hvx_scale_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, const float scale) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_scale_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_scale_f32: unaligned loop in hvx op, possibly slower execution\n"); } HVX_Vector scale_vec = hvx_vec_splat_fp32(scale); if (0 == unaligned_loop) { HVX_Vector * vec_in1 = (HVX_Vector *) src; HVX_Vector * vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, scale_vec); *vec_out++ = Q6_Vsf_equals_Vqf32(v); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, scale_vec); *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); } } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, scale_vec); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); } } float hvx_self_max_f32(const uint8_t * restrict src, const int num_elems) { int left_over = num_elems & (VLEN_FP32 - 1); int num_elems_whole = num_elems - left_over; int unaligned_addr = 0; int unaligned_loop = 0; if (0 == htp_is_aligned((void *) src, VLEN)) { FARF(HIGH, "hvx_self_max_f32: unaligned address in hvx op, possibly slower execution\n"); unaligned_addr = 1; } if ((1 == 
unaligned_addr) && (num_elems_whole != 0)) { unaligned_loop = 1; FARF(HIGH, "hvx_self_max_f32: unaligned loop in hvx op, possibly slower execution\n"); } HVX_Vector vec_max = hvx_vec_splat_fp32(((const float *) src)[0]); HVX_Vector vec_first = hvx_vec_splat_fp32(((const float *) src)[0]); if (0 == unaligned_loop) { HVX_Vector * restrict vec_in = (HVX_Vector *) src; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, *vec_in++); } } else { #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, in); } } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; HVX_Vector in = *(HVX_UVector *) srcf; HVX_Vector temp = Q6_V_valign_VVR(in, vec_first, left_over * SIZEOF_FP32); vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, temp); } HVX_Vector v = hvx_vec_reduce_max_fp32(vec_max); return hvx_vec_get_fp32(v); } void hvx_min_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { size_t left_over = num_elems & (VLEN_FP32 - 1); size_t num_elems_whole = num_elems - left_over; int unalign_address = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_min_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); unalign_address = 1; } const float * src_f = (const float *) src; HVX_Vector vec_min = hvx_vec_splat_fp32(val); if(unalign_address == 0){ HVX_Vector * restrict vec_in = (HVX_Vector *) src; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++); *vec_out++ = (min_clamp); } }else{ HVX_UVector * restrict vec_in = (HVX_Vector *) src; HVX_UVector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++); *vec_out++ = (min_clamp); } } if (left_over > 0 ) { const float * srcf = (const float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_UVector in = *(HVX_UVector *) srcf; HVX_UVector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, in); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, (min_clamp)); } } void hvx_clamp_scalar_f32(const uint8_t * restrict src, const float limit_left, const float limit_right, uint8_t * restrict dst, const int num_elems) { size_t left_over = num_elems & (VLEN_FP32 - 1); size_t num_elems_whole = num_elems - left_over; int unalign_address = 0; if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { FARF(HIGH, "hvx_clamp_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); unalign_address = 1; } HVX_Vector range_left = hvx_vec_splat_fp32(limit_left); HVX_Vector range_right = hvx_vec_splat_fp32(limit_right); if(unalign_address == 0){ HVX_Vector * restrict vec_in = (HVX_Vector *) src; HVX_Vector * restrict vec_out = (HVX_Vector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in_vec = *vec_in++; HVX_Vector temp_v = in_vec; HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right); HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec); in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v); in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec); *vec_out++ = 
in_vec; } }else{ HVX_UVector * restrict vec_in = (HVX_UVector *) src; HVX_UVector * restrict vec_out = (HVX_UVector *) dst; #pragma unroll(4) for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { HVX_Vector in_vec = *vec_in++; HVX_Vector temp_v = in_vec; HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right); HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec); in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v); in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec); *vec_out++ = in_vec; } } if (left_over > 0) { const float * srcf = (const float *) src + num_elems_whole; float * dstf = (float *) dst + num_elems_whole; HVX_Vector in_vec = *(HVX_UVector *) srcf; HVX_Vector temp_v = in_vec; HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right); HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec); in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v); in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec); hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, in_vec); } } ggml-org-ggml-3678254/src/ggml-hexagon/htp/hvx-utils.h000066400000000000000000001166621512524704700224530ustar00rootroot00000000000000#ifndef HVX_UTILS_H #define HVX_UTILS_H #include "ops-utils.h" #include #include #define SIZEOF_FP32 (4) #define SIZEOF_FP16 (2) #define VLEN (128) #define VLEN_FP32 (VLEN / SIZEOF_FP32) #define VLEN_FP16 (VLEN / SIZEOF_FP16) typedef union { HVX_Vector v; uint8_t b[VLEN]; uint16_t h[VLEN_FP16]; uint32_t w[VLEN_FP32]; __fp16 fp16[VLEN_FP16]; float fp32[VLEN_FP32]; } __attribute__((aligned(VLEN), packed)) HVX_VectorAlias; /* Q6_Vsf_equals_Vw is only available on v73+.*/ #if __HVX_ARCH__ < 73 static inline HVX_Vector int32_to_qfloat(HVX_Vector const in) { HVX_Vector const vzero = Q6_V_vzero(); HVX_VectorPred is_zero = Q6_Q_vcmp_eq_VwVw(in, vzero); HVX_Vector lshift = Q6_Vw_vnormamt_Vw(in); HVX_Vector normalized = Q6_Vw_vasl_VwVw(in, lshift); HVX_Vector vexp = Q6_Vw_vsub_VwVw(Q6_V_vsplat_R(0x7f + 30), lshift); HVX_Vector mant = Q6_V_vand_VV(Q6_V_vsplat_R(0xFFFFFF00), normalized); HVX_Vector ret = Q6_V_vmux_QVV(is_zero, vzero, Q6_Vw_vadd_VwVw(mant, vexp)); return ret; } static inline HVX_Vector Q6_Vsf_equals_Vw(HVX_Vector const in) { return Q6_Vsf_equals_Vqf32(int32_to_qfloat(in)); } #endif static inline HVX_Vector hvx_vec_splat_fp32(float i) { union { float f; int32_t i; } fp32 = { .f = i }; return Q6_V_vsplat_R(fp32.i); } static inline void hvx_vec_store_u(void * addr, uint32_t n, HVX_Vector v) { // Rotate as needed. 
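// The value is rotated so its bytes land at the right offsets for the
// (possibly unaligned) destination; predicated stores then touch only the n
// target bytes, split across two aligned vectors when [addr, addr + n)
// crosses a 128-byte boundary.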
v = Q6_V_vlalign_VVR(v, v, (size_t) addr); uint32_t left_off = (size_t) addr & 127; uint32_t right_off = left_off + n; HVX_VectorPred ql_not = Q6_Q_vsetq_R((size_t) addr); HVX_VectorPred qr = Q6_Q_vsetq2_R(right_off); if (right_off > 128) { Q6_vmem_QRIV(qr, (HVX_Vector *) addr + 1, v); // all 1's qr = Q6_Q_vcmp_eq_VbVb(v, v); } ql_not = Q6_Q_or_QQn(ql_not, qr); Q6_vmem_QnRIV(ql_not, (HVX_Vector *) addr, v); } static inline void hvx_vec_store_a(void * ptr, size_t n, HVX_Vector v) { assert((unsigned long) ptr % 128 == 0); HVX_VectorPred ql_not = Q6_Q_vsetq_R((size_t) ptr); HVX_VectorPred qr = Q6_Q_vsetq2_R(n); ql_not = Q6_Q_or_QQn(ql_not, qr); Q6_vmem_QnRIV(ql_not, (HVX_Vector *) ptr, v); } static inline HVX_Vector hvx_vec_repl4(HVX_Vector v) { // vdelta control to replicate first 4 bytes across all elements static const uint8_t __attribute__((aligned(128))) repl[128] = { 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, }; HVX_Vector ctrl = *(HVX_Vector *) repl; return Q6_V_vdelta_VV(v, ctrl); } // copy n fp16 elements : source and destination are aligned to HVX Vector (128) static inline void hvx_copy_fp16_aa(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { HVX_Vector * restrict vdst = (HVX_Vector *) dst; HVX_Vector * restrict vsrc = (HVX_Vector *) src; assert((unsigned long) dst % 128 == 0); assert((unsigned long) src % 128 == 0); uint32_t nvec = n / 64; uint32_t nloe = n % 64; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { HVX_Vector v = vsrc[i]; vdst[i] = v; } if (nloe) { HVX_Vector v = vsrc[i]; hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(__fp16), v); } } // copy n fp16 elements : source is aligned, destination is potentially unaligned static inline void hvx_copy_fp16_ua(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { HVX_UVector * restrict vdst = (HVX_UVector *) dst; HVX_Vector * restrict vsrc = (HVX_Vector *) src; assert((unsigned long) src % 128 == 0); uint32_t nvec = n / 64; uint32_t nloe = n % 64; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { HVX_Vector v = vsrc[i]; vdst[i] = v; } if (nloe) { HVX_Vector v = vsrc[i]; hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(__fp16), v); } } // copy n fp16 elements : source is aligned, destination is potentially unaligned static inline void hvx_copy_fp16_au(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { HVX_Vector * restrict vdst = (HVX_Vector *) dst; HVX_UVector * restrict vsrc = (HVX_UVector *) src; assert((unsigned long) dst % 128 == 0); uint32_t nvec = n / 64; uint32_t nloe = n % 64; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { HVX_Vector v = vsrc[i]; vdst[i] = v; } if (nloe) { HVX_Vector v = vsrc[i]; hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(__fp16), v); } } // copy n fp32 elements : source and destination are aligned to HVX 
Vector (128) static inline void hvx_copy_fp32_aa(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { HVX_Vector * restrict vdst = (HVX_Vector *) dst; HVX_Vector * restrict vsrc = (HVX_Vector *) src; assert((unsigned long) dst % 128 == 0); assert((unsigned long) src % 128 == 0); uint32_t nvec = n / 32; uint32_t nloe = n % 32; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { HVX_Vector v = vsrc[i]; vdst[i] = v; } if (nloe) { HVX_Vector v = vsrc[i]; hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), v); } } // copy n fp32 elements : source is aligned, destination is unaligned static inline void hvx_copy_fp32_ua(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { HVX_UVector * restrict vdst = (HVX_UVector *) dst; HVX_Vector * restrict vsrc = (HVX_Vector *) src; assert((unsigned long) src % 128 == 0); uint32_t nvec = n / 32; uint32_t nloe = n % 32; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { HVX_Vector v = vsrc[i]; vdst[i] = v; } if (nloe) { HVX_Vector v = vsrc[i]; hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), v); } } // copy n fp32 elements : source is unaligned, destination is aligned static inline void hvx_copy_fp32_au(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { HVX_Vector * restrict vdst = (HVX_Vector *) dst; HVX_UVector * restrict vsrc = (HVX_UVector *) src; assert((unsigned long) dst % 128 == 0); uint32_t nvec = n / 32; uint32_t nloe = n % 32; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { HVX_Vector v = vsrc[i]; vdst[i] = v; } if (nloe) { HVX_Vector v = vsrc[i]; hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), v); } } // bcast 1 fp32 element from source to n fp32 elements in destination : destination is aligned static inline void hvx_bcast_fp32_a(uint8_t * restrict dst, float elem, uint32_t n) { HVX_Vector * restrict vdst = (HVX_Vector *) dst; HVX_Vector velem = hvx_vec_splat_fp32(elem); assert((unsigned long) dst % 128 == 0); uint32_t nvec = n / 32; uint32_t nloe = n % 32; uint32_t i = 0; #pragma unroll(4) for (; i < nvec; i++) { vdst[i] = velem; } if (nloe) { hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), velem); } } /* Return whether 'n' elements from vector are in the one chunk of 'chunk_size'. */ static __attribute__((always_inline)) int32_t is_in_one_chunk(void * addr, uint32_t n, uint32_t chunk_size) { uint32_t left_off = (size_t) addr & (chunk_size - 1); uint32_t right_off = left_off + n; return right_off <= chunk_size; } static void hvx_vec_dump_fp16_n(char * pref, HVX_Vector v, uint32_t n) { HVX_VectorAlias u = { .v = v }; const uint32_t n0 = n / 16; const uint32_t n1 = n % 16; int i = 0; for (; i < n0; i++) { htp_dump_fp16_line(pref, u.fp16 + (16 * i), 16); } if (n1) { htp_dump_fp16_line(pref, u.fp16 + (16 * i), n1); } } static void hvx_vec_dump_fp16(char * pref, HVX_Vector v) { hvx_vec_dump_fp16_n(pref, v, 64); } static void hvx_vec_dump_fp32_n(char * pref, HVX_Vector v, uint32_t n) { union { HVX_Vector v; float d[32]; } u = { .v = v }; const uint32_t n0 = n / 16; const uint32_t n1 = n % 16; int i = 0; for (; i < n0; i++) { htp_dump_fp32_line(pref, u.d + (16 * i), 16); } if (n1) { htp_dump_fp32_line(pref, u.d + (16 * i), n1); } } static void hvx_vec_dump_fp32_hmt(char * pref, HVX_Vector v) { union { HVX_Vector v; float d[32]; } u = { .v = v }; FARF(HIGH, "%s: %.6f %.6f %.6f %.6f ... %.6f %.6f %.6f %.6f ... 
%.6f %.6f %.6f %.6f\n", pref, u.d[0], u.d[1], u.d[2], u.d[3], u.d[12], u.d[13], u.d[14], u.d[15], u.d[28], u.d[29], u.d[30], u.d[31]); } static void hvx_vec_dump_fp32(char * pref, HVX_Vector v) { hvx_vec_dump_fp32_n(pref, v, 32); } static void hvx_vec_dump_int32(char * pref, HVX_Vector v) { union { HVX_Vector v; int32_t d[32]; } u = { .v = v }; for (int i = 0; i < 32 / 16; i++) { htp_dump_int32_line(pref, u.d + (16 * i), 16); } } static void hvx_vec_dump_int32_hmt(char * pref, HVX_Vector v) { union { HVX_Vector v; int32_t d[32]; } u = { .v = v }; FARF(HIGH, "%s: %d %d %d %d ... %d %d %d %d ... %d %d %d %d\n", pref, u.d[0], u.d[1], u.d[2], u.d[3], u.d[12], u.d[13], u.d[14], u.d[15], u.d[28], u.d[29], u.d[30], u.d[31]); } static void hvx_vec_dump_int8_hmt(char * pref, HVX_Vector v) { union { HVX_Vector v; int8_t d[128]; } u = { .v = v }; FARF(HIGH, "%s: %d %d %d %d ... %d %d %d %d ... %d %d %d %d\n", pref, u.d[0], u.d[1], u.d[2], u.d[3], u.d[60], u.d[61], u.d[62], u.d[63], u.d[124], u.d[125], u.d[126], u.d[127]); } static void hvx_vec_dump_int8(char * pref, HVX_Vector v) { union { HVX_Vector v; int8_t d[128]; } u = { .v = v }; for (int i = 0; i < 128 / 16; i++) { htp_dump_int8_line(pref, u.d + (16 * i), 16); } } static void hvx_vec_dump_uint8(char * pref, HVX_Vector v) { union { HVX_Vector v; uint8_t d[128]; } u = { .v = v }; for (int i = 0; i < 128 / 16; i++) { htp_dump_uint8_line(pref, u.d + (16 * i), 16); } } static bool hvx_vec_eq(HVX_Vector v0, HVX_Vector v1, size_t n) { typedef union { HVX_Vector v; int8_t d[128]; } U; U u0 = { .v = v0 }; U u1 = { .v = v1 }; for (int i = 0; i < n; i++) { if (u0.d[i] != u1.d[i]) { return false; } } return true; } static inline float hvx_vec_get_fp32(HVX_Vector v) { float __attribute__((aligned(128))) x; hvx_vec_store_a(&x, 4, v); return x; } static inline HVX_Vector hvx_vec_int32_reduce_sum_n(HVX_Vector in, unsigned int n) { unsigned int total = n * 4; // total vec nbytes unsigned int width = 4; // int32 HVX_Vector sum = in, sum_t; while (width < total) { sum_t = Q6_V_vror_VR(sum, width); // rotate right sum = Q6_Vw_vadd_VwVw(sum_t, sum); // elementwise sum width = width << 1; } return sum; } static inline HVX_Vector hvx_vec_int32_reduce_sum(HVX_Vector in) { return hvx_vec_int32_reduce_sum_n(in, 32); } static inline HVX_Vector hvx_vec_qf32_reduce_sum_n(HVX_Vector in, unsigned int n) { unsigned int total = n * 4; // total vec nbytes unsigned int width = 4; // fp32 nbytes HVX_Vector sum = in, sum_t; while (width < total) { sum_t = Q6_V_vror_VR(Q6_Vsf_equals_Vqf32(sum), width); // rotate right sum = Q6_Vqf32_vadd_Vqf32Vsf(sum, sum_t); // elementwise sum width = width << 1; } return sum; } static inline HVX_Vector hvx_vec_qf32_reduce_sum(HVX_Vector in) { return hvx_vec_qf32_reduce_sum_n(in, 32); } static inline HVX_Vector hvx_vec_fp32_reduce_sum_n(HVX_Vector in, unsigned int n) { unsigned int total = n * 4; // total vec nbytes unsigned int width = 4; // fp32 nbytes HVX_Vector sum = in, sum_t; while (width < total) { sum_t = Q6_V_vror_VR(sum, width); // rotate right sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sum, sum_t)); // elementwise sum width = width << 1; } return sum; } static inline HVX_Vector hvx_vec_fp32_reduce_sum(HVX_Vector in) { return hvx_vec_fp32_reduce_sum_n(in, 32); } static inline HVX_Vector hvx_vec_reduce_max_fp16(HVX_Vector in) { unsigned total = 128; // total vec nbytes unsigned width = 2; // fp16 nbytes HVX_Vector _max = in, _max_t; while (width < total) { _max_t = Q6_V_vror_VR(_max, width); // rotate right _max = 
Q6_Vhf_vmax_VhfVhf(_max_t, _max); // elementwise max width = width << 1; } return _max; } static inline HVX_Vector hvx_vec_reduce_max2_fp16(HVX_Vector in, HVX_Vector _max) { unsigned total = 128; // total vec nbytes unsigned width = 2; // fp32 nbytes HVX_Vector _max_t; _max = Q6_Vhf_vmax_VhfVhf(in, _max); while (width < total) { _max_t = Q6_V_vror_VR(_max, width); // rotate right _max = Q6_Vhf_vmax_VhfVhf(_max_t, _max); // elementwise max width = width << 1; } return _max; } static inline HVX_Vector hvx_vec_reduce_max_fp32(HVX_Vector in) { unsigned total = 128; // total vec nbytes unsigned width = 4; // fp32 nbytes HVX_Vector _max = in, _max_t; while (width < total) { _max_t = Q6_V_vror_VR(_max, width); // rotate right _max = Q6_Vsf_vmax_VsfVsf(_max_t, _max); // elementwise max width = width << 1; } return _max; } static inline HVX_Vector hvx_vec_reduce_max2_fp32(HVX_Vector in, HVX_Vector _max) { unsigned total = 128; // total vec nbytes unsigned width = 4; // fp32 nbytes HVX_Vector _max_t; _max = Q6_Vsf_vmax_VsfVsf(in, _max); while (width < total) { _max_t = Q6_V_vror_VR(_max, width); // rotate right _max = Q6_Vsf_vmax_VsfVsf(_max_t, _max); // elementwise max width = width << 1; } return _max; } static inline HVX_Vector hvx_vec_abs_fp16(HVX_Vector v) { // abs by clearing the fp16 sign bit HVX_Vector mask = Q6_Vh_vsplat_R(0x7fff); return Q6_V_vand_VV(v, mask); } static inline HVX_Vector hvx_vec_neg_fp16(HVX_Vector v) { // neg by setting the fp16 sign bit HVX_Vector mask = Q6_Vh_vsplat_R(0x8000); return Q6_V_vxor_VV(v, mask); } static inline HVX_Vector hvx_vec_abs_fp32(HVX_Vector v) { // abs by clearing the fp32 sign bit HVX_Vector mask = Q6_V_vsplat_R(0x7fffffff); return Q6_V_vand_VV(v, mask); } static inline HVX_Vector hvx_vec_neg_fp32(HVX_Vector v) { #if __HTP_ARCH__ > 75 return Q6_Vsf_vfneg_Vsf(v); #else // neg by setting the fp32 sign bit HVX_Vector mask = Q6_V_vsplat_R(0x80000000); return Q6_V_vxor_VV(v, mask); #endif // __HTP_ARCH__ > 75 } // ==================================================== // FUNCTION: 1/(x+1) y(0) = 1, y(0.5) = 0.6667, y(1) = 0.5 // Order:3; continuity: True; Ends forced: True // Mode: unsigned; Result fractional bits: 14 // Peak Error: 1.1295e-04 Rms Error: 2.8410e-05 Mean Error: 1.1370e-05 // 32769 -32706 31252 -10589 // 32590 -30635 22793 -4493 // 32066 -27505 16481 -2348 // 31205 -24054 11849 -1306 static inline HVX_Vector hvx_vec_recip_xp1_O3_unsigned(HVX_Vector vx) { // input is 0..0xffff representing 0.0 .. 1.0 HVX_Vector p; p = Q6_Vh_vlut4_VuhPh(vx, 0xFAE6F6D4EE73D6A3ull); p = Q6_Vh_vmpa_VhVhVuhPuh_sat(p, vx, 0x2E49406159097A14ull); p = Q6_Vh_vmps_VhVhVuhPuh_sat(p, vx, 0x5DF66B7177AB7FC2ull); p = Q6_Vh_vmpa_VhVhVuhPuh_sat(p, vx, 0x79E57D427F4E8001ull); return p; // signed result, 14 fractional bits } // Find reciprocal of fp16. // (1) first, convert to fp32, multiplying by 1.0; this is done to // handle denormals. Ignoring sign and zero, result should be at // least 5.9604645e-08 (32-bit code 0x33800000) and at most 131008 (0x47ffe000) // (exponent in range [103,143]) // (2) extract the mantissa into 16-bit unsigned; find reciprocal using a fitted poly // (3) put this, along with '253-exp' (exp from (1)) together to make an qf32 // (4) convert that to fp16 // (5) put sign back in. Also, if the original value (w/o sign) was <0x81, replace // the result with the max value. 
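// In other words: writing |x| = 2^(e-127) * (1 + m) with m in [0,1), the code
// computes 1/|x| = 2^(127-e) * 1/(1+m); the polynomial above supplies 1/(1+m)
// with 14 fractional bits, and the leading-zero count re-normalizes that value
// before it is packed back into fp16 exponent/mantissa fields.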
static inline HVX_Vector hvx_vec_inverse_fp16(HVX_Vector vals) { HVX_Vector em_mask = Q6_Vh_vsplat_R(0x7FFF); HVX_Vector avals = Q6_V_vand_VV(vals, em_mask); HVX_VectorPred is_neg = Q6_Q_vcmp_gt_VhVh(avals, vals); // is too small to 1/x ? for 'standard' fp16, this would be 0x101 HVX_VectorPred is_small = Q6_Q_vcmp_gt_VhVh(Q6_Vh_vsplat_R(0x101), avals); HVX_VectorPair to_qf32 = Q6_Wqf32_vmpy_VhfVhf(avals, Q6_Vh_vsplat_R(0x3C00)); // *1.0 HVX_Vector to_f32_0 = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(to_qf32)); HVX_Vector to_f32_1 = Q6_Vsf_equals_Vqf32(Q6_V_hi_W(to_qf32)); // bits 22..13 contain the mantissa now (w/o hidden bit); move to bit 14..5 of a 16-bit vector HVX_Vector mant_u16 = Q6_Vh_vshuffo_VhVh(Q6_Vw_vasl_VwR(to_f32_1, 9), Q6_Vw_vasl_VwR(to_f32_0, 9)); // likewise extract the upper 16 from each, containing the exponents in range 103..142 HVX_Vector exp_u16 = Q6_Vh_vshuffo_VhVh(to_f32_1, to_f32_0); //Get exponent in IEEE 32-bit representation exp_u16 = Q6_Vuh_vlsr_VuhR(exp_u16, 7); // so, mant_u16 contains an unbiased mantissa in upper 10 bits of each u16 lane // We can consider it to be x-1.0, with 16 fractional bits, where 'x' is in range [1.0,2.0) // Use poly to transform to 1/x, with 14 fractional bits // HVX_Vector rm = hvx_vec_recip_xp1_O3_unsigned(mant_u16); HVX_Vector vcl0 = Q6_Vuh_vcl0_Vuh(rm); //count leading zeros // Get mantissa for 16-bit represenation HVX_Vector mant_recip = Q6_V_vand_VV(Q6_Vh_vasr_VhR(Q6_Vh_vasl_VhVh(rm, vcl0), 5), Q6_Vh_vsplat_R(0x03FF)); //Compute Reciprocal Exponent HVX_Vector exp_recip = Q6_Vh_vsub_VhVh(Q6_Vh_vsub_VhVh(Q6_Vh_vsplat_R(254), exp_u16), Q6_Vh_vsub_VhVh(vcl0, Q6_Vh_vsplat_R(1))); //Convert it for 16-bit representation exp_recip = Q6_Vh_vadd_VhVh_sat(Q6_Vh_vsub_VhVh(exp_recip, Q6_Vh_vsplat_R(127)), Q6_Vh_vsplat_R(15)); exp_recip = Q6_Vh_vasl_VhR(exp_recip, 10); //Merge exponent and mantissa for reciprocal HVX_Vector recip = Q6_V_vor_VV(exp_recip, mant_recip); // map 'small' inputs to standard largest value 0x7bff recip = Q6_V_vmux_QVV(is_small, Q6_Vh_vsplat_R(0x7bff), recip); // add sign back recip = Q6_V_vandor_VQR(recip, is_neg, 0x80008000); return recip; } #define IEEE_VSF_EXPLEN (8) #define IEEE_VSF_EXPBIAS (127) #define IEEE_VSF_EXPMASK (0xFF) #define IEEE_VSF_MANTLEN (23) #define IEEE_VSF_MANTMASK (0x7FFFFF) #define IEEE_VSF_MIMPMASK (0x800000) static inline HVX_Vector hvx_vec_truncate_fp32(HVX_Vector in_vec) { HVX_Vector mask_mant_v = Q6_V_vsplat_R(IEEE_VSF_MANTMASK); HVX_Vector mask_impl_v = Q6_V_vsplat_R(IEEE_VSF_MIMPMASK); HVX_Vector const_zero_v = Q6_V_vzero(); HVX_VectorPred q_negative = Q6_Q_vcmp_gt_VwVw(const_zero_v, in_vec); HVX_Vector expval_v = in_vec >> IEEE_VSF_MANTLEN; expval_v &= IEEE_VSF_EXPMASK; expval_v -= IEEE_VSF_EXPBIAS; // negative exp == fractional value HVX_VectorPred q_negexp = Q6_Q_vcmp_gt_VwVw(const_zero_v, expval_v); HVX_Vector rshift_v = IEEE_VSF_MANTLEN - expval_v; // fractional bits - exp shift HVX_Vector mant_v = in_vec & mask_mant_v; // obtain mantissa HVX_Vector vout = Q6_Vw_vadd_VwVw(mant_v, mask_impl_v); // add implicit 1.0 vout = Q6_Vw_vasr_VwVw(vout, rshift_v); // shift to obtain truncated integer vout = Q6_V_vmux_QVV(q_negexp, const_zero_v, vout); // expval<0 -> 0 HVX_Vector neg_vout = -vout; vout = Q6_V_vmux_QVV(q_negative, neg_vout, vout); // handle negatives return (vout); } static inline HVX_Vector hvx_vec_floor_fp32(HVX_Vector in_vec) { HVX_Vector mask_mant_v = Q6_V_vsplat_R(IEEE_VSF_MANTMASK); HVX_Vector mask_impl_v = Q6_V_vsplat_R(IEEE_VSF_MIMPMASK); HVX_Vector const_mnlen_v = 
Q6_V_vsplat_R(IEEE_VSF_MANTLEN); HVX_Vector const_zero_v = Q6_V_vzero(); HVX_Vector const_negone_v = Q6_V_vsplat_R(0xbf800000); // -1 IEEE vsf HVX_VectorPred q_negative = Q6_Q_vcmp_gt_VwVw(const_zero_v, in_vec); HVX_Vector expval_v = in_vec >> IEEE_VSF_MANTLEN; expval_v &= IEEE_VSF_EXPMASK; expval_v -= IEEE_VSF_EXPBIAS; HVX_VectorPred q_negexp = Q6_Q_vcmp_gt_VwVw(const_zero_v, expval_v); HVX_VectorPred q_expltmn = Q6_Q_vcmp_gt_VwVw(const_mnlen_v, expval_v); HVX_VectorPred q_negexp_pos = Q6_Q_vcmp_gtand_QVwVw(q_negexp, in_vec, const_zero_v); HVX_VectorPred q_negexp_neg = Q6_Q_vcmp_gtand_QVwVw(q_negexp, const_zero_v, in_vec); // if expval < 0 (q_negexp) // <0, floor is 0 // if vin > 0 // floor = 0 // if vin < 0 // floor = -1 // if expval < mant_len (q_expltmn) // >0, but fraction may exist // get sign (q_negative) // mask >> expval // fraction bits to mask off // vout = ~(mask) // apply mask to remove fraction // if (qneg) // negative floor is one less (more, sign bit for neg) // vout += ((impl_mask) >> expval) // if (mask && vin) // vout = vin // else // already an integer // ; // no change // compute floor mask_mant_v >>= expval_v; HVX_Vector neg_addin_v = mask_impl_v >> expval_v; HVX_Vector vout_neg_addin = Q6_Vw_vadd_VwVw(in_vec, neg_addin_v); HVX_Vector vout = Q6_V_vmux_QVV(q_negative, vout_neg_addin, in_vec); HVX_Vector mask_chk_v = Q6_V_vand_VV(in_vec, mask_mant_v); // chk if bits set HVX_VectorPred q_integral = Q6_Q_vcmp_eq_VwVw(const_zero_v, mask_chk_v); HVX_Vector not_mask_v = Q6_V_vnot_V(mask_mant_v); // frac bits to clear HVX_Vector vfrfloor_v = Q6_V_vand_VV(vout, not_mask_v); // clear frac bits vout = in_vec; vout = Q6_V_vmux_QVV(q_expltmn, vfrfloor_v, vout); // expval0 -> 0 vout = Q6_V_vmux_QVV(q_negexp_neg, const_negone_v, vout); // expval<0 x<0 -> -1 return vout; } static inline HVX_Vector hvx_vec_i16_from_hf_rnd_sat(HVX_Vector vin) { // This looks complicated. // Ideally should just be Q6_Vh_equals_Vhf(vin) // but that instruction does not do proper rounding. // convert to qf32, multiplying by 1.0 in the process. HVX_VectorPair v32 = Q6_Wqf32_vmpy_VhfVhf(vin, Q6_Vh_vsplat_R(0x3C00)); // 'in-range' values are +/32752. // add 192K to it, convert to sf HVX_Vector v192K = Q6_V_vsplat_R(0x48400000); HVX_Vector vsf_0 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(Q6_V_lo_W(v32), v192K)); HVX_Vector vsf_1 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(Q6_V_hi_W(v32), v192K)); // for in-range cases, result is {163858... 229360} so the exponent is always 144. // if we extract bits 21..0 as a signed quantity, and round 6 bits off, that will be the answer. // Start by <<10 to get the final 'sign' bit in bit 15... 
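// (The 0x48400000 bias is 196608.0f = 1.5 * 2^17: adding it forces every in-range
//  value into the [2^17, 2^18) binade, where the ulp is 2^-6, so the desired integer
//  sits at a fixed mantissa position with 6 fractional bits; the <<10 below plus the
//  rounding >>16 performed by vround strips exactly those 6 bits.)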
vsf_0 = Q6_Vw_vasl_VwR(vsf_0, 10); vsf_1 = Q6_Vw_vasl_VwR(vsf_1, 10); // now round down to 16 return Q6_Vh_vround_VwVw_sat(vsf_1, vsf_0); } static inline HVX_Vector hvx_vec_inverse_fp32(HVX_Vector v_sf) { HVX_Vector inv_aprox_sf = Q6_V_vsplat_R(0x7EEEEBB3); HVX_Vector two_sf = hvx_vec_splat_fp32(2.0); // First approximation HVX_Vector i_sf = Q6_Vw_vsub_VwVw(inv_aprox_sf, v_sf); HVX_Vector r_qf; // Refine r_qf = Q6_Vqf32_vmpy_VsfVsf( i_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_VsfVsf(two_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(i_sf, v_sf))))); r_qf = Q6_Vqf32_vmpy_Vqf32Vqf32( r_qf, Q6_Vqf32_vsub_VsfVsf(two_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(r_qf), v_sf)))); r_qf = Q6_Vqf32_vmpy_Vqf32Vqf32( r_qf, Q6_Vqf32_vsub_VsfVsf(two_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(r_qf), v_sf)))); return Q6_Vsf_equals_Vqf32(r_qf); } #define FAST_SIGMOID_LOG2F (0x3fb8aa3b) // 1.442695022 #define FAST_SIGMOID_C1 (0x3d009076) // 0.03138777 #define FAST_SIGMOID_C2 (0x3e8d74bd) // 0.276281267 #define FAST_SIGMOID_C3 (0x3f000000) // 0.5 static inline HVX_Vector hvx_vec_fast_sigmoid_fp32(HVX_Vector v) { v = Q6_Vqf32_vmpy_VsfVsf(v, Q6_V_vsplat_R(FAST_SIGMOID_LOG2F)); v = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v), Q6_V_vsplat_R(FAST_SIGMOID_C3)); HVX_Vector in_int = hvx_vec_truncate_fp32(Q6_Vsf_equals_Vqf32(v)); HVX_Vector x = Q6_Vqf32_vsub_Vqf32Vsf(v, Q6_Vsf_equals_Vw(in_int)); HVX_Vector xx = Q6_Vqf32_vmpy_Vqf32Vqf32(x, x); HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(xx), Q6_V_vsplat_R(FAST_SIGMOID_C2)); v1 = Q6_Vqf32_vadd_Vqf32Vsf(v1, Q6_V_vsplat_R(FAST_SIGMOID_LOG2F)); HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(x), Q6_V_vsplat_R(FAST_SIGMOID_C1)); v2 = Q6_Vqf32_vmpy_Vqf32Vqf32(v2, xx); v2 = Q6_Vqf32_vadd_Vqf32Vqf32(v2, x); HVX_Vector v3 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(v2, v1)); HVX_Vector v3_exponent = Q6_Vw_vasl_VwR(v3, 1); v3_exponent = Q6_Vuw_vlsr_VuwR(v3_exponent, 24); v3_exponent = Q6_Vw_vadd_VwVw(in_int, v3_exponent); v3 = Q6_Vw_vaslacc_VwVwR(v3, in_int, 24); HVX_Vector v4 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(v2, v1)); HVX_Vector v5 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_VsfVsf(v3, v4)); HVX_Vector res = hvx_vec_inverse_fp32(v5); res = Q6_Vqf32_vmpy_VsfVsf(v3, res); return Q6_Vsf_equals_Vqf32(res); } #define EXP_COEFF_5 (0x39506967) // 0.000198757 = 1/(7!) #define EXP_COEFF_4 (0x3AB743CE) // 0.0013982 = 1/(6!) #define EXP_COEFF_3 (0x3C088908) // 0.00833345 = 1/(5!) #define EXP_COEFF_2 (0x3D2AA9C1) // 0.416658 = 1/(4!) #define EXP_COEFF_1 (0x3E2AAAAA) // 0.16666667 = 1/(3!) #define EXP_COEFF_0 (0x3F000000) // 0.5 = 1/(2!) #define EXP_LOGN2 (0x3F317218) // ln(2) = 0.6931471805 #define EXP_LOG2E (0x3FB8AA3B) // log2(e) = 1/ln(2) = 1.4426950408 #define EXP_ONE (0x3f800000) // 1.0 #define EXP_RANGE_R (0x41a00000) // 20.0 #define EXP_RANGE_L (0xc1a00000) // -20.0 static inline HVX_Vector hvx_vec_exp_fp32(HVX_Vector in_vec) { HVX_Vector z_qf32_v; HVX_Vector x_v; HVX_Vector x_qf32_v; HVX_Vector y_v; HVX_Vector k_v; HVX_Vector f_v; HVX_Vector epsilon_v; HVX_Vector log2e = Q6_V_vsplat_R(EXP_LOG2E); HVX_Vector logn2 = Q6_V_vsplat_R(EXP_LOGN2); HVX_Vector E_const; HVX_Vector zero_v = Q6_V_vzero(); // exp(x) is approximated as follows: // f = floor(x/ln(2)) = floor(x*log2(e)) // epsilon = x - f*ln(2) // exp(x) = exp(epsilon+f*ln(2)) // = exp(epsilon)*exp(f*ln(2)) // = exp(epsilon)*2^f // // Since epsilon is close to zero, it can be approximated with its Taylor series: // exp(x) ~= 1+x+x^2/2!+x^3/3!+...+x^n/n!+... 
// Preserving the first eight elements, we get: // exp(x) ~= 1+x+e0*x^2+e1*x^3+e2*x^4+e3*x^5+e4*x^6+e5*x^7 // = 1+x+(E0+(E1+(E2+(E3+(E4+E5*x)*x)*x)*x)*x)*x^2 HVX_Vector temp_v = in_vec; // Clamp inputs to (-20.0, 20.0) HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, Q6_V_vsplat_R(EXP_RANGE_R)); HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(Q6_V_vsplat_R(EXP_RANGE_L), in_vec); in_vec = Q6_V_vmux_QVV(pred_cap_right, Q6_V_vsplat_R(EXP_RANGE_R), temp_v); in_vec = Q6_V_vmux_QVV(pred_cap_left, Q6_V_vsplat_R(EXP_RANGE_L), temp_v); epsilon_v = Q6_Vqf32_vmpy_VsfVsf(log2e, in_vec); epsilon_v = Q6_Vsf_equals_Vqf32(epsilon_v); // f_v is the floating point result and k_v is the integer result f_v = hvx_vec_floor_fp32(epsilon_v); k_v = hvx_vec_truncate_fp32(f_v); x_qf32_v = Q6_Vqf32_vadd_VsfVsf(in_vec, zero_v); // x = x - f_v * logn2; epsilon_v = Q6_Vqf32_vmpy_VsfVsf(f_v, logn2); x_qf32_v = Q6_Vqf32_vsub_Vqf32Vqf32(x_qf32_v, epsilon_v); // normalize before every QFloat's vmpy x_qf32_v = Q6_Vqf32_vadd_Vqf32Vsf(x_qf32_v, zero_v); // z = x * x; z_qf32_v = Q6_Vqf32_vmpy_Vqf32Vqf32(x_qf32_v, x_qf32_v); z_qf32_v = Q6_Vqf32_vadd_Vqf32Vsf(z_qf32_v, zero_v); x_v = Q6_Vsf_equals_Vqf32(x_qf32_v); // y = E4 + E5 * x; E_const = Q6_V_vsplat_R(EXP_COEFF_5); y_v = Q6_Vqf32_vmpy_VsfVsf(E_const, x_v); E_const = Q6_V_vsplat_R(EXP_COEFF_4); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); // y = E3 + y * x; E_const = Q6_V_vsplat_R(EXP_COEFF_3); y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); // y = E2 + y * x; E_const = Q6_V_vsplat_R(EXP_COEFF_2); y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); // y = E1 + y * x; E_const = Q6_V_vsplat_R(EXP_COEFF_1); y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); // y = E0 + y * x; E_const = Q6_V_vsplat_R(EXP_COEFF_0); y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); // y = x + y * z; y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, z_qf32_v); y_v = Q6_Vqf32_vadd_Vqf32Vqf32(y_v, x_qf32_v); y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); // y = y + 1.0; y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, Q6_V_vsplat_R(EXP_ONE)); // insert exponents // y = ldexpf(y, k); // y_v += k_v; // qf32 // modify exponent y_v = Q6_Vsf_equals_Vqf32(y_v); // add k_v to the exponent of y_v HVX_Vector y_v_exponent = Q6_Vw_vasl_VwR(y_v, 1); y_v_exponent = Q6_Vuw_vlsr_VuwR(y_v_exponent, IEEE_VSF_MANTLEN + 1); y_v_exponent = Q6_Vw_vadd_VwVw(k_v, y_v_exponent); // exponent cannot be negative; if overflow is detected, result is set to zero HVX_VectorPred qy_v_negative_exponent = Q6_Q_vcmp_gt_VwVw(zero_v, y_v_exponent); y_v = Q6_Vw_vaslacc_VwVwR(y_v, k_v, IEEE_VSF_MANTLEN); y_v = Q6_V_vmux_QVV(qy_v_negative_exponent, zero_v, y_v); return y_v; } #define RSQRT_CONST 0x5f3759df // Constant for fast inverse square root calculation #define RSQRT_ONE_HALF 0x3f000000 // 0.5 #define RSQRT_THREE_HALVES 0x3fc00000 // 1.5 static inline HVX_Vector hvx_vec_rsqrt_fp32(HVX_Vector in_vec) { //Algorithm : // x2 = input*0.5 // y = * (long *) &input // y = 0x5f3759df - (y>>2) // y = y*(threehalfs - x2*y*y) HVX_Vector rsqrtconst = Q6_V_vsplat_R(RSQRT_CONST); HVX_Vector onehalf = Q6_V_vsplat_R(RSQRT_ONE_HALF); HVX_Vector threehalfs = Q6_V_vsplat_R(RSQRT_THREE_HALVES); 
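    // Scalar form of the same bit-trick rsqrt, for reference only (kept under
    // #if 0 so it is never compiled; 'x' stands for one hypothetical scalar lane).
    // N.b. the shift in the comment above should read y>>1, which is what the code does.
#if 0
    union { float f; int32_t i; } u = { .f = x };
    u.i = 0x5f3759df - (u.i >> 1);          // initial guess from the exponent bits
    float y = u.f;
    for (int it = 0; it < 3; it++) {        // three Newton-Raphson refinements, as below
        y = y * (1.5f - 0.5f * x * y * y);
    }
#endif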
HVX_Vector x2, y, ypower2, temp; x2 = Q6_Vqf32_vmpy_VsfVsf(in_vec, onehalf); x2 = Q6_Vqf32_vadd_Vqf32Vsf(x2, Q6_V_vzero()); y = Q6_Vw_vasr_VwR(in_vec, 1); y = Q6_Vw_vsub_VwVw(rsqrtconst, y); // 1st iteration ypower2 = Q6_Vqf32_vmpy_VsfVsf(y, y); ypower2 = Q6_Vqf32_vadd_Vqf32Vsf(ypower2, Q6_V_vzero()); temp = Q6_Vqf32_vmpy_Vqf32Vqf32(x2, ypower2); temp = Q6_Vqf32_vsub_VsfVsf(threehalfs, Q6_Vsf_equals_Vqf32(temp)); temp = Q6_Vqf32_vmpy_VsfVsf(y, Q6_Vsf_equals_Vqf32(temp)); // 2nd iteration y = Q6_Vqf32_vadd_Vqf32Vsf(temp, Q6_V_vzero()); ypower2 = Q6_Vqf32_vmpy_Vqf32Vqf32(y, y); ypower2 = Q6_Vqf32_vadd_Vqf32Vsf(ypower2, Q6_V_vzero()); temp = Q6_Vqf32_vmpy_Vqf32Vqf32(x2, ypower2); temp = Q6_Vqf32_vsub_VsfVsf(threehalfs, Q6_Vsf_equals_Vqf32(temp)); temp = Q6_Vqf32_vmpy_Vqf32Vqf32(y, temp); // 3rd iteration y = Q6_Vqf32_vadd_Vqf32Vsf(temp, Q6_V_vzero()); ypower2 = Q6_Vqf32_vmpy_Vqf32Vqf32(y, y); ypower2 = Q6_Vqf32_vadd_Vqf32Vsf(ypower2, Q6_V_vzero()); temp = Q6_Vqf32_vmpy_Vqf32Vqf32(x2, ypower2); temp = Q6_Vqf32_vsub_VsfVsf(threehalfs, Q6_Vsf_equals_Vqf32(temp)); temp = Q6_Vqf32_vmpy_Vqf32Vqf32(y, temp); return Q6_Vsf_equals_Vqf32(temp); } static inline HVX_Vector hvx_vec_fast_sigmoid_fp32_guard(HVX_Vector v, HVX_Vector one, HVX_Vector max_exp, HVX_Vector min_exp) { const HVX_VectorPred pred_max = Q6_Q_vcmp_gt_VsfVsf(max_exp, v); const HVX_VectorPred pred_min = Q6_Q_vcmp_gt_VsfVsf(v, min_exp); HVX_Vector out = hvx_vec_fast_sigmoid_fp32(v); out = Q6_V_vmux_QVV(pred_max, out, one); return Q6_V_vmux_QVV(pred_min, out, Q6_V_vzero()); } static inline void hvx_fast_sigmoid_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems) { int step_of_1 = num_elems >> 5; int remaining = num_elems - step_of_1 * VLEN_FP32; const HVX_Vector * restrict v_src = (HVX_Vector *) src; HVX_Vector * restrict v_dst = (HVX_Vector *) dst; static const float kMinExp = -87.f; // 0 static const float kMaxExp = 87.f; // 1 const HVX_Vector one = hvx_vec_splat_fp32(1.f); const HVX_Vector max_exp = hvx_vec_splat_fp32(kMaxExp); const HVX_Vector min_exp = hvx_vec_splat_fp32(kMinExp); #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { v_dst[i] = hvx_vec_fast_sigmoid_fp32_guard(v_src[i], one, max_exp, min_exp); } if (remaining > 0) { const float * srcf = ((const float *) src) + step_of_1* VLEN_FP32; float * dstf = (float *) dst + step_of_1*VLEN_FP32; HVX_Vector in = *(HVX_UVector *) srcf; HVX_Vector out = hvx_vec_fast_sigmoid_fp32_guard(in, one, max_exp, min_exp); hvx_vec_store_u((void *) dstf, remaining * SIZEOF_FP32, out); } } static inline void hvx_sigmoid_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems){ int step_of_1 = num_elems >> 5; // divby 32, because 32 float = 128 bytes per HVX vector int leftover = num_elems - (step_of_1 * VLEN_FP32); int32_t leftover_size = leftover * sizeof(float); static const float kMinExp = -87.f; // 0 static const float kMaxExp = 87.f; // 1 const HVX_Vector one = hvx_vec_splat_fp32(1.f); const HVX_Vector max_exp = hvx_vec_splat_fp32(kMaxExp); const HVX_Vector min_exp = hvx_vec_splat_fp32(kMinExp); const float *input = (float *)src; float *output = (float *)dst; HVX_Vector * input_v_ptr = (HVX_Vector *) input; HVX_UVector * output_v_ptr = (HVX_UVector *) output; HVX_Vector slinep; HVX_Vector slinec; HVX_Vector sline; slinep = *input_v_ptr++; #pragma unroll(4) for (int i = step_of_1 - 1; i > 0; i--) { slinec = *input_v_ptr++; sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); *((HVX_UVector *) (output_v_ptr++)) = 
hvx_vec_fast_sigmoid_fp32_guard(sline, one, max_exp, min_exp); /* Prepare slinep for next iteration */ slinep = slinec; } if (step_of_1 > 0) { slinec = htp_is_aligned(input_v_ptr, 128) && leftover == 0 ? slinep : *input_v_ptr++; sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); *((HVX_UVector *) (output_v_ptr++)) = hvx_vec_fast_sigmoid_fp32_guard(sline, one, max_exp, min_exp); ; slinep = slinec; } if (leftover > 0) { slinec = (is_in_one_chunk(input_v_ptr, leftover_size, 128) ? slinep : *input_v_ptr++); sline = Q6_V_valign_VVR(slinec, slinep, (size_t) input); HVX_Vector sout = hvx_vec_fast_sigmoid_fp32_guard(sline, one, max_exp, min_exp); hvx_vec_store_u(output_v_ptr, leftover_size, sout); } } float hvx_sum_of_squares_f32(const uint8_t * restrict src, const int num_elems); void hvx_mul_f32(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems); void hvx_mul_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems); void hvx_mul_mul_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, const uint8_t * restrict src2, uint8_t * restrict dst, const int num_elems); void hvx_mul_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); void hvx_add_f32(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems); void hvx_add_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems); void hvx_add_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); void hvx_sub_f32(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems); void hvx_sub_f32_opt(const uint8_t * restrict src0, const uint8_t * restrict src1, uint8_t * restrict dst, const int num_elems); void hvx_sub_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); void hvx_scale_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, const float scale); void hvx_inverse_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems); void hvx_sigmoid_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems); void hvx_exp_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, bool negate); float hvx_self_max_f32(const uint8_t * restrict src, const int num_elems); float hvx_self_sum_f32(const uint8_t * restrict src, const int num_elems); void hvx_min_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); void hvx_clamp_scalar_f32(const uint8_t * restrict src, const float limit_left, const float limit_right, uint8_t * restrict dst, const int num_elems); #endif /* HVX_UTILS_H */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/main.c000066400000000000000000000666501512524704700214300ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" #pragma clang diagnostic ignored "-Wunused-function" #define FARF_ERROR 1 #define FARF_HIGH 1 #define FARF_MEDIUM 0 #define FARF_LOW 0 #include #include #include #include #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "ops-utils.h" #include 
"worker-pool.h" AEEResult htp_iface_open(const char * uri, remote_handle64 * handle) { struct htp_context * ctx; int err = 0; ctx = calloc(1, sizeof(*ctx)); if (ctx == NULL) { return AEE_ENOMEMORY; } // Use the context structure as a handle *handle = (remote_handle64) ctx; // Enable FARF logs HAP_setFARFRuntimeLoggingParams(0xffff, NULL, 0); // Set client class { HAP_power_request_t request; memset(&request, 0, sizeof(HAP_power_request_t)); request.type = HAP_power_set_apptype; request.apptype = HAP_POWER_COMPUTE_CLIENT_CLASS; if ((err = HAP_power_set((void *) ctx, &request)) != 0) { return err; } } { HAP_power_request_t request; memset(&request, 0, sizeof(request)); request.type = HAP_power_set_DCVS_v3; request.dcvs_v3.set_dcvs_enable = TRUE; request.dcvs_v3.dcvs_enable = TRUE; request.dcvs_v3.dcvs_option = HAP_DCVS_V2_PERFORMANCE_MODE; request.dcvs_v3.set_bus_params = TRUE; request.dcvs_v3.bus_params.min_corner = HAP_DCVS_VCORNER_MAX; request.dcvs_v3.bus_params.max_corner = HAP_DCVS_VCORNER_MAX; request.dcvs_v3.bus_params.target_corner = HAP_DCVS_VCORNER_MAX; request.dcvs_v3.set_core_params = TRUE; request.dcvs_v3.core_params.min_corner = HAP_DCVS_VCORNER_MAX; request.dcvs_v3.core_params.max_corner = HAP_DCVS_VCORNER_MAX; request.dcvs_v3.core_params.target_corner = HAP_DCVS_VCORNER_MAX; request.dcvs_v3.set_sleep_disable = TRUE; request.dcvs_v3.sleep_disable = TRUE; if ((err = HAP_power_set((void *) ctx, &request)) != 0) { return err; } memset(&request, 0, sizeof(request)); request.type = HAP_power_set_HVX; request.hvx.power_up = TRUE; if ((err = HAP_power_set((void *) ctx, &request)) != 0) { return err; } } { // Power on HMX HAP_power_request_t request; memset(&request, 0, sizeof(HAP_power_request_t)); request.type = HAP_power_set_HMX; request.hmx.power_up = TRUE; FARF(ALWAYS, "Powering HMX on\n"); err = HAP_power_set((void *) &ctx, &request); if (err != AEE_SUCCESS) { FARF(ERROR, "Error powering on HMX."); return err; } } return AEE_SUCCESS; } AEEResult htp_iface_close(remote_handle64 handle) { struct htp_context * ctx = (struct htp_context *) handle; if (!ctx) { return AEE_EBADPARM; } if (ctx->queue) { FARF(ERROR, "Closing handle with queue still open"); return AEE_EITEMBUSY; } free(ctx); return AEE_SUCCESS; } AEEResult htp_iface_enable_etm(remote_handle64 handle) { int err = HAP_user_etm_enable(); if (err) { if (err == AEE_EVERSIONNOTSUPPORT) { FARF(ERROR, "API HAP_user_etm_enable is not supported\n"); } else { FARF(ERROR, "Error executing HAP_user_etm_enable with error code : 0x%x\n", err); } } return err; } AEEResult htp_iface_disable_etm(remote_handle64 handle) { int err = HAP_user_etm_disable(); if (err) { if (err == AEE_EVERSIONNOTSUPPORT) { FARF(ERROR, "API HAP_user_etm_disable is not supported\n"); } else { FARF(ERROR, "Error executing HAP_user_etm_disable with error code : 0x%x\n", err); } } return err; } static int vtcm_acquire(struct htp_context * ctx) { int err; if (!ctx->vtcm_valid) { // Temporarily bump thread priority to make sure it's higher than other sessions. // This way the resource manager will notify the other thread to release VTCM. // Note that we need to reaquire VTCM at normal priority for this to work next time. 
qurt_thread_set_priority(qurt_thread_get_id(), ctx->thread_prio - 10); err = HAP_compute_res_acquire_cached(ctx->vtcm_rctx, 1000000); if (err != 0) { FARF(ERROR, "Failed to acquire VTCM: 0x%08x", (unsigned)err); abort(); } HAP_compute_res_release_cached(ctx->vtcm_rctx); qurt_thread_set_priority(qurt_thread_get_id(), ctx->thread_prio); err = HAP_compute_res_acquire_cached(ctx->vtcm_rctx, 1000000); if (err != 0) { FARF(ERROR, "Failed to acquire VTCM: 0x%08x", (unsigned)err); abort(); } ctx->vtcm_valid = true; } ctx->vtcm_inuse = true; return 0; } static int vtcm_release(struct htp_context * ctx) { ctx->vtcm_inuse = false; if (ctx->vtcm_valid && ctx->vtcm_needs_release) { ctx->vtcm_valid = false; ctx->vtcm_needs_release = false; HAP_compute_res_release_cached(ctx->vtcm_rctx); } return 0; } static int vtcm_release_callback(unsigned int rctx, void * state) { struct htp_context * ctx = (struct htp_context *) state; if (!ctx || ctx->vtcm_rctx != rctx) { return AEE_EBADPARM; } // If VTCM is not inuse (not processing Ops) release it right here // otherwise we'll release it once we're done with the current Op. if (ctx->vtcm_inuse) { ctx->vtcm_needs_release = false; return 0; } ctx->vtcm_valid = false; HAP_compute_res_release_cached(ctx->vtcm_rctx); return 0; } static int vtcm_alloc(struct htp_context * ctx) { unsigned int vtcm_size = 8 * 1024 * 1024; // 8MB default HAP_compute_res_query_VTCM(0, &vtcm_size, NULL, NULL, NULL); compute_res_attr_t attr; HAP_compute_res_attr_init(&attr); HAP_compute_res_attr_set_serialize(&attr, 0); HAP_compute_res_attr_set_cache_mode(&attr, 1); HAP_compute_res_attr_set_vtcm_param_v2(&attr, vtcm_size, 0, vtcm_size); HAP_compute_res_attr_set_release_callback(&attr, vtcm_release_callback, (void *) ctx); HAP_compute_res_attr_set_hmx_param(&attr, 1); // Allocate VTCM for scratch pads uint32_t rctx = HAP_compute_res_acquire(&attr, 1000000 /* timeout */); if (!rctx) { FARF(ERROR, "failed to allocate %zu bytes VTCM\n", ctx->vtcm_size); return AEE_ENOMEMORY; } void * vtcm_ptr; if (HAP_compute_res_attr_get_vtcm_ptr_v2(&attr, &vtcm_ptr, &vtcm_size) != 0) { HAP_compute_res_release(rctx); FARF(ERROR, "failed to allocate %zu bytes VTCM (new)\n", ctx->vtcm_size); return AEE_ENOMEMORY; } ctx->vtcm_base = (uint8_t *) vtcm_ptr; ctx->vtcm_size = vtcm_size; ctx->vtcm_rctx = rctx; ctx->vtcm_valid = false; ctx->vtcm_inuse = false; ctx->vtcm_needs_release = false; return 0; } static void vtcm_free(struct htp_context * ctx) { if (ctx->vtcm_rctx) { HAP_compute_res_release(ctx->vtcm_rctx); ctx->vtcm_base = 0; ctx->vtcm_rctx = 0; } } static void htp_packet_callback(dspqueue_t queue, int error, void * context); static void htp_error_callback(dspqueue_t queue, int error, void * context); AEEResult htp_iface_start(remote_handle64 handle, uint32 sess_id, uint64 dsp_queue_id, uint32 n_hvx) { struct htp_context * ctx = (struct htp_context *) handle; if (!ctx) { return AEE_EBADPARM; } if (ctx->queue) { FARF(ERROR, "Queue already open"); return AEE_EITEMBUSY; } // Import queue created on the CPU int err = dspqueue_import(dsp_queue_id, // Queue ID from dspqueue_export htp_packet_callback, // Packet callback htp_error_callback, // Error callback; no errors expected on the DSP (void *) ctx, // Callback context &ctx->queue); if (err) { FARF(ERROR, "Queue import failed with 0x%08x", (unsigned) err); return err; } ctx->thread_id = qurt_thread_get_id(); ctx->thread_prio = qurt_thread_get_priority(ctx->thread_id); // allocate VTCM err = vtcm_alloc(ctx); if (err != AEE_SUCCESS) { FARF(ERROR, "Unable to allocate 
VTCM"); return AEE_ENOMEMORY; } qurt_sysenv_max_hthreads_t hw_threads; qurt_sysenv_get_max_hw_threads(&hw_threads); uint32_t hw_nhvx = (qurt_hvx_get_units() >> 8) & 0xFF; if (n_hvx == 0) { n_hvx = hw_nhvx; } if (n_hvx > hw_threads.max_hthreads) { n_hvx = hw_threads.max_hthreads; } if (n_hvx > HTP_MAX_NTHREADS) { n_hvx = HTP_MAX_NTHREADS; } ctx->n_threads = n_hvx; for (int i = 0; i < ctx->n_threads; i++) { // see discussion https://github.com/ggml-org/llama.cpp/pull/18151#discussion_r2632388541 ctx->dma[i] = dma_queue_create(64); } // init worker pool err = worker_pool_init(&ctx->worker_pool, n_hvx); if (err != AEE_SUCCESS) { FARF(ERROR, "Unable to create worker pool"); return err; } FARF(HIGH, "session %u started: n-hvx %u vtcm-size %zu vtcm-rctx %u n-threads %u thread-id %d thread-prio %d \n", sess_id, hw_nhvx, ctx->vtcm_size, ctx->vtcm_rctx, ctx->n_threads, ctx->thread_id, ctx->thread_prio); return AEE_SUCCESS; } AEEResult htp_iface_stop(remote_handle64 handle) { struct htp_context * ctx = (struct htp_context *) handle; if (!ctx) { return AEE_EBADPARM; } if (!ctx->queue) { FARF(ERROR, "Queue not open"); return AEE_EBADSTATE; } // Close queue. dspqueue_close() will also wait for callbacks to finish. int err = dspqueue_close(ctx->queue); ctx->queue = NULL; if (err != 0) { FARF(ERROR, "Queue close failed with 0x%08x", (unsigned) err); return err; } if (ctx->worker_pool) { // Release worker pool worker_pool_release(&ctx->worker_pool); } for (int i = 0; i < ctx->n_threads; i++) { dma_queue_delete(ctx->dma[i]); } vtcm_free(ctx); return AEE_SUCCESS; } static void htp_error_callback(dspqueue_t queue, int error, void * context) { // No errors expected on the DSP. FARF(ERROR, "Error callback: 0x%08x", (unsigned) error); } struct profile_data { uint64_t usecs; uint64_t cycles; uint64_t pkts; }; static inline void profile_start(struct profile_data * d) { d->usecs = HAP_perf_get_qtimer_count(); d->cycles = htp_get_cycles(); d->pkts = htp_get_pktcnt(); } static inline void profile_stop(struct profile_data * d) { d->usecs = HAP_perf_qtimer_count_to_us(HAP_perf_get_qtimer_count() - d->usecs); d->cycles = htp_get_cycles() - d->cycles; d->pkts = htp_get_pktcnt() - d->pkts; } static int send_htp_rsp(struct htp_context * c, uint32_t op, uint32_t status, struct dspqueue_buffer * bufs, size_t n_bufs, struct profile_data * prof) { // Prep response struct struct htp_general_rsp rsp; rsp.op = op; rsp.status = status; rsp.prof_usecs = prof->usecs; rsp.prof_cycles = prof->cycles; rsp.prof_pkts = prof->pkts; int err = dspqueue_write(c->queue, 0, // Flags n_bufs, bufs, // Buffer references sizeof(rsp), (const uint8_t *) &rsp, // Message DSPQUEUE_TIMEOUT_NONE); if (err != 0) { FARF(ERROR, "dspqueue_write failed: 0x%08x", (unsigned) err); } return err; } static void proc_matmul_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs, size_t n_bufs) { struct dspqueue_buffer rsp_bufs[1]; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[2].fd; rsp_bufs[0].ptr = bufs[2].ptr; rsp_bufs[0].size = bufs[2].size; rsp_bufs[0].offset = bufs[2].offset; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; octx.src1 = req->src1; octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; octx.src1.data = (uint32_t) 
bufs[1].ptr; octx.dst.data = (uint32_t) bufs[2].ptr; octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { rsp_status = op_matmul(&octx); vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void proc_matmul_id_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs, size_t n_bufs) { struct dspqueue_buffer rsp_bufs[1]; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[3].fd; rsp_bufs[0].ptr = bufs[3].ptr; rsp_bufs[0].size = bufs[3].size; rsp_bufs[0].offset = bufs[3].offset; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; octx.src1 = req->src1; octx.src2 = req->src2; octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; octx.src1.data = (uint32_t) bufs[1].ptr; octx.src2.data = (uint32_t) bufs[2].ptr; octx.dst.data = (uint32_t) bufs[3].ptr; octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { rsp_status = op_matmul_id(&octx); vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void proc_binary_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs) { struct dspqueue_buffer rsp_bufs[1]; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[2].fd; rsp_bufs[0].ptr = bufs[2].ptr; rsp_bufs[0].offset = bufs[2].offset; rsp_bufs[0].size = bufs[2].size; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; octx.src1 = req->src1; octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; octx.src1.data = (uint32_t) bufs[1].ptr; octx.dst.data = (uint32_t) bufs[2].ptr; octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { rsp_status = op_binary(&octx); vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void proc_add_id_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs) { struct dspqueue_buffer rsp_bufs[1]; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[3].fd; rsp_bufs[0].ptr = bufs[3].ptr; rsp_bufs[0].offset = bufs[3].offset; rsp_bufs[0].size = bufs[3].size; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; octx.src1 = req->src1; octx.src2 = req->src2; octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; octx.src1.data = (uint32_t) bufs[1].ptr; octx.src2.data = (uint32_t) bufs[2].ptr; octx.dst.data = (uint32_t) bufs[3].ptr; 
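    // Note: ADD_ID is routed through op_binary() below with the extra src2 (ids)
    // buffer wired in, hence the 4-buffer packet instead of the usual 3.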
octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { rsp_status = op_binary(&octx); vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void proc_unary_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs) { struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[1].fd; rsp_bufs[0].ptr = bufs[1].ptr; rsp_bufs[0].offset = bufs[1].offset; rsp_bufs[0].size = bufs[1].size; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; memcpy(octx.op_params, req->op_params, sizeof(octx.op_params)); // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; octx.dst.data = (uint32_t) bufs[1].ptr; octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { rsp_status = op_unary(&octx); vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void proc_activations_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs, uint32_t n_bufs) { struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; int write_idx = (n_bufs == 3) ? 2 : 1; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[write_idx].fd; rsp_bufs[0].ptr = bufs[write_idx].ptr; rsp_bufs[0].offset = bufs[write_idx].offset; rsp_bufs[0].size = bufs[write_idx].size; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; if (3 == n_bufs) { octx.src1 = req->src1; } octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; memcpy(octx.op_params, req->op_params, sizeof(octx.op_params)); // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; if (3 == n_bufs) { octx.src1.data = (uint32_t) bufs[1].ptr; octx.dst.data = (uint32_t) bufs[2].ptr; } else { octx.dst.data = (uint32_t) bufs[1].ptr; } octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { if (octx.op == HTP_OP_SOFTMAX) { rsp_status = op_softmax(&octx); } else { rsp_status = op_activations(&octx); } vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void proc_rope_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs, uint32_t n_bufs) { struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; int write_idx = (n_bufs == 4) ? 
3 : 2; // We had written to the output buffer, we'd also need to flush it rsp_bufs[0].fd = bufs[write_idx].fd; rsp_bufs[0].ptr = bufs[write_idx].ptr; rsp_bufs[0].offset = bufs[write_idx].offset; rsp_bufs[0].size = bufs[write_idx].size; rsp_bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush HTP DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU // Setup Op context struct htp_ops_context octx = { 0 }; octx.ctx = ctx; octx.src0 = req->src0; octx.src1 = req->src1; if (4 == n_bufs) { octx.src2 = req->src2; } octx.dst = req->dst; octx.flags = req->flags; octx.op = req->op; memcpy(octx.op_params, req->op_params, sizeof(octx.op_params)); // Update data pointers octx.src0.data = (uint32_t) bufs[0].ptr; octx.src1.data = (uint32_t) bufs[1].ptr; if (4 == n_bufs) { octx.src2.data = (uint32_t) bufs[2].ptr; octx.dst.data = (uint32_t) bufs[3].ptr; } else { octx.dst.data = (uint32_t) bufs[2].ptr; } octx.n_threads = ctx->n_threads; struct profile_data prof; profile_start(&prof); uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; if (vtcm_acquire(ctx) == AEE_SUCCESS) { rsp_status = op_rope(&octx); vtcm_release(ctx); } profile_stop(&prof); send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 1, &prof); } static void htp_packet_callback(dspqueue_t queue, int error, void * context) { struct htp_context * ctx = (struct htp_context *) context; // Repeatedly read packets from the queue until it's empty. We don't // necessarily get a separate callback for each packet, and new packets // may arrive while we're processing the previous one. This ensures we // keep the DSP busy as much as possible and avoid waiting for the CPU. while (1) { struct htp_general_req req; uint32_t req_size; struct dspqueue_buffer bufs[HTP_MAX_PACKET_BUFFERS]; uint32_t n_bufs; uint32_t flags; // Read packet from queue int err = dspqueue_read_noblock(queue, &flags, HTP_MAX_PACKET_BUFFERS, // Maximum number of buffer references &n_bufs, // Number of buffer references bufs, // Buffer references sizeof(req), // Max message length &req_size, // Message length (uint8_t *) &req); // Message if (err == AEE_EWOULDBLOCK) { // Consumed all packets available for now return; } if (err != 0) { FARF(ERROR, "dspqueue_read_noblock failed: 0x%08x", (unsigned) err); return; } if (req_size != sizeof(req)) { FARF(ERROR, "Invalid request size"); continue; } if (req.flags & HTP_OPFLAGS_EARLY_WAKEUP) { // Host wants early notification dspqueue_write_early_wakeup_noblock(ctx->queue, 10, 0); } // Process packet based on its message type switch (req.op) { case HTP_OP_MUL_MAT: if (n_bufs != 3) { FARF(ERROR, "Bad matmul-req buffer list"); continue; } proc_matmul_req(ctx, &req, bufs, n_bufs); break; case HTP_OP_MUL_MAT_ID: if (n_bufs != 4) { FARF(ERROR, "Bad matmul-id-req buffer list"); continue; } proc_matmul_id_req(ctx, &req, bufs, n_bufs); break; case HTP_OP_MUL: case HTP_OP_ADD: case HTP_OP_SUB: if (n_bufs != 3) { FARF(ERROR, "Bad binary-req buffer list"); continue; } proc_binary_req(ctx, &req, bufs); break; case HTP_OP_RMS_NORM: if (n_bufs != 2) { FARF(ERROR, "Bad unary-req buffer list"); continue; } proc_unary_req(ctx, &req, bufs); break; case HTP_OP_UNARY_SILU: case HTP_OP_UNARY_GELU: if (n_bufs != 2) { FARF(ERROR, "Bad act-req buffer list"); continue; } proc_activations_req(ctx, &req, bufs, n_bufs); break; case HTP_OP_GLU_SWIGLU: case HTP_OP_GLU_SWIGLU_OAI: case HTP_OP_SOFTMAX: if ((n_bufs != 2) && (n_bufs != 3)) { FARF(ERROR, "Bad act-req buffer list"); continue; } proc_activations_req(ctx, &req, bufs, n_bufs); break; case HTP_OP_ADD_ID: if (n_bufs 
!= 4) { FARF(ERROR, "Bad add-id-req buffer list"); continue; } proc_add_id_req(ctx, &req, bufs); break; case HTP_OP_ROPE: if ((n_bufs != 3) && (n_bufs != 4)) { FARF(ERROR, "Bad rope-req buffer list"); continue; } proc_rope_req(ctx, &req, bufs, n_bufs); break; default: FARF(ERROR, "Unknown Op %u", req.op); break; } } } ggml-org-ggml-3678254/src/ggml-hexagon/htp/matmul-ops.c000066400000000000000000003264641512524704700226040ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" struct htp_matmul_type { const char * type; void (*vec_dot)(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); void (*vec_dot_rx2)(const int n, float * restrict s, const void * restrict vx, uint32_t vx_row_size, const void * restrict vy); }; typedef struct { HVX_Vector v[2]; } HVX_Vector_x2; typedef struct { HVX_Vector v[4]; } HVX_Vector_x4; typedef struct { HVX_Vector v[8]; } HVX_Vector_x8; // vdelta control to replicate first 4x fp32 values across lanes static const uint8_t __attribute__((aligned(128))) repl_4x_fp32[128] = { 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x44, 0x44, 0x44, 0x44, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, }; // vdelta control to replicate and interleave first 8x fp32 values across lanes static const uint8_t __attribute__((aligned(128))) repl_interleave_8x_fp32[128] = { 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x44, 0x44, 0x44, 0x44, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x44, 0x44, 0x44, 0x44, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, }; // vdelta control to replicate first fp32 value across all elements static const uint8_t __attribute__((aligned(128))) repl_1x_fp32[128] = { 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 
0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, }; // vdelta control to replicate first fp16 value across all elements static const uint8_t __attribute__((aligned(128))) repl_1x_fp16[128] = { 0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x20, 0x20, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x40, 0x40, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x20, 0x20, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, }; // vdelta control to replicate first fp16 value across all elements static const uint8_t __attribute__((aligned(128))) repl_2x_fp16[128] = { 0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x20, 0x20, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x20, 0x20, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, }; // vdelta control to expand first 32 e8m0 values into 32 uint32 elements static const uint8_t __attribute__((aligned(128))) expand_x32_e8m0[128] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x02, 0x00, 0x08, 0x08, 0x01, 0x02, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, 0x11, 0x10, 0x10, 0x10, 0x02, 0x00, 0x04, 0x00, 0x01, 0x02, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x22, 0x20, 0x20, 0x20, 0x21, 0x22, 0x20, 0x24, 0x04, 0x00, 0x00, 0x00, 0x09, 0x08, 0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x11, 0x12, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x04, 0x00, 0x00, 0x02, 0x00, 0x08, 0x08, 0x01, 0x02, 0x00, 0x04, 0x44, 0x40, 0x40, 0x40, 0x41, 0x40, 0x40, 0x40, 0x42, 0x40, 0x44, 0x40, 0x41, 0x42, 0x48, 0x48, 0x08, 0x08, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x12, 0x10, 0x10, 0x10, 0x01, 0x02, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, 0x09, 0x08, 0x00, 0x00, 0x22, 0x20, 0x24, 0x20, 0x21, 0x22, 0x20, 0x20, }; static const uint8_t __attribute__((aligned(VLEN))) kvalues_mxfp4_lut[] = { 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 6, 0, 8, 0, 12, 0, 0, 0, 0xff, 0, 0xfe, 0, 0xfd, 0, 0xfc, 0, 0xfa, 0, 
0xf8, 0, 0xf4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; // q4x4x2 and q8x4x2 are the flat q4/8_0 formats where all quants are stored first followed by all scales static inline size_t q8x4x2_row_size(uint32_t ne) { // ensures perfect alignment of quants and full row const uint32_t qk = QK_Q8_0x4x2; const uint32_t nb = (ne + qk - 1) / qk; return htp_round_up(ne + nb * 8 * sizeof(__fp16), 128); } static inline HVX_Vector_x8 hvx_vec_load_q4x4x8(const uint8_t * restrict ptr) { const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; HVX_Vector v0_1 = vptr[0]; // first 256 elements (128 bytes) HVX_Vector v2_3 = vptr[1]; // ... HVX_Vector v4_5 = vptr[2]; // ... HVX_Vector v6_7 = vptr[3]; // ... const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F); HVX_Vector v0 = Q6_V_vand_VV(v0_1, mask_h4); // & 0x0F HVX_Vector v1 = Q6_Vub_vlsr_VubR(v0_1, 4); // >> 4 HVX_Vector v2 = Q6_V_vand_VV(v2_3, mask_h4); // & 0x0F HVX_Vector v3 = Q6_Vub_vlsr_VubR(v2_3, 4); // >> 4 HVX_Vector v4 = Q6_V_vand_VV(v4_5, mask_h4); // & 0x0F HVX_Vector v5 = Q6_Vub_vlsr_VubR(v4_5, 4); // >> 4 HVX_Vector v6 = Q6_V_vand_VV(v6_7, mask_h4); // & 0x0F HVX_Vector v7 = Q6_Vub_vlsr_VubR(v6_7, 4); // >> 4 // Convert uint4 to int4 (i.e. x - 8) const HVX_Vector i8 = Q6_Vb_vsplat_R(8); v0 = Q6_Vb_vsub_VbVb(v0, i8); v1 = Q6_Vb_vsub_VbVb(v1, i8); v2 = Q6_Vb_vsub_VbVb(v2, i8); v3 = Q6_Vb_vsub_VbVb(v3, i8); v4 = Q6_Vb_vsub_VbVb(v4, i8); v5 = Q6_Vb_vsub_VbVb(v5, i8); v6 = Q6_Vb_vsub_VbVb(v6, i8); v7 = Q6_Vb_vsub_VbVb(v7, i8); HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 }; return r; } static inline HVX_Vector_x8 hvx_vec_load_mxfp4x4x8(const uint8_t * restrict ptr) { const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; HVX_Vector v0_1 = vptr[0]; // first 256 elements (128 bytes) HVX_Vector v2_3 = vptr[1]; // ... HVX_Vector v4_5 = vptr[2]; // ... HVX_Vector v6_7 = vptr[3]; // ... const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F); HVX_Vector v0 = Q6_V_vand_VV(v0_1, mask_h4); // & 0x0F HVX_Vector v1 = Q6_Vub_vlsr_VubR(v0_1, 4); // >> 4 HVX_Vector v2 = Q6_V_vand_VV(v2_3, mask_h4); // & 0x0F HVX_Vector v3 = Q6_Vub_vlsr_VubR(v2_3, 4); // >> 4 HVX_Vector v4 = Q6_V_vand_VV(v4_5, mask_h4); // & 0x0F HVX_Vector v5 = Q6_Vub_vlsr_VubR(v4_5, 4); // >> 4 HVX_Vector v6 = Q6_V_vand_VV(v6_7, mask_h4); // & 0x0F HVX_Vector v7 = Q6_Vub_vlsr_VubR(v6_7, 4); // >> 4 HVX_Vector lut = *(const HVX_Vector *) kvalues_mxfp4_lut; v0 = Q6_Vb_vlut32_VbVbI(v0, lut, 0); v1 = Q6_Vb_vlut32_VbVbI(v1, lut, 0); v2 = Q6_Vb_vlut32_VbVbI(v2, lut, 0); v3 = Q6_Vb_vlut32_VbVbI(v3, lut, 0); v4 = Q6_Vb_vlut32_VbVbI(v4, lut, 0); v5 = Q6_Vb_vlut32_VbVbI(v5, lut, 0); v6 = Q6_Vb_vlut32_VbVbI(v6, lut, 0); v7 = Q6_Vb_vlut32_VbVbI(v7, lut, 0); HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 }; return r; } static inline HVX_Vector_x8 hvx_vec_load_q8x4x8(const uint8_t * restrict ptr) { const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; HVX_Vector v0 = vptr[0]; // first 128 vals HVX_Vector v1 = vptr[1]; // ... HVX_Vector v2 = vptr[2]; // ... HVX_Vector v3 = vptr[3]; // ... HVX_Vector v4 = vptr[4]; // ... HVX_Vector v5 = vptr[5]; // ... HVX_Vector v6 = vptr[6]; // ... HVX_Vector v7 = vptr[7]; // ... 
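    // Eight full HVX vectors x 128 bytes = 1024 int8 quants, i.e. one full block
    // as consumed by hvx_vec_rmpy_x8_full() below.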
HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 }; return r; } static inline HVX_Vector_x4 hvx_vec_load_x4_f16(const uint8_t * restrict ptr) { const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; HVX_Vector v0 = vptr[0]; // first 64 vals HVX_Vector v1 = vptr[1]; // second 64 vals HVX_Vector v2 = vptr[2]; // third 64 vals HVX_Vector v3 = vptr[3]; // forth 64 vals HVX_Vector_x4 r = { v0, v1, v2, v3 }; return r; } static inline HVX_Vector_x4 hvx_vec_load_x4_f32_as_f16(const uint8_t * restrict ptr) { const HVX_VectorPair * restrict vptr = (const HVX_VectorPair *) ptr; HVX_VectorPair v0 = vptr[0]; // first 64 vals HVX_VectorPair v1 = vptr[1]; // second 64 vals HVX_VectorPair v2 = vptr[2]; // third 64 vals HVX_VectorPair v3 = vptr[3]; // forth 64 vals HVX_Vector vq0_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v0), Q6_V_vzero()); HVX_Vector vq0_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v0), Q6_V_vzero()); HVX_Vector vq1_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v1), Q6_V_vzero()); HVX_Vector vq1_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v1), Q6_V_vzero()); HVX_Vector vq2_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v2), Q6_V_vzero()); HVX_Vector vq2_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v2), Q6_V_vzero()); HVX_Vector vq3_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v3), Q6_V_vzero()); HVX_Vector vq3_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v3), Q6_V_vzero()); HVX_Vector vh0 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq0_hi, vq0_lo)); HVX_Vector vh1 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq1_hi, vq1_lo)); HVX_Vector vh2 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq2_hi, vq2_lo)); HVX_Vector vh3 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq3_hi, vq3_lo)); // vcombine does a shuffle, use vdeal to undo HVX_Vector_x4 r = { Q6_Vh_vdeal_Vh(vh0), Q6_Vh_vdeal_Vh(vh1), Q6_Vh_vdeal_Vh(vh2), Q6_Vh_vdeal_Vh(vh3) }; return r; } // Reduce multiply 1024 x 1024 int8 elements (32x q4/8 blocks in 8x HVX vectors). // Accumulate each block into a single int32 value. // Return a single HVX vector with 32x int32 accumulators. // This version is parameterized to support less than 1024 elements. // if() checks are optimized out at compile time -- make sure to pass N as a constexpr. 
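// Implementation note: each Q6_Vw_vrmpy_VbVb below yields 32 int32 lanes, with
// every lane holding the sum of 4 adjacent int8 products; the vdeal/vadd tree
// then folds the 8 partial vectors in three steps, so each of the 32 output
// lanes ends up with the accumulator for one full 32-element sub-block.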
static inline HVX_Vector hvx_vec_rmpy_x8_n(HVX_Vector_x8 x, HVX_Vector_x8 y, unsigned int n) { HVX_Vector r0 = Q6_V_vsplat_R(0); HVX_Vector r1 = Q6_V_vsplat_R(0); HVX_Vector r2 = Q6_V_vsplat_R(0); HVX_Vector r3 = Q6_V_vsplat_R(0); HVX_Vector r4 = Q6_V_vsplat_R(0); HVX_Vector r5 = Q6_V_vsplat_R(0); HVX_Vector r6 = Q6_V_vsplat_R(0); HVX_Vector r7 = Q6_V_vsplat_R(0); HVX_VectorPair p3; HVX_VectorPair p2; HVX_VectorPair p1; HVX_VectorPair p0; if (n >= 128) { r0 = Q6_Vw_vrmpy_VbVb(x.v[0], y.v[0]); } if (n >= 256) { r1 = Q6_Vw_vrmpy_VbVb(x.v[1], y.v[1]); } if (n >= 384) { r2 = Q6_Vw_vrmpy_VbVb(x.v[2], y.v[2]); } if (n >= 512) { r3 = Q6_Vw_vrmpy_VbVb(x.v[3], y.v[3]); } if (n >= 640) { r4 = Q6_Vw_vrmpy_VbVb(x.v[4], y.v[4]); } if (n >= 768) { r5 = Q6_Vw_vrmpy_VbVb(x.v[5], y.v[5]); } if (n >= 896) { r6 = Q6_Vw_vrmpy_VbVb(x.v[6], y.v[6]); } if (n >= 1024) { r7 = Q6_Vw_vrmpy_VbVb(x.v[7], y.v[7]); } if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); } if (n >= 384) { p1 = Q6_W_vdeal_VVR(r3, r2, -4); } if (n >= 640) { p2 = Q6_W_vdeal_VVR(r5, r4, -4); } if (n >= 896) { p3 = Q6_W_vdeal_VVR(r7, r6, -4); } if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); } if (n >= 384) { r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1)); } if (n >= 640) { r2 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p2), Q6_V_hi_W(p2)); } if (n >= 896) { r3 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p3), Q6_V_hi_W(p3)); } if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); } if (n >= 640) { p1 = Q6_W_vdeal_VVR(r3, r2, -4); } if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); } if (n >= 640) { r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1)); } if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); } if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); } return r0; } static inline HVX_Vector hvx_vec_rmpy_x8_full(HVX_Vector_x8 x, HVX_Vector_x8 y) { return hvx_vec_rmpy_x8_n(x, y, 1024); } // Handle most common cases of tensors not multiple of 1024. static inline HVX_Vector hvx_vec_rmpy_x8_nloe(HVX_Vector_x8 x, HVX_Vector_x8 y, unsigned int n) { if (n <= 256) { return hvx_vec_rmpy_x8_n(x, y, 256); }; if (n <= 512) { return hvx_vec_rmpy_x8_n(x, y, 512); }; if (n <= 768) { return hvx_vec_rmpy_x8_n(x, y, 768); }; return hvx_vec_rmpy_x8_n(x, y, 1024); } static void vec_dot_q4x4x2_q8x4x2(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { assert(n % 32 == 0); // min sub-block size assert((unsigned long) vx % 128 == 0); assert((unsigned long) vy % 128 == 0); const uint32_t qk = QK_Q4_0x4x2 * 4; const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t x_qblk_size = qk / 2; // int4 const uint32_t x_qrow_size = n / 2; // int4 (not padded) const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t y_qblk_size = qk; // int8 const uint32_t y_qrow_size = n; // int8 (not padded) const uint8_t * restrict r0_x_q = ((const uint8_t *) vx + 0); // quants first const uint8_t * restrict r0_x_d = ((const uint8_t *) vx + x_qrow_size); // then scales const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales // Row sum (qf32) HVX_Vector r0_sum = Q6_V_vsplat_R(0); // Multiply and accumulate into int32. // Compute combined scale (fp32). // Apply scale to acc and accumulate into the row sum (qf32). 
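// In scalar form, each iteration of the main loop below computes the following
// (illustrative sketch only; x_q/y_q and x_d/y_d are shorthand for the repacked
// quants and their per-sub-block fp16 scales):
//
//   for (b = 0; b < 32; b++) {                      // 32 sub-blocks = 1024 elements
//       int32_t idot = 0;
//       for (j = 0; j < 32; j++) {
//           idot += x_q[b][j] * y_q[b][j];          // int4 * int8 -> int32
//       }
//       sum += (float) idot * ((float) x_d[b] * (float) y_d[b]);
//   }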
const uint32_t nb = n / qk; // num full blocks const uint32_t nloe = n % qk; // num leftover elemements uint32_t i = 0; for (; i < nb; i++) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); } // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks if (nloe) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); // Zero out unused scales HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); r0_dd = Q6_V_vand_QV(bmask, r0_dd); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); } // Reduce and convert into fp32 r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); hvx_vec_store_u(&s[0], 4, r0_sum); } static void vec_dot_q4x4x2_q8x4x2_rx2(const int n, float * restrict s, const void * restrict vx, uint32_t vx_row_size, const void * restrict vy) { assert(n % 32 == 0); // min sub-block size assert((unsigned long) vx % 128 == 0); assert((unsigned long) vy % 128 == 0); const uint32_t qk = QK_Q4_0x4x2 * 4; const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t x_qblk_size = qk / 2; // int4 const uint32_t x_qrow_size = n / 2; // int4 (not padded) const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t y_qblk_size = qk; // int8 const uint32_t y_qrow_size = n; // int8 (not padded) const uint8_t * restrict r0_x_q = ((const uint8_t *) (vx + (0 * vx_row_size)) + 0); // quants first const uint8_t * restrict r0_x_d = ((const uint8_t *) (vx + (0 * vx_row_size)) + x_qrow_size); // then scales const uint8_t * restrict r1_x_q = ((const uint8_t *) (vx + (1 * vx_row_size)) + 0); // quants first const uint8_t * restrict r1_x_d = ((const uint8_t *) (vx + (1 * vx_row_size)) + x_qrow_size); // then scales const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales // Row sum (qf32) HVX_Vector r0_sum = Q6_V_vsplat_R(0); HVX_Vector r1_sum = Q6_V_vsplat_R(0); // Multiply and accumulate into int32. // Compute combined scale (fp32). // Apply scale to acc and accumulate into the row sum (qf32). 
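// rx2 variant: the q8 activation vectors (vy_q, vy_d) are loaded from VTCM once per
// iteration and reused against two adjacent weight rows (r0_*, r1_*), which roughly
// halves the src1 load traffic compared to running the single-row kernel twice.
// The per-sub-block math is otherwise identical to vec_dot_q4x4x2_q8x4x2.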
const uint32_t nb = n / qk; // num full blocks const uint32_t nloe = n % qk; // num leftover elemements uint32_t i = 0; for (; i < nb; i++) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8(r1_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); } // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks if (nloe) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8(r1_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r1_q, vy_q, nloe)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); // Zero out unused scales HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); r0_dd = Q6_V_vand_QV(bmask, r0_dd); r1_dd = Q6_V_vand_QV(bmask, r1_dd); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); } // Convert into fp32 and reduce r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); r1_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r1_sum)); HVX_VectorPair p0 = Q6_W_vshuff_VVR(r1_sum, r0_sum, 4); hvx_vec_store_u(&s[0], 8, Q6_V_lo_W(p0)); } static void vec_dot_q8x4x2_q8x4x2(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { assert(n % 32 == 0); // min sub-block size assert((unsigned long) vx % 128 == 0); assert((unsigned long) vy % 128 == 0); const uint32_t qk = QK_Q4_0x4x2 * 4; const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t x_qblk_size = qk; // int8 const uint32_t x_qrow_size = n; // int8 (not padded) const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t y_qblk_size = qk; // int8 const uint32_t y_qrow_size = n; // int8 (not padded) const uint8_t * restrict r0_x_q = ((const uint8_t *) vx + 0); // quants first const uint8_t * restrict r0_x_d = ((const uint8_t *) vx + x_qrow_size); // then scales const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales // Row sum (qf32) HVX_Vector r0_sum = 
Q6_V_vsplat_R(0); // Multiply and accumulate into int32. // Compute combined scale (fp32). // Apply scale to acc and accumulate into the row sum (qf32). const uint32_t nb = n / qk; // num full blocks int32_t nloe = n % qk; // num leftover elemements (must be signed) uint32_t i = 0; for (; i < nb; i++) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); } // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks if (nloe) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); // Zero out unused scales HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); r0_dd = Q6_V_vand_QV(bmask, r0_dd); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); } // Reduce and convert into fp32 r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); hvx_vec_store_u(&s[0], 4, r0_sum); } static void vec_dot_q8x4x2_q8x4x2_rx2(const int n, float * restrict s, const void * restrict vx, uint32_t vx_row_size, const void * restrict vy) { assert(n % 32 == 0); // min sub-block size assert((unsigned long) vx % 128 == 0); assert((unsigned long) vy % 128 == 0); const uint32_t qk = QK_Q4_0x4x2 * 4; const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t x_qblk_size = qk; // int8 const uint32_t x_qrow_size = n; // int8 (not padded) const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t y_qblk_size = qk; // int8 const uint32_t y_qrow_size = n; // int8 (not padded) const uint8_t * restrict r0_x_q = ((const uint8_t *) (vx + (0 * vx_row_size)) + 0); // quants first const uint8_t * restrict r0_x_d = ((const uint8_t *) (vx + (0 * vx_row_size)) + x_qrow_size); // then scales const uint8_t * restrict r1_x_q = ((const uint8_t *) (vx + (1 * vx_row_size)) + 0); // quants first const uint8_t * restrict r1_x_d = ((const uint8_t *) (vx + (1 * vx_row_size)) + x_qrow_size); // then scales const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales // Row sum (qf32) HVX_Vector r0_sum = Q6_V_vsplat_R(0); HVX_Vector r1_sum = Q6_V_vsplat_R(0); // Multiply and accumulate into int32. // Compute combined scale (fp32). // Apply scale to acc and accumulate into the row sum (qf32). 
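// Leftover handling (used in the tail block below and in the other dot kernels): a full
// 1024-element group is always loaded, and Q6_Q_vsetq_R(nloe / 8) builds a predicate
// covering the first nloe / 8 bytes, i.e. the fp32 combined scales of the nloe / 32
// sub-blocks that actually contain data. Zeroing the remaining scales makes the padded
// quants contribute nothing to the row sum, whatever the rmpy stage produced for them.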
const uint32_t nb = n / qk; // num full blocks int32_t nloe = n % qk; // num leftover elemements (must be signed) uint32_t i = 0; for (; i < nb; i++) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8(r1_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); } // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks if (nloe) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8(r1_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r1_q, vy_q, nloe)); HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); // Zero out unused scales HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); r0_dd = Q6_V_vand_QV(bmask, r0_dd); r1_dd = Q6_V_vand_QV(bmask, r1_dd); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); } // Convert into fp32 and reduce r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); r1_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r1_sum)); HVX_VectorPair p0 = Q6_W_vshuff_VVR(r1_sum, r0_sum, 4); hvx_vec_store_u(&s[0], 8, Q6_V_lo_W(p0)); } static void vec_dot_mxfp4x4x2_q8x4x2(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { assert(n % 32 == 0); // min sub-block size assert((unsigned long) vx % 128 == 0); assert((unsigned long) vy % 128 == 0); const uint32_t qk = QK_MXFP4x4x2 * 4; const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0 const uint32_t x_qblk_size = qk / 2; // fp4 const uint32_t x_qrow_size = n / 2; // fp4 (not padded) const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t y_qblk_size = qk; // int8 const uint32_t y_qrow_size = n; // int8 (not padded) const uint8_t * restrict r0_x_q = ((const uint8_t *) vx + 0); // quants first const uint8_t * restrict r0_x_d = ((const uint8_t *) vx + x_qrow_size); // then scales const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales // Row sum (qf32) 
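// mxfp4 scale handling used below: the 4-bit codes are mapped through kvalues_mxfp4_lut
// to int8 values that are 2x the nominal e2m1 magnitudes, which the 0.5 factor applied
// to the fp16 activation scale compensates for. The per-sub-block e8m0 weight scale is
// reconstructed as 2^(e - 127) by shifting the exponent byte straight into the fp32
// exponent field. Scalar equivalent (illustrative only, ignoring the e == 0 special
// case noted in the FIXME below):
//
//   static inline float e8m0_to_fp32_ref(uint8_t e) {
//       union { uint32_t u; float f; } v = { .u = (uint32_t) e << 23 };
//       return v.f;
//   }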
HVX_Vector r0_sum = Q6_V_vsplat_R(0); // Multiply and accumulate into int32. // Compute combined scale (fp32). // Apply scale to acc and accumulate into the row sum (qf32). const uint32_t nb = n / qk; // num full blocks int32_t nloe = n % qk; // num leftover elemements (must be signed) uint32_t i = 0; for (; i < nb; i++) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); vy_d = Q6_Vsf_equals_Vqf32(vy_d); // Convert rX_d scales from e8m0 to fp32 // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... // Left shift with zero fill to create FP32 // FIXME: might need to handle zero as a special case (see ggml-cpu code) HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); r0_d = Q6_V_vdelta_VV(r0_d, expand); r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); r0_d = Q6_Vw_vasl_VwR(r0_d, 23); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); } // Process leftovers if (nloe) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); vy_d = Q6_Vsf_equals_Vqf32(vy_d); // Convert rX_d scales from e8m0 to fp32 // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... 
// Left shift with zero fill to create FP32 // FIXME: might need to handle zero as a special case (see ggml-cpu code) HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); r0_d = Q6_V_vdelta_VV(r0_d, expand); r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); r0_d = Q6_Vw_vasl_VwR(r0_d, 23); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); // Zero-out unused scales HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); r0_dd = Q6_V_vand_QV(bmask, r0_dd); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); } // Reduce and convert into fp32 r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); hvx_vec_store_u(&s[0], 4, r0_sum); } static void vec_dot_mxfp4x4x2_q8x4x2_rx2(const int n, float * restrict s, const void * restrict vx, uint32_t vx_row_size, const void * restrict vy) { assert(n % 32 == 0); // min sub-block size assert((unsigned long) vx % 128 == 0); assert((unsigned long) vy % 128 == 0); const uint32_t qk = QK_MXFP4x4x2 * 4; const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0 const uint32_t x_qblk_size = qk / 2; // fp4 const uint32_t x_qrow_size = n / 2; // fp4 (not padded) const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 const uint32_t y_qblk_size = qk; // int8 const uint32_t y_qrow_size = n; // int8 (not padded) const uint8_t * restrict r0_x_q = ((const uint8_t *) (vx + (0 * vx_row_size)) + 0); // quants first const uint8_t * restrict r0_x_d = ((const uint8_t *) (vx + (0 * vx_row_size)) + x_qrow_size); // then scales const uint8_t * restrict r1_x_q = ((const uint8_t *) (vx + (1 * vx_row_size)) + 0); // quants first const uint8_t * restrict r1_x_d = ((const uint8_t *) (vx + (1 * vx_row_size)) + x_qrow_size); // then scales const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales // Row sum (qf32) HVX_Vector r0_sum = Q6_V_vsplat_R(0); HVX_Vector r1_sum = Q6_V_vsplat_R(0); // Multiply and accumulate into int32. // Compute combined scale (fp32). // Apply scale to acc and accumulate into the row sum (qf32). const uint32_t nb = n / qk; // num full blocks int32_t nloe = n % qk; // num leftover elemements (must be signed) uint32_t i = 0; for (; i < nb; i++) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8(r1_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size); // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); vy_d = Q6_Vsf_equals_Vqf32(vy_d); // Convert rX_d scales from e8m0 to fp32 // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... 
// Left shift with zero fill to create FP32 // FIXME: might need to handle zero as a special case (see ggml-cpu code) HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); r0_d = Q6_V_vdelta_VV(r0_d, expand); r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); r0_d = Q6_Vw_vasl_VwR(r0_d, 23); r1_d = Q6_V_vdelta_VV(r1_d, expand); r1_d = Q6_V_vand_VV(r1_d, e8m0_mask); r1_d = Q6_Vw_vasl_VwR(r1_d, 23); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy_d)); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); } // Process leftovers if (nloe) { HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8(r1_x_q + i * x_qblk_size); HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size); // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); vy_d = Q6_Vsf_equals_Vqf32(vy_d); // Convert rX_d scales from e8m0 to fp32 // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... // Left shift with zero fill to create FP32 // FIXME: might need to handle zero as a special case (see ggml-cpu code) HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); r0_d = Q6_V_vdelta_VV(r0_d, expand); r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); r0_d = Q6_Vw_vasl_VwR(r0_d, 23); r1_d = Q6_V_vdelta_VV(r1_d, expand); r1_d = Q6_V_vand_VV(r1_d, e8m0_mask); r1_d = Q6_Vw_vasl_VwR(r1_d, 23); HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy_d)); // Zero-out unused scales HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); r0_dd = Q6_V_vand_QV(bmask, r0_dd); r1_dd = Q6_V_vand_QV(bmask, r1_dd); HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); } // Convert into fp32 and reduce r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); r1_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r1_sum)); HVX_VectorPair p0 = Q6_W_vshuff_VVR(r1_sum, r0_sum, 4); hvx_vec_store_u(&s[0], 8, Q6_V_lo_W(p0)); } #if 1 static void vec_dot_f16_f32(const int n, float * restrict s, const void * restrict x, const void * restrict y) { if (0) { float rsum = 0; const __fp16 * restrict vx = (const __fp16 * restrict) x; const float * restrict vy = (const float * restrict) y; for (uint32_t i = 0; i < n; i++) { rsum += (float)vx[i] * vy[i]; } *s = rsum; return; } const HVX_UVector * restrict vx = (const HVX_UVector * restrict) x; const HVX_UVectorPair * restrict vy = (const HVX_UVectorPair * restrict) y; uint32_t nv0 = n / 64; // num full fp16 hvx vectors uint32_t nv1 = n % 64; // leftover 
elements // for some reason we need volatile here so that the compiler doesn't try anything funky volatile HVX_Vector rsum = Q6_V_vsplat_R(0); float r_sum_scalar = 0.0f; uint32_t i = 0; for (i = 0; i < nv0; i++) { HVX_VectorPair yp = vy[i]; HVX_Vector x = vx[i]; HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0 // NOTE: need volatile here to prevent compiler optimization; it seems the compiler cannot guarantee read-after-write ordering otherwise volatile HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp)); volatile HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp)); HVX_Vector sum = Q6_Vqf32_vadd_Vqf32Vqf32(hi, lo); rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum); } if (nv1) { // HVX_VectorPair yp = vy[i]; // HVX_Vector x = vx[i]; // HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0 // if (nv1 >= 32) { // volatile HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp)); // rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, hi); // nv1 -= 32; // } // rsum = hvx_vec_qf32_reduce_sum(rsum); // if (nv1) { // volatile HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp)); // HVX_Vector sum = hvx_vec_qf32_reduce_sum_n(lo, nv1); // rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum); // } // process the remainder using a scalar loop rsum = hvx_vec_qf32_reduce_sum(rsum); const __fp16 * restrict sx = (const __fp16 * restrict) x; const float * restrict sy = (const float * restrict) y; for (uint32_t i = nv0 * 64; i < n; i++) { r_sum_scalar += (float) sx[i] * sy[i]; } // hvx_vec_dump_fp16("X", x); // hvx_vec_dump_fp16("Y", y); // hvx_vec_dump_fp32("SUM", Q6_Vsf_equals_Vqf32(sum)); // hvx_vec_dump_fp32("RSUM", Q6_Vsf_equals_Vqf32(rsum)); } else { rsum = hvx_vec_qf32_reduce_sum(rsum); } *s = hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(rsum)) + r_sum_scalar; # ifdef HTP_DEBUG { float rsum = 0; const __fp16 * restrict vx = (const __fp16 * restrict) x; const float * restrict vy = (const float * restrict) y; for (uint32_t i = 0; i < n; i++) { rsum += vx[i] * vy[i]; } float diff = fabs(*s - rsum); if (diff > 0.001) { FARF(HIGH, "vec-dot-f16-mismatch: %u (%u:%u) expected %.6f got %.6f\n", n, nv0, nv1, rsum, *s); // htp_dump_f16("x", vx, n); // htp_dump_f32("y", vy, n); } } # endif } #else static void vec_dot_f16_f32(const int n, float * restrict s, const void * restrict x, const void * restrict y) { const uint32_t fk = 64; const uint32_t nb = n / fk; assert(n % fk == 0); assert(nb % 4 == 0); const uint32_t x_blk_size = 2 * fk; // fp16 const uint32_t y_blk_size = 4 * fk; // fp32 // Row sum (qf32) HVX_Vector rsum0 = Q6_V_vsplat_R(0); HVX_Vector rsum1 = Q6_V_vsplat_R(0); HVX_Vector rsum2 = Q6_V_vsplat_R(0); HVX_Vector rsum3 = Q6_V_vsplat_R(0); for (uint32_t i = 0; i < nb; i += 4) { HVX_Vector_x4 vx = hvx_vec_load_x4_f16(x + (i * x_blk_size)); HVX_Vector_x4 vy = hvx_vec_load_x4_f32_as_f16(y + (i * y_blk_size)); HVX_VectorPair fa0 = Q6_Wqf32_vmpy_VhfVhf(vx.v[0], vy.v[0]); HVX_VectorPair fa1 = Q6_Wqf32_vmpy_VhfVhf(vx.v[1], vy.v[1]); HVX_VectorPair fa2 = Q6_Wqf32_vmpy_VhfVhf(vx.v[2], vy.v[2]); HVX_VectorPair fa3 = Q6_Wqf32_vmpy_VhfVhf(vx.v[3], vy.v[3]); rsum0 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum0, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa0), Q6_V_hi_W(fa0))); rsum1 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum1, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa1), Q6_V_hi_W(fa1))); rsum2 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum2, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa2),
Q6_V_hi_W(fa2))); rsum3 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum3, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa3), Q6_V_hi_W(fa3))); } // Reduce and convert into fp32 rsum0 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum0, rsum1); rsum2 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum2, rsum3); HVX_Vector rsum = hvx_vec_qf32_reduce_sum(Q6_Vqf32_vadd_Vqf32Vqf32(rsum0, rsum2)); hvx_vec_store_u(s, 4, Q6_Vsf_equals_Vqf32(rsum)); } #endif #define htp_matmul_preamble \ const uint32_t ne00 = src0->ne[0]; \ const uint32_t ne01 = src0->ne[1]; \ const uint32_t ne02 = src0->ne[2]; \ const uint32_t ne03 = src0->ne[3]; \ \ const uint32_t ne10 = src1->ne[0]; \ const uint32_t ne11 = src1->ne[1]; \ const uint32_t ne12 = src1->ne[2]; \ const uint32_t ne13 = src1->ne[3]; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb00 = src0->nb[0]; \ const uint32_t nb01 = src0->nb[1]; \ const uint32_t nb02 = src0->nb[2]; \ const uint32_t nb03 = src0->nb[3]; \ \ const uint32_t nb10 = src1->nb[0]; \ const uint32_t nb11 = src1->nb[1]; \ const uint32_t nb12 = src1->nb[2]; \ const uint32_t nb13 = src1->nb[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; // q8x4 src1 tensor is already in VTCM spad static void matmul(struct htp_matmul_type * mt, struct htp_tensor * restrict src0, struct htp_tensor * restrict src1, struct htp_tensor * restrict dst, struct htp_spad * restrict src0_spad, struct htp_spad * restrict src1_spad, struct htp_spad * restrict dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread, dma_queue * dma_queue) { htp_matmul_preamble; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src1_nrows = ne11 * ne12 * ne13; // src1 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); // no work for this thread if (src0_start_row >= src0_end_row) { return; } const size_t dst_row_size = nb1; const size_t src0_row_size = nb01; const size_t src1_row_size = q8x4x2_row_size(ne10); const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); // Per-thread VTCM scratchpads for all tensors // Note that the entire src1 tensor is already in VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; uint8_t * restrict src1_data = src1_spad->data; volatile uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); const uint8_t * restrict src0_row = (const uint8_t *) src0->data; // Prefill spad with src0 rows #pragma unroll(4) for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const int is0 = (ir0 - src0_start_row); if (is0 >= HTP_SPAD_SRC0_NROWS) { break; } dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } // Process src0 rows for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; #pragma unroll(2) for (uint32_t ir1 = 0; ir1 < src1_nrows; ++ir1) { const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + ir1 * src1_row_size); float * restrict dst_row = 
(float *) (dst->data + (ir1 * dst_row_size)); mt->vec_dot_rx2(ne00, &dst_row[ir0], ss0, src0_row_size_padded, src1_col); } // Prefetch next (n + spad_nrows) row const int pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); const int is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; if (pr0 < src0_end_row_x2) { dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } } // Process the last row (if any) if (src0_end_row != src0_end_row_x2) { uint32_t ir0 = src0_end_row_x2; const int is0 = (ir0 - src0_start_row); dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 1); const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; #pragma unroll(2) for (uint32_t ir1 = 0; ir1 < src1_nrows; ++ir1) { const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + ir1 * src1_row_size); float * restrict dst_row = (float *) (dst->data + (ir1 * dst_row_size)); mt->vec_dot(ne00, &dst_row[ir0], ss0, src1_col); } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "matmul-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", mt->type, ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } // q8x4x2 src1 tensor is already in VTCM spad static void matvec(struct htp_matmul_type * mt, struct htp_tensor * restrict src0, struct htp_tensor * restrict src1, struct htp_tensor * restrict dst, struct htp_spad * restrict src0_spad, struct htp_spad * restrict src1_spad, struct htp_spad * restrict dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread, dma_queue * dma_queue) { htp_matmul_preamble; const uint32_t src0_nrows = ne01; const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); // no work for this thread if (src0_start_row >= src0_end_row) { return; } const size_t dst_row_size = nb1; const size_t src0_row_size = nb01; const size_t src1_row_size = q8x4x2_row_size(ne10); const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); // Per-thread VTCM scratchpads for all tensors // Note that the entire src1 tensor is already in VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size uint8_t * spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; uint8_t * spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; uint8_t * src1_data = src1_spad->data; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); float * tmp = (float *) spad_dst; const uint8_t * restrict src0_row = (const uint8_t *) src0->data; const uint8_t * restrict src1_col = (const uint8_t *) src1_data; float * restrict dst_col = (float *) dst->data; // Prefill spad with 2x src0 rows #pragma unroll(2) for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const uint32_t is0 = (ir0 - src0_start_row); if (is0 >= HTP_SPAD_SRC0_NROWS) { break; } dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } // Process src0 rows for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const uint8_t 
* ss0 = dma_queue_pop(dma_queue).dst; mt->vec_dot_rx2(ne00, &tmp[ir0 - src0_start_row], ss0, src0_row_size_padded, src1_col); // Prefetch next (n + spad_nrows) row const uint32_t pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); const uint32_t is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; if (pr0 < src0_end_row_x2) { dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } } // Process the last row (if any) if (src0_end_row != src0_end_row_x2) { const uint32_t ir0 = src0_end_row_x2; const uint32_t is0 = (ir0 - src0_start_row); dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 1); const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; mt->vec_dot(ne00, &tmp[ir0 - src0_start_row], ss0, src1_col); } hvx_copy_fp32_ua((uint8_t *) &dst_col[src0_start_row], (uint8_t *) tmp, src0_end_row - src0_start_row); t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "matvec-%s %u/%u: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", mt->type, ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ids->ne[0] * ids->ne[1] + (i1)] struct mmid_row_mapping { uint32_t i1; uint32_t i2; }; // q8x4 src1 tensor is already in VTCM spad static void matmul_id(struct htp_matmul_type * mt, struct htp_tensor * restrict src0, struct htp_tensor * restrict src1, struct htp_tensor * restrict ids, struct htp_tensor * restrict dst, struct htp_spad * restrict src0_spad, struct htp_spad * restrict src1_spad, struct htp_spad * restrict src2_spad, struct htp_spad * restrict dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread, dma_queue * dma_queue) { htp_matmul_preamble; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); const uint32_t src0_nrows = ne01; // src0 rows per expert const uint32_t src1_nrows = ne11; const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); // no work for this thread if (src0_start_row >= src0_end_row) { return; } const uint32_t n_ids = ids->ne[0]; // n_expert_used const uint32_t n_as = ne02; // n_expert const size_t matrix_row_counts_size = n_as * sizeof(uint32_t); const size_t matrix_row_map_size = n_as * ids->ne[0] * ids->ne[1] * sizeof(struct mmid_row_mapping); const uint32_t * matrix_row_counts = (const uint32_t *) src2_spad->data + 0; const struct mmid_row_mapping * matrix_rows = (const void *) src2_spad->data + matrix_row_counts_size; const size_t dst_row_size = nb1; const size_t src0_row_size = nb01; const size_t src1_row_size = q8x4x2_row_size(ne10); const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); // Per-thread VTCM scratchpads for all tensors // Note that the entire src1 tensor is already in VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; uint8_t * restrict src1_data = src1_spad->data; for (uint32_t cur_a = 0; cur_a < n_as; ++cur_a) 
{ const int32_t cne1 = matrix_row_counts[cur_a]; if (cne1 == 0) { continue; } const uint8_t * src0_row = (const uint8_t *) src0->data + (0 + cur_a * nb02 + 0); // Prefill spad with src0 rows #pragma unroll(4) for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const int is0 = (ir0 - src0_start_row); if (is0 >= HTP_SPAD_SRC0_NROWS) { break; } dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } // Process src0 rows for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; for (uint32_t cid = 0; cid < cne1; ++cid) { struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, cid); const int rm1 = row_mapping.i1; // expert idx const int rm2 = row_mapping.i2; // token idx const uint32_t ir1 = src1_nrows == 1 ? 0 : rm1; // src1 row idx const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + (ir1 + rm2 * ne11 + 0) * src1_row_size); float * dst_row = (float *) (dst->data + (rm1 * nb1 + rm2 * nb2 + 0)); mt->vec_dot_rx2(ne00, &dst_row[ir0], ss0, src0_row_size_padded, src1_col); } // Prefetch next (n + spad_nrows) row const int pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); const int is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; if (pr0 < src0_end_row_x2) { dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } } // Process the last row (if any) if (src0_end_row != src0_end_row_x2) { uint32_t ir0 = src0_end_row_x2; const uint32_t is0 = (ir0 - src0_start_row); dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 1); const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; for (uint32_t cid = 0; cid < cne1; ++cid) { struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, cid); const int rm1 = row_mapping.i1; // expert idx const int rm2 = row_mapping.i2; // token idx const uint32_t ir1 = src1_nrows == 1 ? 
0 : rm1; // src1 row idx const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + (ir1 + rm2 * ne11 + 0) * src1_row_size); float * dst_row = (float *) (dst->data + (rm1 * nb1 + rm2 * nb2 + 0)); mt->vec_dot(ne00, &dst_row[ir0], ss0, src1_col); } } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "matmul-id-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", mt->type, ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } // q8x4 src1 tensor is already in VTCM spad static void matvec_id(struct htp_matmul_type * mt, struct htp_tensor * restrict src0, struct htp_tensor * restrict src1, struct htp_tensor * restrict src2, struct htp_tensor * restrict dst, struct htp_spad * restrict src0_spad, struct htp_spad * restrict src1_spad, struct htp_spad * restrict src2_spad, struct htp_spad * restrict dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread, dma_queue * dma_queue) { htp_matmul_preamble; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); const uint32_t src0_nrows = ne01; // src0 rows per expert const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); // no work for this thread if (src0_start_row >= src0_end_row) { return; } assert(ne13 % ne03 == 0); const size_t dst_row_size = nb1; const size_t src0_row_size = nb01; const size_t src1_row_size = q8x4x2_row_size(ne10); const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); const uint32_t n_aids = src2->ne[0]; // num activated experts const uint32_t n_ids = ne02; // num experts // Per-thread VTCM scratchpads for all tensors // Note that the entire src1 tensor is already in VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; uint8_t * restrict src1_data = src1_spad->data; for (uint32_t ie1 = 0; ie1 < n_aids; ++ie1) { // for each expert const uint32_t eid = *(const int32_t *) ((const uint8_t *) src2->data + ie1 * src2->nb[0]); assert(eid < n_ids); const uint8_t * restrict src0_row = (const uint8_t *) src0->data + eid * nb02; const uint8_t * restrict src1_col = (const uint8_t *) src1_data; float * restrict dst_row = (float *) (dst->data + ie1 * nb1); // Prefill spad with src0 rows #pragma unroll(4) for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const int is0 = (ir0 - src0_start_row); if (is0 >= HTP_SPAD_SRC0_NROWS) { break; } dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } // Process src0 rows for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; mt->vec_dot_rx2(ne00, &dst_row[ir0], ss0, src0_row_size_padded, src1_col); // Prefetch next (n + spad_nrows) row const int pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); const int is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; if (pr0 < src0_end_row_x2) { dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * 
src0_row_size_padded, src0_row + pr0 * src0_row_size), src0_row_size_padded, src0_row_size, 2); } } // Process the last row (if any) if (src0_end_row != src0_end_row_x2) { uint32_t ir0 = src0_end_row_x2; const uint32_t is0 = (ir0 - src0_start_row); dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size), src0_row_size_padded, src0_row_size, 1); const uint8_t * ss0 = dma_queue_pop(dma_queue).dst; mt->vec_dot(ne00, &dst_row[ir0], ss0, src1_col); } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "matvec-id-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", mt->type, ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } // *** matmul in fp16 static void matmul_f16_f32(struct htp_tensor * restrict src0, struct htp_tensor * restrict src1, struct htp_tensor * restrict dst, struct htp_spad * restrict src0_spad, struct htp_spad * restrict src1_spad, struct htp_spad * restrict dst_spad, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread, dma_queue * dma_queue) { htp_matmul_preamble; uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); assert(ne12 % ne02 == 0); assert(ne13 % ne03 == 0); // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers) const uint32_t nr0 = ne0; // This is the size of the rest of the dimensions of the result const uint32_t nr1 = ne1 * ne2 * ne3; // distribute the thread work across the inner or outer loop based on which one is larger uint32_t nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows uint32_t nchunk1 = nr0 > nr1 ? 
1 : nth; // parallelize by src1 rows // The number of elements in each chunk const uint32_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; const uint32_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; uint32_t current_chunk = ith; const uint32_t ith0 = current_chunk % nchunk0; const uint32_t ith1 = current_chunk / nchunk0; const uint32_t ir0_start = dr0 * ith0; const uint32_t ir0_end = MIN(ir0_start + dr0, nr0); const uint32_t ir1_start = dr1 * ith1; const uint32_t ir1_end = MIN(ir1_start + dr1, nr1); // broadcast factors const uint32_t r2 = ne12 / ne02; const uint32_t r3 = ne13 / ne03; // no work for this thread if (ir0_start >= ir0_end || ir1_start >= ir1_end) { return; } // block-tiling attempt const uint32_t blck_0 = 64; const uint32_t blck_1 = 64; __attribute__((aligned(128))) float tmp[64]; for (uint32_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { for (uint32_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { for (uint32_t ir1 = iir1; ir1 < MIN(iir1 + blck_1, ir1_end); ir1++) { const uint32_t i13 = (ir1 / (ne12 * ne1)); const uint32_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; const uint32_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); // broadcast src0 into src1 const uint32_t i03 = i13 / r3; const uint32_t i02 = i12 / r2; const uint32_t i1 = i11; const uint32_t i2 = i12; const uint32_t i3 = i13; const uint8_t * restrict src0_base = (const uint8_t *) src0->data + (0 + i02 * nb02 + i03 * nb03); const uint8_t * restrict src1_col = (const uint8_t *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13); float * dst_col = (float *) ((uint8_t * restrict) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); const uint32_t ir0_block_end = MIN(iir0 + blck_0, ir0_end); for (uint32_t ir0 = iir0; ir0 < ir0_block_end; ir0++) { // Use nb01 stride for non-contiguous src0 support const uint8_t * restrict src0_row = src0_base + ir0 * nb01; vec_dot_f16_f32(ne00, &tmp[ir0 - iir0], src0_row, src1_col); } hvx_copy_fp32_ua((uint8_t *) &dst_col[iir0], (uint8_t *) tmp, MIN(iir0 + blck_0, ir0_end) - iir0); } } } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "matmul-f16-f32 %d/%d: %ux%ux%ux%u (%u:%u %u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], ir0_start, ir0_end, ir1_start, ir1_end, src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } // *** dynamic quant static inline void quantize_block_fp32_q8x1(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) { assert((unsigned long) x % 128 == 0); assert((unsigned long) y_q % 128 == 0); HVX_Vector * vx = (HVX_Vector *) x; HVX_Vector zero = Q6_V_vsplat_R(0); // Use reduce max fp32 to find max(abs(e)) first HVX_Vector vmax0_sf = hvx_vec_reduce_max_fp32(hvx_vec_abs_fp32(vx[0])); HVX_Vector vmax1_sf = hvx_vec_reduce_max_fp32(hvx_vec_abs_fp32(vx[1])); HVX_Vector vmax2_sf = hvx_vec_reduce_max_fp32(hvx_vec_abs_fp32(vx[2])); HVX_Vector vmax3_sf = hvx_vec_reduce_max_fp32(hvx_vec_abs_fp32(vx[3])); // Load and convert into QF32 HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements // Convert to QF32 HVX_Vector vmax0_qf = Q6_Vqf32_vsub_VsfVsf(vmax0_sf, zero); HVX_Vector vmax1_qf = Q6_Vqf32_vsub_VsfVsf(vmax1_sf, zero); HVX_Vector vmax2_qf = Q6_Vqf32_vsub_VsfVsf(vmax2_sf, zero); HVX_Vector vmax3_qf = 
Q6_Vqf32_vsub_VsfVsf(vmax3_sf, zero); // Combine and convert to fp16 HVX_Vector vmax01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vmax1_qf, vmax0_qf))); HVX_Vector vmax23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vmax3_qf, vmax2_qf))); // Convert into fp16 HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf))); HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf))); // Replicate first fp16 scale across all lanes HVX_Vector ctrl = *(const HVX_Vector *) repl_2x_fp16; vmax01_hf = Q6_V_vdelta_VV(vmax01_hf, ctrl); vmax23_hf = Q6_V_vdelta_VV(vmax23_hf, ctrl); HVX_Vector vd01_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax01_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0 HVX_Vector vd23_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax23_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0 HVX_Vector vd01_hf = Q6_Vhf_equals_Vqf16(vd01_qf16); HVX_Vector vd23_hf = Q6_Vhf_equals_Vqf16(vd23_qf16); hvx_vec_store_u(y_d + 0, 2, vd01_hf); HVX_Vector rotated_vd_hf = Q6_V_vror_VR(vd01_hf, 64); hvx_vec_store_u(y_d + 2, 2, rotated_vd_hf); hvx_vec_store_u(y_d + 4, 2, vd23_hf); rotated_vd_hf = Q6_V_vror_VR(vd23_hf, 64); hvx_vec_store_u(y_d + 6, 2, rotated_vd_hf); // Divide input by the scale HVX_Vector vd01_inv_hf = hvx_vec_inverse_fp16(vd01_hf); HVX_Vector vd23_inv_hf = hvx_vec_inverse_fp16(vd23_hf); vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd01_inv_hf)); vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd23_inv_hf)); // Convert to int8 HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf); HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf); HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16); *(HVX_Vector *) y_q = vx_i8; } static inline void quantize_block_fp32_q8x2(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) { assert((unsigned long) x % 128 == 0); assert((unsigned long) y_q % 128 == 0); HVX_Vector * vx = (HVX_Vector *) x; // Load and convert into QF32 HVX_Vector zero = Q6_V_vsplat_R(0); HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements // Convert into fp16 HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf))); HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf))); // Compute max and scale HVX_Vector vmax01_hf = hvx_vec_reduce_max_fp16(hvx_vec_abs_fp16(vx01_hf)); HVX_Vector vmax23_hf = hvx_vec_reduce_max_fp16(hvx_vec_abs_fp16(vx23_hf)); // Replicate first fp16 scale across all lanes HVX_Vector ctrl = *(const HVX_Vector *) repl_1x_fp16; vmax01_hf = Q6_V_vdelta_VV(vmax01_hf, ctrl); vmax23_hf = Q6_V_vdelta_VV(vmax23_hf, ctrl); HVX_Vector vd01_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax01_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0 HVX_Vector vd23_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax23_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0 HVX_Vector vd01_hf = Q6_Vhf_equals_Vqf16(vd01_qf16); HVX_Vector vd23_hf = Q6_Vhf_equals_Vqf16(vd23_qf16); hvx_vec_store_u(y_d + 0, 4, vd01_hf); hvx_vec_store_u(y_d + 4, 4, vd23_hf); // Divide input by the scale HVX_Vector vd01_inv_hf = hvx_vec_inverse_fp16(vd01_hf); HVX_Vector vd23_inv_hf = hvx_vec_inverse_fp16(vd23_hf); vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd01_inv_hf)); vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd23_inv_hf)); // 
Convert to int8 HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf); HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf); HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16); *(HVX_Vector *) y_q = vx_i8; } static inline void quantize_block_fp32_q8x4(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) { assert((unsigned long) x % 128 == 0); assert((unsigned long) y_q % 128 == 0); HVX_Vector * vx = (HVX_Vector *) x; // Load and convert into QF32 HVX_Vector zero = Q6_V_vsplat_R(0); HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements // Convert into fp16 HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf))); HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf))); // Compute max and scale HVX_Vector vmax_hf = hvx_vec_reduce_max_fp16(hvx_vec_abs_fp16(vx01_hf)); vmax_hf = hvx_vec_reduce_max2_fp16(hvx_vec_abs_fp16(vx23_hf), vmax_hf); // Replicate first fp16 scale across all lanes HVX_Vector ctrl = *(const HVX_Vector *) repl_1x_fp16; vmax_hf = Q6_V_vdelta_VV(vmax_hf, ctrl); HVX_Vector vd_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0 HVX_Vector vd_hf = Q6_Vhf_equals_Vqf16(vd_qf16); *(HVX_UVector *) y_d = vd_hf; // Divide input by the scale HVX_Vector vd_inv_hf = hvx_vec_inverse_fp16(vd_hf); vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd_inv_hf)); vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd_inv_hf)); // Convert to int8 HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf); HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf); HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16); *(HVX_Vector *) y_q = vx_i8; } // Overrides input x static void quantize_row_fp32_q8x4x2(float * restrict x, uint8_t * restrict y, uint32_t k) { assert(k % 32 == 0); const uint32_t qk = QK_Q8_0x4x2; const uint32_t nb = (k + qk - 1) / qk; const uint32_t qrow_size = k; // int8 const uint32_t dblk_size = 8 * 2; // 8x __fp16 const uint32_t qblk_size = QK_Q8_0x4x2; // int8 uint8_t * restrict y_q = (y + 0); // quants first uint8_t * restrict y_d = (y + qrow_size); // then scales // Temp scales override input since we're working off of the aligned temp buffer in VTCM uint8_t * restrict t_d = (uint8_t *) x; for (uint32_t i = 0; i < nb; i++) { #if FP32_QUANTIZE_GROUP_SIZE == 32 quantize_block_fp32_q8x1(x + (i * 2 + 0) * qk / 2, y_q + (i * 2 + 0) * qblk_size / 2, t_d + (i * 2 + 0) * dblk_size / 2); quantize_block_fp32_q8x1(x + (i * 2 + 1) * qk / 2, y_q + (i * 2 + 1) * qblk_size / 2, t_d + (i * 2 + 1) * dblk_size / 2); #elif FP32_QUANTIZE_GROUP_SIZE == 64 quantize_block_fp32_q8x2(x + (i * 2 + 0) * qk / 2, y_q + (i * 2 + 0) * qblk_size / 2, t_d + (i * 2 + 0) * dblk_size / 2); quantize_block_fp32_q8x2(x + (i * 2 + 1) * qk / 2, y_q + (i * 2 + 1) * qblk_size / 2, t_d + (i * 2 + 1) * dblk_size / 2); #elif FP32_QUANTIZE_GROUP_SIZE == 128 quantize_block_fp32_q8x4(x + (i * 2 + 0) * qk / 2, y_q + (i * 2 + 0) * qblk_size / 2, t_d + (i * 2 + 0) * dblk_size / 2); quantize_block_fp32_q8x4(x + (i * 2 + 1) * qk / 2, y_q + (i * 2 + 1) * qblk_size / 2, t_d + (i * 2 + 1) * dblk_size / 2); #else #error "FP32_QUANTIZE_GROUP_SIZE must be 32, 64, or 128" #endif } // now copy the scales into final location hvx_copy_fp16_ua(y_d, t_d, nb * 8); } 
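// Scalar reference for the per-group scale/quantize math implemented by the HVX blocks
// above. Illustrative only and not used by the build: the HVX path routes through fp16
// (and hvx_vec_inverse_fp16), so its rounding can differ slightly from this plain fp32
// version; the helper name and signature are purely for documentation.
static inline void quantize_group_fp32_q8_ref(const float * restrict x, int8_t * restrict q, __fp16 * restrict d, uint32_t group_size) {
    float amax = 0.0f;  // max(|x|) over the group
    for (uint32_t j = 0; j < group_size; j++) {
        const float a = x[j] < 0.0f ? -x[j] : x[j];
        if (a > amax) {
            amax = a;
        }
    }
    const float scale  = amax / 127.0f;
    const float iscale = (scale != 0.0f) ? 1.0f / scale : 0.0f;
    *d = (__fp16) scale;
    for (uint32_t j = 0; j < group_size; j++) {
        float v = x[j] * iscale;
        v = v >  127.0f ?  127.0f : v;  // clamp to the int8 range used by q8_0
        v = v < -127.0f ? -127.0f : v;
        q[j] = (int8_t) (v >= 0.0f ? v + 0.5f : v - 0.5f);  // round to nearest, ties away from zero
    }
}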
static void quantize_fp32_q8x4x2(const struct htp_tensor * src, uint8_t * restrict dst, struct htp_spad * spad, uint32_t nth, uint32_t ith, uint32_t nrows_per_thread) { uint64_t t1 = HAP_perf_get_qtimer_count(); const uint32_t ne0 = src->ne[0]; const uint32_t ne1 = src->ne[1]; const uint32_t ne2 = src->ne[2]; const uint32_t ne3 = src->ne[3]; const uint32_t nrows = ne1 * ne2 * ne3; // total n_rows const uint32_t ir_first = nrows_per_thread * ith; // first row const uint32_t ir_last = MIN(ir_first + nrows_per_thread, nrows); // last row const size_t src_row_size = src->nb[1]; const size_t dst_row_size = q8x4x2_row_size(ne0); uint8_t * restrict src_data = (uint8_t *) src->data + (src_row_size * ir_first); uint8_t * restrict dst_data = (uint8_t *) dst + (dst_row_size * ir_first); uint8_t * restrict tmp_data = (uint8_t *) spad->data + (spad->size_per_thread * ith); const size_t src_row_size_padded = htp_round_up(src_row_size, QK_Q8_0x4x2 * sizeof(float)); memset(tmp_data, 0, src_row_size_padded); // zero-out temp row data for padding for (uint32_t i = ir_first; i < ir_last; ++i) { htp_l2fetch(src_data, 2, src_row_size, src_row_size); hvx_copy_fp32_aa(tmp_data, src_data, ne0); // FARF(HIGH, "quantize-q8x4-row: %u\n", i); quantize_row_fp32_q8x4x2((float *) tmp_data, dst_data, ne0); dst_data += dst_row_size; src_data += src_row_size; } uint64_t t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "quantize-fp32-q8x4: %u/%u : n-rows %u (%u:%u) row-size %u -> %u usec %u\n", ith, nth, nrows, ir_first, ir_last, src_row_size, dst_row_size, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void htp_quantize_fp32_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; quantize_fp32_q8x4x2(&octx->src1, octx->src1_spad.data, &octx->src0_spad, n, i, octx->src1_nrows_per_thread); } // ** matmul callbacks for worker_pool static void htp_matvec_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q4x4x2-q8x4x2"; mt.vec_dot = vec_dot_q4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; matvec(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q4x4x2-q8x4x2"; mt.vec_dot = vec_dot_q4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; matmul(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matvec_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q8x4x2-q8x4x2"; mt.vec_dot = vec_dot_q8x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; matvec(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q8x4x2-q8x4x2"; mt.vec_dot = vec_dot_q8x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; matmul(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void 
htp_matvec_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "mxfp4x4x2-q8x4x2"; mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; matvec(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "mxfp4x4x2-q8x4x2"; mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; matmul(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_f16_f32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; matmul_f16_f32(&octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } // ** matmul-id callbacks for worker_pool static void htp_matvec_id_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q4x4x2-q8x4x2"; mt.vec_dot = vec_dot_q4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; matvec_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_id_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q4x4x2-q8x4x2"; mt.vec_dot = vec_dot_q4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; matmul_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matvec_id_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q8x4x2-q8x4x2"; mt.vec_dot = vec_dot_q8x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; matvec_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_id_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "q8x4x2-q8x4x2"; mt.vec_dot = vec_dot_q8x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; matmul_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matvec_id_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = data; struct htp_matmul_type mt; mt.type = "mxfp4x4x2-q8x4x2"; mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; matvec_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } static void htp_matmul_id_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { struct htp_ops_context 
* octx = data; struct htp_matmul_type mt; mt.type = "mxfp4x4x2-q8x4x2"; mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; matmul_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); } // ** main matmul entry point int op_matmul(struct htp_ops_context * octx) { const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; struct htp_tensor * dst = &octx->dst; htp_matmul_preamble; const char * op_type; const uint32_t src0_nrows = ne01 * ne02 * ne03; const uint32_t src1_nrows = ne11 * ne12 * ne13; const size_t src0_row_size = nb01; const size_t dst_row_size = nb1; size_t src1_row_size = nb11; const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); size_t src1_row_size_padded; worker_callback_t quant_job_func; worker_callback_t matmul_job_func; bool need_quant = !(octx->flags & HTP_OPFLAGS_SKIP_QUANTIZE); switch (src0->type) { case HTP_TYPE_Q4_0: op_type = "q4x4x2-fp32"; quant_job_func = htp_quantize_fp32_q8x4x2; if (src1_nrows > 1) { matmul_job_func = htp_matmul_q4x4x2_q8x4x2; } else { matmul_job_func = htp_matvec_q4x4x2_q8x4x2; } src1_row_size = q8x4x2_row_size(ne10); // row size post quantization // Entire src1 tensor is placed into the VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); // src0 spad is also used in dynamic quantizer to store padded src1 rows src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); if (octx->src0_spad.size_per_thread < src1_row_size_padded) { octx->src0_spad.size_per_thread = src1_row_size_padded; } octx->src1_spad.size = octx->src1_spad.size_per_thread; octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; break; case HTP_TYPE_Q8_0: op_type = "q8x4x2-fp32"; quant_job_func = htp_quantize_fp32_q8x4x2; if (src1_nrows > 1) { matmul_job_func = htp_matmul_q8x4x2_q8x4x2; } else { matmul_job_func = htp_matvec_q8x4x2_q8x4x2; } src1_row_size = q8x4x2_row_size(ne10); // row size post quantization // Entire src1 tensor is placed into the VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); // src0 spad is also used in dynamic quantizer to store padded src1 rows src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); if (octx->src0_spad.size_per_thread < src1_row_size_padded) { octx->src0_spad.size_per_thread = src1_row_size_padded; } octx->src1_spad.size = octx->src1_spad.size_per_thread; octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; break; case HTP_TYPE_MXFP4: op_type = "mxfp4x4x2-f32"; quant_job_func = htp_quantize_fp32_q8x4x2; if (src1_nrows > 1) { matmul_job_func = htp_matmul_mxfp4x4x2_q8x4x2; } else { matmul_job_func = 
htp_matvec_mxfp4x4x2_q8x4x2; } src1_row_size = q8x4x2_row_size(ne10); // row size post quantization // Entire src1 tensor is placed into the VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); // src0 spad is also used in dynamic quantizer to store padded src1 rows src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); if (octx->src0_spad.size_per_thread < src1_row_size_padded) { octx->src0_spad.size_per_thread = src1_row_size_padded; } octx->src1_spad.size = octx->src1_spad.size_per_thread; octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; break; case HTP_TYPE_F16: op_type = "f16-f32"; quant_job_func = NULL; // htp_quantize_f32_f16; matmul_job_func = htp_matmul_f16_f32; // For all tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size, 256); octx->src1_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC1_NROWS * src1_row_size, 256); octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->src1_spad.size = octx->src1_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; need_quant = false; break; default: return HTP_STATUS_NO_SUPPORT; } // VTCM scratchpads for all tensors size_t spad_size = octx->src1_spad.size + octx->src0_spad.size + octx->dst_spad.size; FARF(HIGH, "matmul-%s : src0-spad-size %u src1-spad-size %u dst-spad-size %u (%zu)\n", op_type, octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size, spad_size); FARF(HIGH, "matmul-%s : %ux%ux%ux%u * %ux%ux%ux%u-> %ux%ux%ux%u (0x%p, 0x%p, 0x%p)\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], src0->data, src1->data, dst->data); // Make sure the reserved vtcm size is sufficient if (octx->ctx->vtcm_size < spad_size) { FARF(ERROR, "matmul-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, spad_size); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.data = octx->ctx->vtcm_base; octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; octx->src0_nrows_per_thread = (src0_nrows + octx->n_threads - 1) / octx->n_threads; octx->src0_nrows_per_thread += (octx->src0_nrows_per_thread & 1); // round up to even if (need_quant) { // Run quant jobs const uint32_t n_quant_jobs = MIN(src1_nrows, octx->n_threads); octx->src1_nrows_per_thread = (src1_nrows + n_quant_jobs - 1) / n_quant_jobs; worker_pool_run_func(octx->ctx->worker_pool, quant_job_func, octx, n_quant_jobs); } if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { // Run matmul jobs const uint32_t n_matmul_jobs = octx->n_threads; worker_pool_run_func(octx->ctx->worker_pool, matmul_job_func, octx, n_matmul_jobs); } return HTP_STATUS_OK; } // ** main matmul-id entry point int op_matmul_id(struct htp_ops_context * octx) { const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * 
src1 = &octx->src1; const struct htp_tensor * ids = &octx->src2; struct htp_tensor * dst = &octx->dst; htp_matmul_preamble; const char * op_type; worker_callback_t quant_job_func; worker_callback_t matmul_id_job_func; const size_t src0_row_size = nb01; const size_t dst_row_size = nb1; const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); const uint32_t src0_nrows = ne01; // per expert const uint32_t src1_nrows = ne11 * ne12 * ne13; size_t src1_row_size; size_t src1_row_size_padded; // row groups const int n_ids = ids->ne[0]; // n_expert_used const int n_as = ne02; // n_expert size_t matrix_row_counts_size = n_as * sizeof(uint32_t); size_t matrix_row_map_size = n_as * ids->ne[0] * ids->ne[1] * sizeof(struct mmid_row_mapping); switch (src0->type) { case HTP_TYPE_Q4_0: op_type = "q4x2x2-f32"; quant_job_func = htp_quantize_fp32_q8x4x2; src1_row_size = q8x4x2_row_size(ne10); // row size post quantization if (src1_nrows > 1) { matmul_id_job_func = htp_matmul_id_q4x4x2_q8x4x2; } else { matmul_id_job_func = htp_matvec_id_q4x4x2_q8x4x2; } // Entire src1 tensor is placed into the VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); octx->src2_spad.size_per_thread = htp_round_up(matrix_row_counts_size + matrix_row_map_size, 256); // src0 spad is also used in dynamic quantizer to store padded src1 rows src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); if (octx->src0_spad.size_per_thread < src1_row_size_padded) { octx->src0_spad.size_per_thread = src1_row_size_padded; } octx->src2_spad.size = octx->src2_spad.size_per_thread; octx->src1_spad.size = octx->src1_spad.size_per_thread; octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; break; case HTP_TYPE_Q8_0: op_type = "q8x2x2-f32"; quant_job_func = htp_quantize_fp32_q8x4x2; src1_row_size = q8x4x2_row_size(ne10); // row size post quantization if (src1_nrows > 1) { matmul_id_job_func = htp_matmul_id_q8x4x2_q8x4x2; } else { matmul_id_job_func = htp_matvec_id_q8x4x2_q8x4x2; } // Entire src1 tensor is placed into the VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); octx->src2_spad.size_per_thread = htp_round_up(matrix_row_counts_size + matrix_row_map_size, 256); // src0 spad is also used in dynamic quantizer to store padded src1 rows src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); if (octx->src0_spad.size_per_thread < src1_row_size_padded) { octx->src0_spad.size_per_thread = src1_row_size_padded; } octx->src2_spad.size = octx->src2_spad.size_per_thread; octx->src1_spad.size = octx->src1_spad.size_per_thread; octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; break; case HTP_TYPE_MXFP4: op_type = "mxfp4x2x2-f32"; quant_job_func = htp_quantize_fp32_q8x4x2; src1_row_size = q8x4x2_row_size(ne10); // row size post 
quantization if (src1_nrows > 1) { matmul_id_job_func = htp_matmul_id_mxfp4x4x2_q8x4x2; } else { matmul_id_job_func = htp_matvec_id_mxfp4x4x2_q8x4x2; } // Entire src1 tensor is placed into the VTCM // For other tensors we allocate N rows per thread, padded to HVX vector size octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); octx->src2_spad.size_per_thread = htp_round_up(matrix_row_counts_size + matrix_row_map_size, 256); // src0 spad is also used in dynamic quantizer to store padded src1 rows src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); if (octx->src0_spad.size_per_thread < src1_row_size_padded) { octx->src0_spad.size_per_thread = src1_row_size_padded; } octx->src2_spad.size = octx->src2_spad.size_per_thread; octx->src1_spad.size = octx->src1_spad.size_per_thread; octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; break; default: return HTP_STATUS_NO_SUPPORT; } size_t spad_size = octx->src2_spad.size + octx->src1_spad.size + octx->src0_spad.size + octx->dst_spad.size; FARF(HIGH, "matmul-id-%s : src0-spad-size %u src1-spad-size %u src2-spad-size %u dst-spad-size %u (%zu)\n", op_type, octx->src0_spad.size, octx->src1_spad.size, octx->src2_spad.size, octx->dst_spad.size, spad_size); FARF(HIGH, "matmul-id-%s : %ux%ux%ux%u * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u (0x%p, 0x%p, 0x%p)\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], src0->data, src1->data, dst->data); // Make sure the reserved vtcm size is sufficient if (octx->ctx->vtcm_size < spad_size) { FARF(ERROR, "matmul-id-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, spad_size); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.data = octx->ctx->vtcm_base; octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; octx->src2_spad.data = octx->src1_spad.data + octx->src1_spad.size; octx->dst_spad.data = octx->src2_spad.data + octx->src2_spad.size; octx->src0_nrows_per_thread = (src0_nrows + octx->n_threads - 1) / octx->n_threads; octx->src0_nrows_per_thread += (octx->src0_nrows_per_thread & 1); // round up to even if (src1_nrows > 1) { // initialize matrix_row_counts and map uint32_t * matrix_row_counts = (uint32_t *) octx->src2_spad.data + 0; struct mmid_row_mapping * matrix_rows = (void *) octx->src2_spad.data + matrix_row_counts_size; memset(matrix_row_counts, 0, n_as * sizeof(uint32_t)); // group rows by src0 matrix for (uint32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { // token idx for (uint32_t id = 0; id < n_ids; ++id) { // expert idx const uint32_t i02 = *(const uint32_t *) ((const uint8_t *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); assert(i02 >= 0 && i02 < n_as); MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) { id, iid1 }; matrix_row_counts[i02] += 1; } } } // Setup worker pool callbacks if (!(octx->flags & HTP_OPFLAGS_SKIP_QUANTIZE)) { // Run quant jobs const uint32_t n_quant_jobs = MIN(src1_nrows, octx->n_threads); octx->src1_nrows_per_thread = (src1_nrows + n_quant_jobs - 1) / n_quant_jobs; worker_pool_run_func(octx->ctx->worker_pool, 
quant_job_func, octx, n_quant_jobs); } if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { // Run matmul-id jobs const uint32_t n_matmul_jobs = octx->n_threads; worker_pool_run_func(octx->ctx->worker_pool, matmul_id_job_func, octx, n_matmul_jobs); } return HTP_STATUS_OK; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/ops-utils.h000066400000000000000000000105611512524704700224360ustar00rootroot00000000000000#ifndef OPS_UTILS_H #define OPS_UTILS_H #include "htp-msg.h" #ifndef MAX # define MAX(a, b) ((a) > (b) ? (a) : (b)) #endif #ifndef MIN # define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif static inline uint64_t htp_get_cycles() { uint64_t cycles = 0; asm volatile(" %0 = c15:14\n" : "=r"(cycles)); return cycles; } static inline uint64_t htp_get_pktcnt() { uint64_t pktcnt; asm volatile(" %0 = c19:18\n" : "=r"(pktcnt)); return pktcnt; } static inline int32_t htp_is_aligned(void * addr, uint32_t align) { return ((size_t) addr & (align - 1)) == 0; } static inline uint32_t htp_round_up(uint32_t n, uint32_t m) { return m * ((n + m - 1) / m); } // See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1. // Precompute mp (m' in the paper) and L such that division // can be computed using a multiply (high 32b of 64b result) // and a shift: // // n/d = (mulhi(n, mp) + n) >> L; struct fastdiv_values { uint32_t mp; uint32_t l; }; static inline struct fastdiv_values init_fastdiv_values(uint32_t d) { struct fastdiv_values result = { 0, 0 }; // compute L = ceil(log2(d)); while (result.l < 32 && ((uint32_t) 1 << result.l) < d) { ++(result.l); } result.mp = (uint32_t) (((uint64_t) 1 << 32) * (((uint64_t) 1 << result.l) - d) / d + 1); return result; } static inline uint32_t fastdiv(uint32_t n, const struct fastdiv_values * vals) { // Compute high 32 bits of n * mp const uint32_t hi = (uint32_t) (((uint64_t) n * vals->mp) >> 32); // mulhi(n, mp) // add n, apply bit shift return (hi + n) >> vals->l; } static inline uint32_t fastmodulo(uint32_t n, uint32_t d, const struct fastdiv_values * vals) { return n - fastdiv(n, vals) * d; } static inline void htp_l2fetch(const void * p, uint32_t height, uint32_t width, uint32_t stride) { const uint64_t control = Q6_P_combine_RR(stride, Q6_R_combine_RlRl(width, height)); asm volatile(" l2fetch(%0,%1) " : : "r"(p), "r"(control)); } static inline int32_t htp_is_one_chunk(void * addr, uint32_t n, uint32_t chunk_size) { uint32_t left_off = (size_t) addr & (chunk_size - 1); uint32_t right_off = left_off + n; return right_off <= chunk_size; } static inline void htp_dump_int8_line(char * pref, const int8_t * x, int n) { char str[1024], *p = str, *p_end = str + sizeof(str); p += snprintf(p, p_end - p, "%s: ", pref); for (int i = 0; i < n && p < p_end; i++) { p += snprintf(p, p_end - p, "%d, ", x[i]); } FARF(HIGH, "%s\n", str); } static inline void htp_dump_uint8_line(char * pref, const uint8_t * x, uint32_t n) { char str[1024], *p = str, *p_end = str + sizeof(str); p += snprintf(p, p_end - p, "%s: ", pref); for (int i = 0; i < n && p < p_end; i++) { p += snprintf(p, p_end - p, "%d, ", x[i]); } FARF(HIGH, "%s\n", str); } static inline void htp_dump_int32_line(char * pref, const int32_t * x, uint32_t n) { char str[1024], *p = str, *p_end = str + sizeof(str); p += snprintf(p, p_end - p, "%s: ", pref); for (int i = 0; i < n; i++) { p += snprintf(p, p_end - p, "%d, ", (int) x[i]); } FARF(HIGH, "%s\n", str); } static inline void htp_dump_fp16_line(char * pref, const __fp16 * x, uint32_t n) { char str[1024], *p = str, *p_end = str + sizeof(str); p += snprintf(p, p_end - p, "%s: ", pref); 
for (int i = 0; i < n; i++) { p += snprintf(p, p_end - p, "%.6f, ", (float) x[i]); } FARF(HIGH, "%s\n", str); } static inline void htp_dump_fp32_line(char * pref, const float * x, uint32_t n) { char str[1024], *p = str, *p_end = str + sizeof(str); p += snprintf(p, p_end - p, "%s: ", pref); for (int i = 0; i < n; i++) { p += snprintf(p, p_end - p, "%.6f, ", x[i]); } FARF(HIGH, "%s\n", str); } static inline void htp_dump_f32(char * pref, const float * x, uint32_t n) { uint32_t n0 = n / 16; uint32_t n1 = n % 16; uint32_t i = 0; for (; i < n0; i++) { htp_dump_fp32_line(pref, x + (16 * i), 16); } if (n1) { htp_dump_fp32_line(pref, x + (16 * i), n1); } } static inline void htp_dump_f16(char * pref, const __fp16 * x, uint32_t n) { uint32_t n0 = n / 16; uint32_t n1 = n % 16; uint32_t i = 0; for (; i < n0; i++) { htp_dump_fp16_line(pref, x + (16 * i), 16); } if (n1) { htp_dump_fp16_line(pref, x + (16 * i), n1); } } #endif /* OPS_UTILS_H */ ggml-org-ggml-3678254/src/ggml-hexagon/htp/rope-ops.c000066400000000000000000000440771512524704700222470ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" // Redefined the types GGML_ROPE_TYPE_NORMAL & GGML_ROPE_TYPE_NEOX as we cant include ggml.h #define HTP_ROPE_TYPE_NORMAL 0 #define HTP_ROPE_TYPE_NEOX 2 #define htp_rope_preamble \ const uint32_t ne00 = src0->ne[0]; \ const uint32_t ne01 = src0->ne[1]; \ const uint32_t ne02 = src0->ne[2]; \ const uint32_t ne03 = src0->ne[3]; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb00 = src0->nb[0]; \ const uint32_t nb01 = src0->nb[1]; \ const uint32_t nb02 = src0->nb[2]; \ const uint32_t nb03 = src0->nb[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; struct rope_th_ctx { int32_t n_dims; int32_t mode; int32_t n_ctx_orig; int32_t sections[4]; float freq_base; float freq_scale; float ext_factor; float attn_factor; float beta_fast; float beta_slow; float theta_scale; float corr_dims[2]; struct htp_ops_context * octx; }; static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / MAX(0.001f, high - low); return (1 - MIN(1, MAX(0, y))); } static void rope_cache_init(const float theta_base, const float freq_scale, const float * freq_factors, float * corr_dims, const uint32_t ne0, const float ext_factor, const float mscale, float * cache, const float theta_scale) { // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py float theta = theta_base; for (uint32_t i0 = 0; i0 < ne0; i0 += 2) { const float ff = freq_factors ? 
freq_factors[i0 / 2] : 1.0f; float theta_extrap = theta / ff; // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta_final = theta_interp; float mscale_final = mscale; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; theta_final = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale_final *= 1.0f + 0.1f * logf(1.0f / freq_scale); } cache[i0 + 0] = cosf(theta_final) * mscale_final; cache[i0 + 1] = sinf(theta_final) * mscale_final; theta *= theta_scale; } } #define M_PI 3.1415926535897932384626433 static void rope_corr_dims(int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float * dims) { float start = floorf(n_dims * logf(n_ctx_orig / (beta_fast * 2 * (float) M_PI)) / (2 * logf(freq_base))); float end = ceilf(n_dims * logf(n_ctx_orig / (beta_slow * 2 * (float) M_PI)) / (2 * logf(freq_base))); dims[0] = MAX(0, start); dims[1] = MIN(n_dims - 1, end); } static void init_rope_ctx(struct rope_th_ctx * rope_ctx, struct htp_ops_context * octx) { memset(rope_ctx, 0, sizeof(struct rope_th_ctx)); const int32_t * op_params = &octx->op_params[0]; rope_ctx->n_dims = ((const int32_t *) op_params)[1]; rope_ctx->mode = ((const int32_t *) op_params)[2]; rope_ctx->n_ctx_orig = ((const int32_t *) op_params)[4]; memcpy(&rope_ctx->freq_base, (int32_t *) op_params + 5, sizeof(float)); memcpy(&rope_ctx->freq_scale, (int32_t *) op_params + 6, sizeof(float)); memcpy(&rope_ctx->ext_factor, (int32_t *) op_params + 7, sizeof(float)); memcpy(&rope_ctx->attn_factor, (int32_t *) op_params + 8, sizeof(float)); memcpy(&rope_ctx->beta_fast, (int32_t *) op_params + 9, sizeof(float)); memcpy(&rope_ctx->beta_slow, (int32_t *) op_params + 10, sizeof(float)); memcpy(&rope_ctx->sections, (int32_t *) op_params + 11, sizeof(int) * 4); rope_ctx->theta_scale = powf(rope_ctx->freq_base, -2.0f / rope_ctx->n_dims); rope_corr_dims(rope_ctx->n_dims, rope_ctx->n_ctx_orig, rope_ctx->freq_base, rope_ctx->beta_fast, rope_ctx->beta_slow, rope_ctx->corr_dims); rope_ctx->octx = octx; FARF(HIGH, "rope-f32 n_dims:%d, ext_factor:%.6f, theta_scale:%.6f, attn_factor:%.6f\n", rope_ctx->n_dims, rope_ctx->ext_factor, rope_ctx->theta_scale, rope_ctx->attn_factor); } static void hvx_calc_rope_neox_f32(const float * restrict src0, float * restrict dst, const int num_elems, const float * restrict theta_cache) { // for (int i = 0; i < num_elems; i += 2) { //const float cos_theta = theta_cache[i + 0]; //const float sin_theta = theta_cache[i + 1]; //const float x0 = src[0]; //const float x1 = src[num_elems/2]; //dst[0] = x0*cos_theta - x1*sin_theta; //dst[num_elems/2] = x0*sin_theta + x1*cos_theta; //src += 1; //dst += 1; // } const uint8_t * restrict src0_curr = (const uint8_t *) src0; const uint8_t * restrict theta_curr = (const uint8_t *) theta_cache; uint8_t * restrict dst_curr = (uint8_t *) dst; int step_of_1 = num_elems >> 6; // 6 because we process two vectors at once int half_size = (sizeof(float) * (num_elems / 2)); for (int i = 0; i < step_of_1; i++) { HVX_Vector v0 = *(HVX_Vector *) src0_curr; HVX_Vector v1 = *(HVX_Vector *) (src0_curr + half_size); HVX_Vector v2 = *(HVX_Vector *) theta_curr; HVX_Vector v3 = *(HVX_Vector *) (theta_curr + VLEN); HVX_VectorPair vcos_sin = Q6_W_vdeal_VVR(v3, v2, -4); // vcos_sin[0] = cos_theta, vcos_sin[1] = sin_theta HVX_Vector vx0_c = Q6_Vqf32_vmpy_VsfVsf(v0, Q6_V_lo_W(vcos_sin)); HVX_Vector vx0_s = 
Q6_Vqf32_vmpy_VsfVsf(v0, Q6_V_hi_W(vcos_sin)); HVX_Vector vx1_c = Q6_Vqf32_vmpy_VsfVsf(v1, Q6_V_lo_W(vcos_sin)); HVX_Vector vx1_s = Q6_Vqf32_vmpy_VsfVsf(v1, Q6_V_hi_W(vcos_sin)); HVX_Vector v4 = Q6_Vqf32_vsub_Vqf32Vqf32(vx0_c, vx1_s); HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(vx0_s, vx1_c); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v4); *(HVX_Vector *) (dst_curr + half_size) = Q6_Vsf_equals_Vqf32(v5); src0_curr += VLEN; theta_curr += 2 * VLEN; dst_curr += VLEN; } } static void hvx_calc_rope_f32(const float * restrict src0, float * restrict dst, const int num_elems, const float * restrict theta_cache) { // for (int i = 0; i < num_elems; i += 2) { //const float cos_theta = theta_cache[i + 0]; //const float sin_theta = theta_cache[i + 1]; //const float x0 = src[0]; //const float x1 = src[1]; //dst[0] = x0*cos_theta - x1*sin_theta; //dst[1] = x0*sin_theta + x1*cos_theta; //src += 2; //dst += 2; // } const uint8_t * restrict src0_curr = (const uint8_t *) src0; const uint8_t * restrict theta_curr = (const uint8_t *) theta_cache; uint8_t * restrict dst_curr = (uint8_t *) dst; int step_of_1 = num_elems >> 6; // 6 because we process two vectors at once for (int i = 0; i < step_of_1; i++) { HVX_Vector v0 = *(HVX_Vector *) src0_curr; HVX_Vector v1 = *(HVX_Vector *) (src0_curr + VLEN); HVX_Vector v2 = *(HVX_Vector *) theta_curr; HVX_Vector v3 = *(HVX_Vector *) (theta_curr + VLEN); HVX_VectorPair vx0_x1 = Q6_W_vdeal_VVR(v1, v0, -4); // vx0_x1[0] = x0, vx0_x1[1] = x1 HVX_VectorPair vcos_sin = Q6_W_vdeal_VVR(v3, v2, -4); // vcos_sin[0] = cos_theta, vcos_sin[1] = sin_theta HVX_Vector vx0_c = Q6_Vqf32_vmpy_VsfVsf(Q6_V_lo_W(vx0_x1), Q6_V_lo_W(vcos_sin)); HVX_Vector vx0_s = Q6_Vqf32_vmpy_VsfVsf(Q6_V_lo_W(vx0_x1), Q6_V_hi_W(vcos_sin)); HVX_Vector vx1_c = Q6_Vqf32_vmpy_VsfVsf(Q6_V_hi_W(vx0_x1), Q6_V_lo_W(vcos_sin)); HVX_Vector vx1_s = Q6_Vqf32_vmpy_VsfVsf(Q6_V_hi_W(vx0_x1), Q6_V_hi_W(vcos_sin)); HVX_Vector v4 = Q6_Vqf32_vsub_Vqf32Vqf32(vx0_c, vx1_s); HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(vx0_s, vx1_c); HVX_VectorPair vstore = Q6_W_vshuff_VVR(Q6_Vsf_equals_Vqf32(v5), Q6_Vsf_equals_Vqf32(v4), -4); *(HVX_Vector *) dst_curr = Q6_V_lo_W(vstore); *(HVX_Vector *) (dst_curr + VLEN) = Q6_V_hi_W(vstore); src0_curr += 2 * VLEN; theta_curr += 2 * VLEN; dst_curr += 2 * VLEN; } } static void rope_hex_f32(struct rope_th_ctx * rope_ctx, const uint32_t ir0, const uint32_t ir1, int nth, int ith, const int opt_path) { struct htp_ops_context * octx = rope_ctx->octx; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; const struct htp_tensor * src2 = &octx->src2; struct htp_tensor * dst = &octx->dst; const int32_t mode = rope_ctx->mode; const bool is_neox = mode & HTP_ROPE_TYPE_NEOX; htp_rope_preamble; const int32_t * pos = (const int32_t *) src1->data; float * wp0 = (float *) (octx->src0_spad.data + (ith * nb01)); const float * freq_factors = NULL; if (src2 != NULL) { freq_factors = (const float *) src2->data; } const uint32_t i1_end = MIN(ir1, ne1); const int32_t half_dims = rope_ctx->n_dims / 2; const size_t remain_bytes = (ne0 - rope_ctx->n_dims) * sizeof(float); for (uint32_t i3 = 0; i3 < ne3; i3++) { // batch for (uint32_t i2 = 0; i2 < ne2; i2++) { // seq-len const int32_t p = pos[i2]; rope_cache_init(p, rope_ctx->freq_scale, freq_factors, rope_ctx->corr_dims, ne0, rope_ctx->ext_factor, rope_ctx->attn_factor, wp0, rope_ctx->theta_scale); for (uint32_t i1 = ir0; i1 < i1_end; i1++) { // attn-heads const float * src = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * 
nb01); float * dst_data = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); const float * src_loc = src; float * dst_data_loc = dst_data; if (1 == opt_path) { if (is_neox) { hvx_calc_rope_neox_f32(src_loc, dst_data_loc, rope_ctx->n_dims, wp0); } else { hvx_calc_rope_f32(src_loc, dst_data_loc, rope_ctx->n_dims, wp0); } src_loc += rope_ctx->n_dims; dst_data_loc += rope_ctx->n_dims; } else { for (uint32_t i0 = 0; i0 < rope_ctx->n_dims; i0 += 2) { const float cos_theta = wp0[i0 + 0]; const float sin_theta = wp0[i0 + 1]; if (is_neox) { const float x0 = src_loc[0]; const float x1 = src_loc[half_dims]; dst_data_loc[0] = x0 * cos_theta - x1 * sin_theta; dst_data_loc[half_dims] = x0 * sin_theta + x1 * cos_theta; src_loc += 1; dst_data_loc += 1; } else { const float x0 = src_loc[0]; const float x1 = src_loc[1]; dst_data_loc[0] = x0 * cos_theta - x1 * sin_theta; dst_data_loc[1] = x0 * sin_theta + x1 * cos_theta; src_loc += 2; dst_data_loc += 2; } } src_loc += (is_neox ? half_dims : 0); dst_data_loc += (is_neox ? half_dims : 0); } // TODO: use simd to speed up the remaining elements copy memcpy(dst_data_loc, src_loc, remain_bytes); } } } } static void rope_job_f32_per_thread(struct rope_th_ctx * rope_ctx, int nth, int ith) { struct htp_ops_context * octx = rope_ctx->octx; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; struct htp_tensor * dst = &octx->dst; htp_rope_preamble; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_nrows_per_thread = octx->src0_nrows_per_thread; const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); int is_aligned = 1; int opt_path = 0; if ((0 == htp_is_aligned((void *) src0->data, VLEN)) || (0 == htp_is_aligned((void *) src1->data, VLEN)) || (0 == htp_is_aligned((void *) dst->data, VLEN))) { FARF(HIGH, "rope-f32: unaligned addresses in rope op, possibly slower execution\n"); is_aligned = 0; } if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { opt_path = 1; } rope_hex_f32(rope_ctx, src0_start_row, src0_end_row, nth, ith, opt_path); t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "rope-f32: %d/%d/%d: (%u:%u) usec %u\n", ith, nth, opt_path, src0_start_row, src0_end_row, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void rope_job_dispatcher_f32(unsigned int n, unsigned int i, void * data) { struct rope_th_ctx * rope_ctx = (struct rope_th_ctx *) data; rope_job_f32_per_thread(rope_ctx, n, i); } static int execute_op_rope_f32(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; const struct htp_tensor * src2 = &octx->src2; struct htp_tensor * dst = &octx->dst; worker_callback_t op_func; const char * op_type = NULL; struct rope_th_ctx rope_ctx; switch (octx->op) { case HTP_OP_ROPE: op_func = rope_job_dispatcher_f32; op_type = "rope-f32"; init_rope_ctx(&rope_ctx, octx); break; default: FARF(ERROR, "Unsupported Op %u\n", octx->op); return HTP_STATUS_NO_SUPPORT; } const uint32_t n_threads = octx->n_threads; const size_t src0_row_size = src0->nb[1]; const size_t src1_row_size = src0_row_size; const size_t dst_row_size = dst->nb[1]; // VTCM scratchpads for all tensors // N rows per thread, padded to HVX vector size octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; 
octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; octx->src1_spad.size = htp_round_up(src1_row_size, 128) * n_threads; size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; if (src2->ne[0]) { FARF(HIGH, "%s: %ux%ux%ux%u (x %ux%ux%ux%u x %ux%ux%ux%u) -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u " "dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); } else { FARF(HIGH, "%s: %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); } // Make sure the reserved vtcm size is sufficient if (octx->ctx->vtcm_size < spad_size) { FARF(ERROR, "%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, spad_size); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.data = octx->ctx->vtcm_base; octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { uint32_t n_jobs = MIN(n_threads, src0_nrows); octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; worker_pool_run_func(octx->ctx->worker_pool, op_func, &rope_ctx, n_jobs); } return err; } int op_rope(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; switch (octx->src0.type) { case HTP_TYPE_F32: err = execute_op_rope_f32(octx); break; default: err = HTP_STATUS_NO_SUPPORT; break; } return err; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/softmax-ops.c000066400000000000000000000360701512524704700227550ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" #define htp_softmax_preamble3 \ const uint32_t ne00 = src0->ne[0]; \ const uint32_t ne01 = src0->ne[1]; \ const uint32_t ne02 = src0->ne[2]; \ const uint32_t ne03 = src0->ne[3]; \ \ const uint32_t nb00 = src0->nb[0]; \ const uint32_t nb01 = src0->nb[1]; \ const uint32_t nb02 = src0->nb[2]; \ const uint32_t nb03 = src0->nb[3]; \ \ const uint32_t ne10 = (src1->ne[0]) ? src1->ne[0] : 1; \ const uint32_t ne11 = (src1->ne[0]) ? src1->ne[1] : 1; \ const uint32_t ne12 = (src1->ne[0]) ? src1->ne[2] : 1; \ const uint32_t ne13 = (src1->ne[0]) ? src1->ne[3] : 1; \ \ const uint32_t nb10 = (src1->ne[0]) ? src1->nb[0] : 1; \ const uint32_t nb11 = (src1->ne[0]) ? src1->nb[1] : 1; \ const uint32_t nb12 = (src1->ne[0]) ? src1->nb[2] : 1; \ const uint32_t nb13 = (src1->ne[0]) ? 
src1->nb[3] : 1; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; struct softmax_th_ctx { bool use_f16; bool use_src1; uint32_t n_head; uint32_t n_head_log2; float scale; float max_bias; float m0; float m1; struct htp_ops_context * octx; }; static void init_softmax_ctx(struct softmax_th_ctx * softmax_ctx, struct htp_ops_context * octx) { const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; memset(softmax_ctx, 0, sizeof(struct softmax_th_ctx)); memcpy(&softmax_ctx->scale, (float *) octx->op_params, sizeof(float)); memcpy(&softmax_ctx->max_bias, (float *) octx->op_params + 1, sizeof(float)); softmax_ctx->n_head = src0->ne[2]; softmax_ctx->n_head_log2 = 1u << (uint32_t) floor(log2(softmax_ctx->n_head)); softmax_ctx->m0 = powf(2.0f, -(softmax_ctx->max_bias) / softmax_ctx->n_head_log2); softmax_ctx->m1 = powf(2.0f, -(softmax_ctx->max_bias / 2.0f) / softmax_ctx->n_head_log2); softmax_ctx->use_src1 = (src1->ne[0] != 0); softmax_ctx->use_f16 = (src1->ne[0] != 0) && (src1->type == HTP_TYPE_F16); softmax_ctx->octx = octx; } static void hvx_fast_softmax_prep_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, float scale, const uint8_t * restrict mask, float slope) { const uint8_t * restrict src_curr = src; uint8_t * restrict dst_curr = dst; const uint8_t * restrict mask_curr = mask; HVX_Vector scale_vec = hvx_vec_splat_fp32(scale); HVX_Vector slope_vec = hvx_vec_splat_fp32(slope); int step_of_1 = num_elems >> 5; #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { HVX_Vector v1 = *(HVX_Vector *) src_curr; HVX_Vector v3 = *(HVX_Vector *) mask_curr; HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, scale_vec); HVX_Vector v4 = Q6_Vqf32_vmpy_VsfVsf(v3, slope_vec); HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(v2, v4); *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v5); src_curr += VLEN; dst_curr += VLEN; mask_curr += VLEN; } } static void hvx_fast_softmax_f32(const uint8_t * restrict src, uint8_t * restrict dst, uint8_t * restrict pad, const int num_elems) { const HVX_Vector * restrict v_src = (HVX_Vector *) src; HVX_Vector * restrict v_pad = (HVX_Vector *) pad; HVX_Vector * restrict v_dst = (HVX_Vector *) dst; HVX_Vector sum_vec = Q6_V_vsplat_R(0x00000000); HVX_Vector max_vec = hvx_vec_splat_fp32(((const float *) src)[0]); HVX_Vector zero_v = Q6_V_vzero(); HVX_Vector one_v = hvx_vec_splat_fp32(1.0); int step_of_1 = num_elems >> 5; #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { HVX_Vector v1 = v_src[i]; max_vec = Q6_Vsf_vmax_VsfVsf(max_vec, v1); } HVX_Vector v = hvx_vec_reduce_max_fp32(max_vec); max_vec = hvx_vec_repl4(v); #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { HVX_Vector v1 = v_src[i]; HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v1, max_vec); HVX_Vector v3 = hvx_vec_exp_fp32(Q6_Vsf_equals_Vqf32(v2)); sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), v3); v_pad[i] = v3; } v = hvx_vec_qf32_reduce_sum(sum_vec); sum_vec = hvx_vec_repl4(Q6_Vsf_equals_Vqf32(v)); HVX_VectorPred pos_sum = Q6_Q_vcmp_gt_VwVw(sum_vec, zero_v); HVX_Vector v4 = hvx_vec_inverse_fp32(sum_vec); HVX_Vector scale_vec = Q6_V_vmux_QVV(pos_sum, v4, one_v); #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { HVX_Vector v1 = v_pad[i]; HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, scale_vec); v_dst[i] = Q6_Vsf_equals_Vqf32(v2); } 
} static float hvx_softmax_f32(const uint8_t * restrict src, uint8_t * restrict dst, uint8_t * restrict spad, const int num_elems, const float max) { hvx_sub_scalar_f32(src, max, spad, num_elems); hvx_exp_f32(spad, dst, num_elems, false); float sum = hvx_self_sum_f32(dst, num_elems); return sum; } static void softmax_htp_f32(int nth, int ith, struct softmax_th_ctx * softmax_ctx, int opt_path) { struct htp_ops_context * octx = softmax_ctx->octx; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; const struct htp_tensor * dst = &octx->dst; htp_softmax_preamble3; uint8_t * src0_spad_data = octx->src0_spad.data + (ith * nb01); uint8_t * src1_spad_data = octx->src1_spad.data + (ith * nb01); uint8_t * dst_spad_data = octx->dst_spad.data + (ith * nb1); float * wp0 = (float *) src0_spad_data; float * wp1 = (float *) src1_spad_data; float * wp2 = (float *) dst_spad_data; for (uint32_t i03 = 0; i03 < ne03; i03++) { for (uint32_t i02 = 0; i02 < ne02; i02++) { for (uint32_t i01 = ith; i01 < ne01; i01 += nth) { const uint32_t i11 = i01; const uint32_t i12 = i02 % ne12; const uint32_t i13 = i03 % ne13; // ALiBi const uint32_t h = i02; // head const float slope = (softmax_ctx->max_bias > 0.0f) ? h < softmax_ctx->n_head_log2 ? powf(softmax_ctx->m0, h + 1) : powf(softmax_ctx->m1, 2 * (h - softmax_ctx->n_head_log2) + 1) : 1.0f; float * sp = (float *) ((char *) octx->src0.data + i01 * nb01 + i02 * nb02 + i03 * nb03); float * dp = (float *) ((char *) octx->dst.data + i01 * nb1 + i02 * nb2 + i03 * nb3); // broadcast the mask across rows __fp16 * mp_f16 = (softmax_ctx->use_src1) ? (__fp16 *) ((char *) octx->src1.data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; float * mp_f32 = (softmax_ctx->use_src1) ? (float *) ((char *) octx->src1.data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; if ((1 == opt_path) && (mp_f32) && !(softmax_ctx->use_f16)) { hvx_fast_softmax_prep_f32((const uint8_t *) sp, (uint8_t *) wp0, ne00, softmax_ctx->scale, (const uint8_t *) mp_f32, slope); } else { hvx_scale_f32((const uint8_t *) sp, (uint8_t *) wp0, ne00, softmax_ctx->scale); if (mp_f32) { if (softmax_ctx->use_f16) { for (int i = 0; i < ne00; ++i) { wp0[i] += slope * (float) mp_f16[i]; } } else { for (int i = 0; i < ne00; ++i) { wp0[i] += slope * mp_f32[i]; } } } } if (1 == opt_path) { hvx_fast_softmax_f32((const uint8_t *) wp0, (uint8_t *) dp, (uint8_t *) wp1, ne00); } else { float max = hvx_self_max_f32((const uint8_t *) wp0, ne00); float sum = hvx_softmax_f32((const uint8_t *) wp0, (uint8_t *) wp2, (uint8_t *) wp1, ne00, max); sum = sum > 0.0 ? 
(1.0 / sum) : 1; hvx_scale_f32((const uint8_t *) wp2, (uint8_t *) dp, ne00, sum); } } } } } static void softmax_job_f32_per_thread(struct softmax_th_ctx * softmax_ctx, int nth, int ith) { struct htp_ops_context * octx = softmax_ctx->octx; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; struct htp_tensor * dst = &octx->dst; htp_softmax_preamble3; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_nrows_per_thread = octx->src0_nrows_per_thread; const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); int is_aligned = 1; int opt_path = 0; if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { is_aligned = 0; FARF(HIGH, "softmax-f32: unaligned addresses in elementwise op, possibly slower execution\n"); } if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { opt_path = 1; } softmax_htp_f32(nth, ith, softmax_ctx, opt_path); t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "softmax-f32 %d/%d/%d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, softmax_ctx->use_f16, opt_path, ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void softmax_job_dispatcher_f32(unsigned int n, unsigned int i, void * p_data) { struct softmax_th_ctx * p_softmax_ctx = (struct softmax_th_ctx *) p_data; softmax_job_f32_per_thread(p_softmax_ctx, n, i); } static int execute_op_softmax_f32(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; const struct htp_tensor * src0 = &octx->src0; const struct htp_tensor * src1 = &octx->src1; struct htp_tensor * dst = &octx->dst; worker_callback_t op_func; const char * op_type = NULL; struct softmax_th_ctx softmax_ctx; switch (octx->op) { case HTP_OP_SOFTMAX: op_func = softmax_job_dispatcher_f32; op_type = "softmax-f32"; init_softmax_ctx(&softmax_ctx, octx); break; default: FARF(ERROR, "Unsupported Op %u\n", octx->op); return HTP_STATUS_NO_SUPPORT; } const uint32_t n_threads = octx->n_threads; const size_t src0_row_size = src0->nb[1]; const size_t src1_row_size = src0_row_size; const size_t dst_row_size = dst->nb[1]; // VTCM scratchpads for all tensors // N rows per thread, padded to HVX vector size octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; octx->src1_spad.size = htp_round_up(src1_row_size, 128) * n_threads; size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; if (src1->ne[0]) { FARF(HIGH, "%s: %ux%ux%ux%u x %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); } else { FARF(HIGH, "%s: %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); } // Make sure the reserved vtcm size is sufficient if (octx->ctx->vtcm_size < spad_size) { FARF(ERROR, "%s : current 
VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, spad_size); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.data = octx->ctx->vtcm_base; octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { uint32_t n_jobs = MIN(n_threads, src0_nrows); octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; worker_pool_run_func(octx->ctx->worker_pool, op_func, &softmax_ctx, n_jobs); } return err; } int op_softmax(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; switch (octx->src0.type) { case HTP_TYPE_F32: err = execute_op_softmax_f32(octx); break; default: err = HTP_STATUS_NO_SUPPORT; break; } return err; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/unary-ops.c000066400000000000000000000220311512524704700224220ustar00rootroot00000000000000#pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wunused-but-set-variable" #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include #include #include #include #include #include #include #include #include #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "htp-ctx.h" #include "htp-dma.h" #include "htp-msg.h" #include "htp-ops.h" #include "hvx-utils.h" #include "ops-utils.h" #define htp_unary_preamble \ const uint32_t ne00 = src->ne[0]; \ const uint32_t ne01 = src->ne[1]; \ const uint32_t ne02 = src->ne[2]; \ const uint32_t ne03 = src->ne[3]; \ \ const uint32_t ne0 = dst->ne[0]; \ const uint32_t ne1 = dst->ne[1]; \ const uint32_t ne2 = dst->ne[2]; \ const uint32_t ne3 = dst->ne[3]; \ \ const uint32_t nb00 = src->nb[0]; \ const uint32_t nb01 = src->nb[1]; \ const uint32_t nb02 = src->nb[2]; \ const uint32_t nb03 = src->nb[3]; \ \ const uint32_t nb0 = dst->nb[0]; \ const uint32_t nb1 = dst->nb[1]; \ const uint32_t nb2 = dst->nb[2]; \ const uint32_t nb3 = dst->nb[3]; static void hvx_fast_rms_norm_f32(const uint8_t * restrict src, uint8_t * restrict dst, uint8_t * restrict pad, const int num_elems, float epsilon) { const HVX_Vector * restrict v_src = (HVX_Vector *) src; HVX_Vector * restrict v_dst = (HVX_Vector *) dst; HVX_Vector sum_v = Q6_V_vsplat_R(0x00000000); HVX_Vector epsilon_v = hvx_vec_splat_fp32(epsilon); int step_of_1 = num_elems >> 5; #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { HVX_Vector v1 = v_src[i]; HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, v1); sum_v = Q6_Vqf32_vadd_Vqf32Vqf32(sum_v, v2); } HVX_Vector reduced_sum = hvx_vec_qf32_reduce_sum(sum_v); sum_v = hvx_vec_repl4(Q6_Vsf_equals_Vqf32(reduced_sum)); HVX_Vector t_v = hvx_vec_splat_fp32((float) num_elems); HVX_Vector denom_v = hvx_vec_inverse_fp32(t_v); HVX_Vector mean_v = Q6_Vqf32_vmpy_VsfVsf(sum_v, denom_v); HVX_Vector mean_epsilon_v = Q6_Vqf32_vadd_Vqf32Vsf(mean_v, epsilon_v); HVX_Vector scale_v = hvx_vec_rsqrt_fp32(Q6_Vsf_equals_Vqf32(mean_epsilon_v)); #pragma unroll(4) for (int i = 0; i < step_of_1; i++) { HVX_Vector v1 = v_src[i]; HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, scale_v); v_dst[i] = Q6_Vsf_equals_Vqf32(v2); } } static void rms_norm_htp_f32(const float * restrict src, float * restrict dst, uint8_t * restrict spad, const uint32_t num_rows, const uint32_t row_elems, const size_t row_size, int32_t * op_params, int opt_path) { float epsilon = 0.f; memcpy(&epsilon, op_params, sizeof(float)); for (uint32_t ir = 0; ir < num_rows; ir++) { const float 
* restrict src_local = src + (ir * row_elems); float * restrict dst_local = dst + (ir * row_elems); if (ir + 1 < num_rows) { htp_l2fetch(src_local + row_elems, 1, row_size, row_size); } if (1 == opt_path) { hvx_fast_rms_norm_f32((const uint8_t *) src_local, (uint8_t *) dst_local, spad, row_elems, epsilon); } else { float sum = hvx_sum_of_squares_f32((const uint8_t *) src_local, row_elems); const float mean = sum / row_elems; const float scale = 1.0f / sqrtf(mean + epsilon); hvx_scale_f32((const uint8_t *) src_local, (uint8_t *) dst_local, row_elems, scale); } } } static void unary_job_f32_per_thread(const struct htp_tensor * src, struct htp_tensor * dst, uint8_t * spad, int htp_op, int32_t * op_params, uint32_t nth, uint32_t ith, uint32_t src0_nrows_per_thread) { htp_unary_preamble; const size_t src0_row_size = nb01; const size_t dst_row_size = nb1; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); // no work for this thread if (src0_start_row >= src0_end_row) { return; } uint64_t t1, t2; t1 = HAP_perf_get_qtimer_count(); int is_aligned = 1; int opt_path = 0; if ((0 == htp_is_aligned((void *) src->data, VLEN)) || (0 == htp_is_aligned((void *) dst->data, VLEN))) { is_aligned = 0; FARF(HIGH, "unary-f32: unaligned addresses in unary op, possibly slower execution\n"); } if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { opt_path = 1; } const uint8_t * restrict data_src = (const uint8_t *) src->data; uint8_t * restrict data_dst = (uint8_t *) dst->data; const float * restrict src_th = (float *) (data_src + (src0_start_row * src0_row_size)); float * restrict dst_th = (float *) (data_dst + (src0_start_row * dst_row_size)); uint8_t * restrict spad_th = (uint8_t *) spad + (ith * nb01); switch (htp_op) { case HTP_OP_RMS_NORM: rms_norm_htp_f32(src_th, dst_th, spad_th, src0_end_row - src0_start_row, ne0, nb1, op_params, opt_path); break; default: break; } t2 = HAP_perf_get_qtimer_count(); FARF(HIGH, "unary-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, src->ne[0], src->ne[1], src->ne[2], src->ne[3], src0_start_row, src0_end_row, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } static void unary_job_dispatcher_f32(unsigned int n, unsigned int i, void * data) { struct htp_ops_context * octx = (struct htp_ops_context *) data; unary_job_f32_per_thread(&octx->src0, &octx->dst, octx->src0_spad.data, octx->op, octx->op_params, n, i, octx->src0_nrows_per_thread); } static int execute_op_unary_f32(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; const struct htp_tensor * src0 = &octx->src0; struct htp_tensor * dst = &octx->dst; worker_callback_t unary_op_func; const char * op_type = NULL; switch (octx->op) { case HTP_OP_RMS_NORM: unary_op_func = unary_job_dispatcher_f32; op_type = "rmsnorm-f32"; break; default: FARF(ERROR, "Unsupported unary Op %u\n", octx->op); return HTP_STATUS_NO_SUPPORT; } const int n_threads = octx->n_threads; const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; const size_t src0_row_size = src0->nb[1]; const size_t dst_row_size = dst->nb[1]; // VTCM scratchpads for all tensors octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; size_t spad_size = octx->src0_spad.size + octx->dst_spad.size; FARF(HIGH, "%s: (%ux%ux%ux%u) -> (%ux%ux%ux%u) : 
src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); // Make sure the reserved vtcm size is sufficient if (octx->ctx->vtcm_size < spad_size) { FARF(ERROR, "unary-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, spad_size); return HTP_STATUS_VTCM_TOO_SMALL; } octx->src0_spad.data = octx->ctx->vtcm_base; octx->dst_spad.data = octx->src0_spad.data + octx->src0_spad.size; if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { uint32_t n_jobs = MIN(n_threads, src0_nrows); octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; worker_pool_run_func(octx->ctx->worker_pool, unary_op_func, octx, n_jobs); } return err; } int op_unary(struct htp_ops_context * octx) { int err = HTP_STATUS_OK; switch (octx->src0.type) { case HTP_TYPE_F32: err = execute_op_unary_f32(octx); break; default: err = HTP_STATUS_NO_SUPPORT; break; } return err; } ggml-org-ggml-3678254/src/ggml-hexagon/htp/worker-pool.c000066400000000000000000000205761512524704700227610ustar00rootroot00000000000000#include "worker-pool.h" #include #include #include #include #include #include #ifdef HTP_DEBUG # define FARF_HIGH 1 #endif #include "HAP_farf.h" #define WORKER_THREAD_STACK_SZ (2 * 16384) #define LOWEST_USABLE_QURT_PRIO (254) struct worker_pool_s; // internal structure kept in thread-local storage per instance of worker pool typedef struct { struct worker_pool_s * pool; unsigned int id; } worker_context_t; // internal structure kept in thread-local storage per instance of worker pool typedef struct worker_pool_s { worker_pool_job_t job[MAX_NUM_WORKERS]; // list of job descriptors qurt_thread_t thread[MAX_NUM_WORKERS]; // thread ID's of the workers worker_context_t context[MAX_NUM_WORKERS]; // worker contexts void * stack[MAX_NUM_WORKERS]; // thread stack pointers unsigned int n_threads; // number of workers in this pool atomic_uint seqn; // seqno used to detect new jobs atomic_uint next_job; // next job index atomic_uint n_pending; // number of pending jobs atomic_uint n_jobs; // number of current jobs atomic_bool killed; // threads need to exit } worker_pool_t; static void worker_pool_main(void * context) { worker_context_t * me = (worker_context_t *) context; worker_pool_t * pool = me->pool; FARF(HIGH, "worker-pool: thread %u started", me->id); unsigned int prev_seqn = 0; while (!atomic_load(&pool->killed)) { unsigned int seqn = atomic_load(&pool->seqn); if (seqn == prev_seqn) { // Nothing to do qurt_futex_wait(&pool->seqn, prev_seqn); continue; } // New job prev_seqn = seqn; unsigned int n = atomic_load(&pool->n_jobs); unsigned int i = atomic_fetch_add(&pool->next_job, 1); if (i >= n) { // Spurios wakeup continue; } pool->job[i].func(n, i, pool->job[i].data); atomic_fetch_sub(&pool->n_pending, 1); } FARF(HIGH, "worker-pool: thread %u stopped", me->id); } AEEResult worker_pool_init_with_stack_size(worker_pool_context_t * context, uint32_t n_threads, uint32_t stack_size) { int err = 0; if (NULL == context) { FARF(ERROR, "NULL context passed to worker_pool_init()."); return AEE_EBADPARM; } // Allocations int size = (stack_size * n_threads) + (sizeof(worker_pool_t)); unsigned char * mem_blob = (unsigned char *) malloc(size); if (!mem_blob) { FARF(ERROR, "Could not allocate memory for worker pool!!"); return AEE_ENOMEMORY; } worker_pool_t * me = (worker_pool_t *) (mem_blob + stack_size * n_threads); // name for 
the first worker, useful in debugging threads char name[19]; snprintf(name, 12, "0x%8x:", (int) me); strcat(name, "worker0"); me->n_threads = n_threads; // initializations for (unsigned int i = 0; i < me->n_threads; i++) { me->stack[i] = NULL; me->thread[i] = 0; me->context[i].id = i; me->context[i].pool = me; } // initialize job queue me->n_pending = 0; me->n_jobs = 0; me->next_job = 0; me->seqn = 0; me->killed = 0; // launch the workers qurt_thread_attr_t attr; qurt_thread_attr_init(&attr); for (unsigned int i = 0; i < me->n_threads; i++) { // set up stack me->stack[i] = mem_blob; mem_blob += stack_size; qurt_thread_attr_set_stack_addr(&attr, me->stack[i]); qurt_thread_attr_set_stack_size(&attr, stack_size); // set up name qurt_thread_attr_set_name(&attr, name); name[17] = (name[17] + 1); // name threads context:worker0, context:worker1, .. (recycle at 9, but num threads should be less than that anyway) if (name[17] > '9') { name[17] = '0'; } // set up priority - by default, match the creating thread's prio int prio = qurt_thread_get_priority(qurt_thread_get_id()); if (prio < 1) { prio = 1; } if (prio > LOWEST_USABLE_QURT_PRIO) { prio = LOWEST_USABLE_QURT_PRIO; } qurt_thread_attr_set_priority(&attr, prio); // launch err = qurt_thread_create(&me->thread[i], &attr, worker_pool_main, (void *) &me->context[i]); if (err) { FARF(ERROR, "Could not launch worker threads!"); worker_pool_release((worker_pool_context_t *) &me); return AEE_EQURTTHREADCREATE; } } *context = (worker_pool_context_t *) me; return AEE_SUCCESS; } AEEResult worker_pool_init(worker_pool_context_t * context, uint32_t n_threads) { return worker_pool_init_with_stack_size(context, n_threads, WORKER_THREAD_STACK_SZ); } // clean up worker pool void worker_pool_release(worker_pool_context_t * context) { worker_pool_t * me = (worker_pool_t *) *context; // if no worker pool exists, return error. 
if (NULL == me) { return; } atomic_store(&me->killed, 1); atomic_fetch_add(&me->seqn, 1); qurt_futex_wake(&me->seqn, me->n_threads); // de-initializations for (unsigned int i = 0; i < me->n_threads; i++) { if (me->thread[i]) { int status; (void) qurt_thread_join(me->thread[i], &status); } } // free allocated memory (were allocated as a single buffer starting at stack[0]) if (me->stack[0]) { free(me->stack[0]); } *context = NULL; } // run jobs AEEResult worker_pool_run_jobs(worker_pool_context_t context, worker_pool_job_t * job, unsigned int n) { worker_pool_t * me = (worker_pool_t *) context; if (NULL == me) { FARF(ERROR, "worker-pool: invalid context"); return AEE_EBADPARM; } if (n > me->n_threads) { FARF(ERROR, "worker-pool: invalid number of jobs %u for n-threads %u", n, me->n_threads); return AEE_EBADPARM; } memcpy(me->job, job, sizeof(worker_pool_job_t) * n); if (n > 1) { atomic_store(&me->next_job, 1); atomic_store(&me->n_jobs, n); atomic_store(&me->n_pending, n - 1); // wake up workers atomic_fetch_add(&me->seqn, 1); qurt_futex_wake(&me->seqn, n - 1); } // main thread runs job #0 me->job[0].func(n, 0, me->job[0].data); if (n > 1) { while (atomic_load(&me->n_pending)) ; } return 0; } // run func AEEResult worker_pool_run_func(worker_pool_context_t context, worker_callback_t func, void * data, unsigned int n) { worker_pool_job_t job[n]; for (unsigned int i = 0; i < n; i++) { job[i].func = func; job[i].data = data; } return worker_pool_run_jobs(context, job, n); } AEEResult worker_pool_set_thread_priority(worker_pool_context_t context, unsigned int prio) { worker_pool_t * me = (worker_pool_t *) context; // if no worker pool exists, return error. if (!me) { return AEE_ENOMORE; } int result = AEE_SUCCESS; if (prio < 1) { prio = 1; } if (prio > LOWEST_USABLE_QURT_PRIO) { prio = LOWEST_USABLE_QURT_PRIO; } for (unsigned int i = 0; i < me->n_threads; i++) { int res = qurt_thread_set_priority(me->thread[i], (unsigned short) prio); if (0 != res) { result = AEE_EBADPARM; FARF(ERROR, "QURT failed to set priority of thread %d, ERROR = %d", me->thread[i], res); } } return result; } AEEResult worker_pool_retrieve_thread_id(worker_pool_context_t context, unsigned int * tids) { worker_pool_t * me = (worker_pool_t *) context; if (!me) { FARF(ERROR, "worker-pool: invalid context"); return AEE_EBADPARM; ; } for (int i = 0; i < me->n_threads; i++) { tids[i] = me->thread[i]; } return AEE_SUCCESS; } AEEResult worker_pool_get_thread_priority(worker_pool_context_t context, unsigned int * prio) { worker_pool_t * me = (worker_pool_t *) context; if (!me) { FARF(ERROR, "worker-pool: invalid context"); return AEE_EBADPARM; } int priority = qurt_thread_get_priority(me->thread[0]); if (priority > 0) { *prio = priority; return 0; } else { *prio = 0; return AEE_EBADSTATE; } } ggml-org-ggml-3678254/src/ggml-hexagon/htp/worker-pool.h000066400000000000000000000041301512524704700227520ustar00rootroot00000000000000#ifndef HTP_WORKER_POOL_H #define HTP_WORKER_POOL_H // MACRO enables function to be visible in shared-library case. #define WORKERPOOL_API __attribute__((visibility("default"))) #include #include #include #ifdef __cplusplus extern "C" { #endif /// signature of callbacks to be invoked by worker threads typedef void (*worker_callback_t)(unsigned int n, unsigned int i, void *); /// Typedef of worker_pool context typedef void * worker_pool_context_t; /// descriptor for requested callback typedef struct { worker_callback_t func; void * data; } worker_pool_job_t; /// Maximum supported number of worker threads. 
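// Typical usage of the pool API declared below (hedged sketch -- `my_job` is an illustrative
// callback name, error handling is abbreviated). Job 0 always runs on the calling thread,
// the remaining n-1 jobs are dispatched to the worker threads:
//
//   static void my_job(unsigned int n, unsigned int i, void * data) {
//       // process slice i of n, optionally using `data`
//   }
//
//   worker_pool_context_t pool = NULL;
//   if (AEE_SUCCESS == worker_pool_init(&pool, 4)) {
//       worker_pool_run_func(pool, my_job, NULL, 4);  // n must not exceed the pool's n_threads
//       worker_pool_release(&pool);
//   }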
#define MAX_NUM_WORKERS 10 // Initialize worker pool. WORKERPOOL_API AEEResult worker_pool_init(worker_pool_context_t * context, uint32_t n_threads); // Initialize worker pool with custom stack size WORKERPOOL_API AEEResult worker_pool_init_with_stack_size(worker_pool_context_t * context, uint32_t n_threads, uint32_t stack_size); // Kill worker threads and release worker pool resources WORKERPOOL_API void worker_pool_release(worker_pool_context_t * context); // Run jobs with the worker pool. WORKERPOOL_API AEEResult worker_pool_run_jobs(worker_pool_context_t context, worker_pool_job_t * job, unsigned int n); WORKERPOOL_API AEEResult worker_pool_run_func(worker_pool_context_t context, worker_callback_t func, void * data, unsigned int n); WORKERPOOL_API AEEResult worker_pool_set_thread_priority(worker_pool_context_t context, unsigned int prio); WORKERPOOL_API AEEResult worker_pool_get_thread_priority(worker_pool_context_t context, unsigned int * prio); WORKERPOOL_API AEEResult worker_pool_retrieve_thread_id(worker_pool_context_t context, unsigned int * tids); #ifdef __cplusplus } #endif #endif // #ifndef HTP_WORKER_POOL_H ggml-org-ggml-3678254/src/ggml-hexagon/op-desc.h000066400000000000000000000104621512524704700212360ustar00rootroot00000000000000#ifndef OP_DESC_H #define OP_DESC_H #define GGML_COMMON_IMPL_CPP #include "ggml-backend-impl.h" #include "ggml-common.h" #include #include struct op_desc { char strides[64 * GGML_MAX_SRC]; char dims[64 * GGML_MAX_SRC]; char types[16 * GGML_MAX_SRC]; char buffs[64 * GGML_MAX_SRC]; char names[64 * GGML_MAX_SRC]; int format_tensor_dims(char * str, const struct ggml_tensor * t) { if (t->ne[2] == 1 && t->ne[3] == 1) { return sprintf(str, "%d:%d", (int) t->ne[0], (int) t->ne[1]); } else { return sprintf(str, "%d:%d:%d:%d", (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); } } void format_op_dims(char * str, const struct ggml_tensor * t) { char * p = str; // append src0 and src1 (if any) if (t->src[0]) { p += format_tensor_dims(p, t->src[0]); for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { p += sprintf(p, " x "); p += format_tensor_dims(p, t->src[i]); } p += sprintf(p, " -> "); } // format self dims separately for better visual alignment char self[64]; format_tensor_dims(self, t); p += sprintf(p, "%s", self); } int format_tensor_strides(char * str, const struct ggml_tensor * t) { const char * c = ggml_is_contiguous(t) ? 
"" : "!"; if (t->ne[2] == 1 && t->ne[3] == 1) { return sprintf(str, "%zu:%zu%s", (size_t) t->nb[0], (size_t) t->nb[1], c); } else { return sprintf(str, "%zu:%zu:%zu:%zu%s", (size_t) t->nb[0], (size_t) t->nb[1], (size_t) t->nb[2], (size_t) t->nb[3], c); } } void format_op_strides(char * str, const struct ggml_tensor * t) { char * p = str; // append src0 and src1 (if any) if (t->src[0]) { p += format_tensor_strides(p, t->src[0]); for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { p += sprintf(p, " x "); p += format_tensor_strides(p, t->src[i]); } p += sprintf(p, " -> "); } // format self dims separately for better visual alignment char self[64]; format_tensor_strides(self, t); p += sprintf(p, "%s", self); } void format_op_types(char * str, const struct ggml_tensor * t) { char * p = str; // append src0 and src1 (if any) if (t->src[0]) { p += sprintf(p, "%s", ggml_type_name(t->src[0]->type)); for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { p += sprintf(p, " x "); p += sprintf(p, "%s", ggml_type_name(t->src[i]->type)); } p += sprintf(p, " -> "); } p += sprintf(p, "%s", ggml_type_name(t->type)); } const char * tensor_buff_name(const struct ggml_tensor * t) { if (t->buffer) { return ggml_backend_buffer_name(t->buffer); } return "NONE"; } void format_op_buffs(char * str, const struct ggml_tensor * t) { char * p = str; // append src0 and src1 (if any) if (t->src[0]) { p += sprintf(p, "%s", tensor_buff_name(t->src[0])); for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { p += sprintf(p, " x "); p += sprintf(p, "%s", tensor_buff_name(t->src[i])); } p += sprintf(p, " -> "); } p += sprintf(p, "%s", tensor_buff_name(t)); } void format_op_names(char * str, const struct ggml_tensor * t) { char * p = str; // append src0 and src1 (if any) if (t->src[0]) { p += sprintf(p, "%s", t->src[0]->name); for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { p += sprintf(p, " x "); p += sprintf(p, "%s", t->src[i]->name); } p += sprintf(p, " -> "); } p += sprintf(p, "%s", t->name); } void format(const ggml_tensor * op) { format_op_dims(dims, op); format_op_strides(strides, op); format_op_types(types, op); format_op_buffs(buffs, op); format_op_names(names, op); } op_desc() {} op_desc(const ggml_tensor * op) { format(op); } }; #endif // OP_DESC_H ggml-org-ggml-3678254/src/ggml-hip/000077500000000000000000000000001512524704700166575ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-hip/CMakeLists.txt000066400000000000000000000101211512524704700214120ustar00rootroot00000000000000if (NOT EXISTS $ENV{ROCM_PATH}) if (NOT EXISTS /opt/rocm) set(ROCM_PATH /usr) else() set(ROCM_PATH /opt/rocm) endif() else() set(ROCM_PATH $ENV{ROCM_PATH}) endif() list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH}) list(APPEND CMAKE_PREFIX_PATH "${ROCM_PATH}/lib64/cmake") # CMake on Windows doesn't support the HIP language yet if (WIN32) set(CXX_IS_HIPCC TRUE) else() string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}") endif() if (CXX_IS_HIPCC) if (LINUX) if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++") endif() message(WARNING "Setting hipcc as the C++ compiler is legacy behavior." " Prefer setting the HIP compiler directly. See README for details.") endif() else() # Forward (AMD)GPU_TARGETS to CMAKE_HIP_ARCHITECTURES. 
if(AMDGPU_TARGETS AND NOT GPU_TARGETS) set(GPU_TARGETS ${AMDGPU_TARGETS}) endif() if(GPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) set(CMAKE_HIP_ARCHITECTURES ${GPU_TARGETS}) endif() cmake_minimum_required(VERSION 3.21) enable_language(HIP) endif() find_package(hip REQUIRED) find_package(hipblas REQUIRED) find_package(rocblas REQUIRED) if (${hip_VERSION} VERSION_LESS 6.1) message(FATAL_ERROR "At least ROCM/HIP V6.1 is required") endif() message(STATUS "HIP and hipBLAS found") # Workaround old compilers set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} --gpu-max-threads-per-block=1024") file(GLOB GGML_HEADERS_ROCM "../ggml-cuda/*.cuh") list(APPEND GGML_HEADERS_ROCM "../../include/ggml-cuda.h") file(GLOB GGML_SOURCES_ROCM "../ggml-cuda/*.cu") file(GLOB SRCS "../ggml-cuda/template-instances/fattn-tile*.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-mma*.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/mmq*.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) if (GGML_CUDA_FA_ALL_QUANTS) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) else() file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*f16-f16.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) endif() ggml_add_backend_library(ggml-hip ${GGML_HEADERS_ROCM} ${GGML_SOURCES_ROCM} ) # TODO: do not use CUDA definitions for HIP if (NOT GGML_BACKEND_DL) target_compile_definitions(ggml PUBLIC GGML_USE_CUDA) endif() add_compile_definitions(GGML_USE_HIP) if (GGML_CUDA_FORCE_MMQ) add_compile_definitions(GGML_CUDA_FORCE_MMQ) endif() if (GGML_CUDA_FORCE_CUBLAS) add_compile_definitions(GGML_CUDA_FORCE_CUBLAS) endif() if (GGML_CUDA_NO_PEER_COPY) add_compile_definitions(GGML_CUDA_NO_PEER_COPY) endif() if (GGML_HIP_GRAPHS) add_compile_definitions(GGML_HIP_GRAPHS) endif() if (GGML_HIP_NO_VMM) add_compile_definitions(GGML_HIP_NO_VMM) endif() if (GGML_HIP_ROCWMMA_FATTN) add_compile_definitions(GGML_HIP_ROCWMMA_FATTN) endif() if (NOT GGML_HIP_MMQ_MFMA) add_compile_definitions(GGML_HIP_NO_MMQ_MFMA) endif() if (GGML_HIP_EXPORT_METRICS) set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Rpass-analysis=kernel-resource-usage --save-temps") endif() if (NOT GGML_CUDA_FA) add_compile_definitions(GGML_CUDA_NO_FA) endif() if (CXX_IS_HIPCC) set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) target_link_libraries(ggml-hip PRIVATE hip::device) else() set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP) endif() if (GGML_STATIC) message(FATAL_ERROR "Static linking not supported for HIP/ROCm") endif() target_link_libraries(ggml-hip PRIVATE ggml-base hip::host roc::rocblas roc::hipblas) ggml-org-ggml-3678254/src/ggml-impl.h000066400000000000000000000565001512524704700172170ustar00rootroot00000000000000#pragma once // GGML internal header #include "ggml.h" #include "gguf.h" #include #include #include // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/ #include #include #include #ifdef __ARM_FEATURE_SVE #include #endif // __ARM_FEATURE_SVE #if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__) // if YCM cannot find , make a symbolic link to it, for example: // // $ ln -sfn 
/Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ // #include #endif #ifdef __cplusplus extern "C" { #endif void ggml_print_backtrace(void); #ifndef MIN # define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif #ifndef MAX # define MAX(a, b) ((a) > (b) ? (a) : (b)) #endif // required for mmap as gguf only guarantees 32-byte alignment #define TENSOR_ALIGNMENT 32 // static_assert should be a #define, but if it's not, // fall back to the _Static_assert C11 keyword. // if C99 - static_assert is noop // ref: https://stackoverflow.com/a/53923785/4039976 #ifndef __cplusplus #ifndef static_assert #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) #define static_assert(cond, msg) _Static_assert(cond, msg) #else #define static_assert(cond, msg) struct global_scope_noop_trick #endif #endif #endif static inline int ggml_up32(int n) { return (n + 31) & ~31; } //static inline int ggml_up64(int n) { // return (n + 63) & ~63; //} static inline int ggml_up(int n, int m) { // assert m is a power of 2 GGML_ASSERT((m & (m - 1)) == 0); return (n + m - 1) & ~(m - 1); } // TODO: move to ggml.h? (won't be able to inline) static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { if (a->type != b->type) { return false; } for (int i = 0; i < GGML_MAX_DIMS; i++) { if (a->ne[i] != b->ne[i]) { return false; } if (a->nb[i] != b->nb[i]) { return false; } } return true; } static bool ggml_op_is_empty(enum ggml_op op) { switch (op) { case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_TRANSPOSE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: return true; default: return false; } } static inline float ggml_compute_softplus_f32(float input) { return (input > 20.0f) ? input : logf(1 + expf(input)); } // // logging // GGML_ATTRIBUTE_FORMAT(2, 3) GGML_API void ggml_log_internal (enum ggml_log_level level, const char * format, ...); GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data); #define GGML_LOG(...) ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__) #define GGML_LOG_INFO(...) ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) #define GGML_LOG_WARN(...) ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) #define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) #define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__) #define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__) #define GGML_DEBUG 0 #if (GGML_DEBUG >= 1) #define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__) #else #define GGML_PRINT_DEBUG(...) #endif #if (GGML_DEBUG >= 5) #define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__) #else #define GGML_PRINT_DEBUG_5(...) #endif #if (GGML_DEBUG >= 10) #define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__) #else #define GGML_PRINT_DEBUG_10(...) 
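// Worked example for the round-up helpers defined above (illustrative):
// ggml_up(n, m) rounds n up to a multiple of the power-of-two m via (n + m - 1) & ~(m - 1),
// e.g. ggml_up(13, 8) -> (13 + 7) & ~7 = 16, and ggml_up32(33) -> 64.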
#endif // tensor params static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) { GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings assert(params_size <= GGML_MAX_OP_PARAMS); memcpy(tensor->op_params, params, params_size); } static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) { assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t)); return ((const int32_t *)(tensor->op_params))[i]; } static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) { assert(i < GGML_MAX_OP_PARAMS / sizeof(float)); return ((const float *)(tensor->op_params))[i]; } static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) { assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t)); ((int32_t *)(tensor->op_params))[i] = value; } static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) { assert(i < GGML_MAX_OP_PARAMS / sizeof(float)); ((float *)(tensor->op_params))[i] = value; } struct ggml_map_custom1_op_params { ggml_custom1_op_t fun; int n_tasks; void * userdata; }; struct ggml_map_custom2_op_params { ggml_custom2_op_t fun; int n_tasks; void * userdata; }; struct ggml_map_custom3_op_params { ggml_custom3_op_t fun; int n_tasks; void * userdata; }; struct ggml_custom_op_params { ggml_custom_op_t fun; int n_tasks; void * userdata; }; // bitset typedef uint32_t ggml_bitset_t; static_assert(sizeof(ggml_bitset_t) == 4, "bitset_t constants must be updated"); #define BITSET_SHR 5 // log2(sizeof(ggml_bitset_t)*8) #define BITSET_MASK (sizeof(ggml_bitset_t)*8 - 1) static size_t ggml_bitset_size(size_t n) { return (n + BITSET_MASK) >> BITSET_SHR; } static inline bool ggml_bitset_get(const ggml_bitset_t * bitset, size_t i) { return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK))); } static inline void ggml_bitset_set(ggml_bitset_t * bitset, size_t i) { bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK)); } static inline void ggml_bitset_clear(ggml_bitset_t * bitset, size_t i) { bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK)); } // hash set #define GGML_HASHSET_FULL ((size_t)-1) #define GGML_HASHSET_ALREADY_EXISTS ((size_t)-2) struct ggml_hash_set { size_t size; ggml_bitset_t * used; // whether or not the keys are in use i.e. 
set struct ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if ggml_bitset_get(used, i) }; struct ggml_hash_set ggml_hash_set_new(size_t size); void ggml_hash_set_free(struct ggml_hash_set * hash_set); // returns the minimum size for a hash set that can hold min_sz elements size_t ggml_hash_size(size_t min_sz); // remove all elements from the hash set void ggml_hash_set_reset(struct ggml_hash_set * hash_set); // returns true if key is in the hash set static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key); // returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key); // returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key); // return index, asserts if table is full static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key); // hash function for ggml_tensor static inline size_t ggml_hash(const struct ggml_tensor * p) { // the last 4 bits are always zero due to alignment return (size_t)(uintptr_t)p >> 4; } static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key) { size_t h = ggml_hash(key) % hash_set->size; // linear probing size_t i = h; while (ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) { i = (i + 1) % hash_set->size; if (i == h) { // visited all hash table entries -> not found return GGML_HASHSET_FULL; } } return i; } static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) { size_t i = ggml_hash_find(hash_set, key); return i != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, i); } static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) { size_t h = ggml_hash(key) % hash_set->size; // linear probing size_t i = h; do { if (!ggml_bitset_get(hash_set->used, i)) { ggml_bitset_set(hash_set->used, i); hash_set->keys[i] = key; return i; } if (hash_set->keys[i] == key) { return GGML_HASHSET_ALREADY_EXISTS; } i = (i + 1) % hash_set->size; } while (i != h); // visited all hash table entries -> not found GGML_ABORT("fatal error"); } static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) { size_t h = ggml_hash(key) % hash_set->size; // linear probing size_t i = h; do { if (!ggml_bitset_get(hash_set->used, i)) { ggml_bitset_set(hash_set->used, i); hash_set->keys[i] = key; return i; } if (hash_set->keys[i] == key) { return i; } i = (i + 1) % hash_set->size; } while (i != h); // visited all hash table entries -> not found GGML_ABORT("fatal error"); } // computation graph enum ggml_cgraph_eval_order { GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0, GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT, GGML_CGRAPH_EVAL_ORDER_COUNT }; struct ggml_cgraph { int size; // maximum number of nodes/leafs/grads/grad_accs int n_nodes; // number of nodes currently in use int n_leafs; // number of leafs currently in use struct ggml_tensor ** nodes; // tensors with data that can change if the graph is evaluated struct ggml_tensor ** grads; // the outputs of these tensors are the gradients of the nodes struct ggml_tensor ** grad_accs; // accumulators for node gradients struct ggml_tensor ** leafs; // tensors with constant data int32_t * use_counts;// number of uses of 
each tensor, indexed by hash table slot struct ggml_hash_set visited_hash_set; enum ggml_cgraph_eval_order order; }; // returns a slice of cgraph with nodes [i0, i1) // the slice does not have leafs or gradients // if you need the gradients, get them from the original graph struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1); // ggml-alloc.c: true if the operation can reuse memory from its sources GGML_API bool ggml_op_can_inplace(enum ggml_op op); // Memory allocation GGML_API void * ggml_aligned_malloc(size_t size); GGML_API void ggml_aligned_free(void * ptr, size_t size); // FP16 <-> FP32 // ref: https://github.com/Maratyszcza/FP16 static inline float fp32_from_bits(uint32_t w) { union { uint32_t as_bits; float as_value; } fp32; fp32.as_bits = w; return fp32.as_value; } static inline uint32_t fp32_to_bits(float f) { union { float as_value; uint32_t as_bits; } fp32; fp32.as_value = f; return fp32.as_bits; } static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { const uint32_t w = (uint32_t) h << 16; const uint32_t sign = w & UINT32_C(0x80000000); const uint32_t two_w = w + w; const uint32_t exp_offset = UINT32_C(0xE0) << 23; #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) const float exp_scale = 0x1.0p-112f; #else const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); #endif const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; const uint32_t magic_mask = UINT32_C(126) << 23; const float magic_bias = 0.5f; const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; const uint32_t denormalized_cutoff = UINT32_C(1) << 27; const uint32_t result = sign | (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); return fp32_from_bits(result); } static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) const float scale_to_inf = 0x1.0p+112f; const float scale_to_zero = 0x1.0p-110f; #else const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); #endif float base = (fabsf(f) * scale_to_inf) * scale_to_zero; const uint32_t w = fp32_to_bits(f); const uint32_t shl1_w = w + w; const uint32_t sign = w & UINT32_C(0x80000000); uint32_t bias = shl1_w & UINT32_C(0xFF000000); if (bias < UINT32_C(0x71000000)) { bias = UINT32_C(0x71000000); } base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; const uint32_t bits = fp32_to_bits(base); const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); const uint32_t nonsign = exp_bits + mantissa_bits; return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? 
UINT16_C(0x7E00) : nonsign); } #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) static inline float ggml_e8m0_to_fp32(uint8_t x) { uint32_t bits; // Stores the raw bit representation of the float // Handle special case for minimum exponent (denormalized float) if (x == 0) { // Bit pattern for 2^(-127): // - Sign bit: 0 (positive) // - Exponent: 0 (denormalized number) // - Mantissa: 0x400000 (0.5 in fractional form) // Value = 0.5 * 2^(-126) = 2^(-127) bits = 0x00400000; } // note: disabled as we don't need to handle NaNs //// Handle special case for NaN (all bits set) //else if (x == 0xFF) { // // Standard quiet NaN pattern: // // - Sign bit: 0 // // - Exponent: all 1s (0xFF) // // - Mantissa: 0x400000 (quiet NaN flag) // bits = 0x7FC00000; //} // Normalized values (most common case) else { // Construct normalized float by shifting exponent into position: // - Exponent field: 8 bits (positions 30-23) // - Mantissa: 0 (implicit leading 1) // Value = 2^(x - 127) bits = (uint32_t) x << 23; } float result; // Final float value // Safely reinterpret bit pattern as float without type-punning issues memcpy(&result, &bits, sizeof(float)); return result; } // Equal to ggml_e8m0_to_fp32/2 // Useful with MXFP4 quantization since the E0M2 values are doubled static inline float ggml_e8m0_to_fp32_half(uint8_t x) { uint32_t bits; // For x < 2: use precomputed denormal patterns if (x < 2) { // 0x00200000 = 2^(-128), 0x00400000 = 2^(-127) bits = 0x00200000 << x; } // For x >= 2: normalized exponent adjustment else { // 0.5 * 2^(x-127) = 2^(x-128) = normalized with exponent (x-1) bits = (uint32_t)(x - 1) << 23; } // Note: NaNs are not handled here float result; memcpy(&result, &bits, sizeof(float)); return result; } #define GGML_E8M0_TO_FP32(x) ggml_e8m0_to_fp32(x) #define GGML_E8M0_TO_FP32_HALF(x) ggml_e8m0_to_fp32_half(x) /** * Converts brain16 to float32. * * The bfloat16 floating point format has the following structure: * * ┌sign * │ * │ ┌exponent * │ │ * │ │ ┌mantissa * │ │ │ * │┌──┴───┐┌─┴───┐ * 0b0000000000000000 brain16 * * Since bf16 has the same number of exponent bits as a 32bit float, * encoding and decoding numbers becomes relatively straightforward. * * ┌sign * │ * │ ┌exponent * │ │ * │ │ ┌mantissa * │ │ │ * │┌──┴───┐┌─┴───────────────────┐ * 0b00000000000000000000000000000000 IEEE binary32 * * For comparison, the standard fp16 format has fewer exponent bits. * * ┌sign * │ * │ ┌exponent * │ │ * │ │ ┌mantissa * │ │ │ * │┌─┴─┐┌─┴──────┐ * 0b0000000000000000 IEEE binary16 * * @see IEEE 754-2008 */ static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) { union { float f; uint32_t i; } u; u.i = (uint32_t)h.bits << 16; return u.f; } /** * Converts float32 to brain16. * * This is binary identical with Google Brain float conversion. * Floats shall round to nearest even, and NANs shall be quiet. * Subnormals aren't flushed to zero, except perhaps when used. * This code should vectorize nicely if using modern compilers. 
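 *
 * Worked example (illustrative): the float 1.0f has bit pattern 0x3F800000; keeping only the
 * high 16 bits gives the bf16 pattern 0x3F80, and ggml_compute_bf16_to_fp32() above restores
 * it by shifting those 16 bits back into the high half of a binary32. The fp32 -> bf16
 * conversion below applies round-to-nearest-even before truncating: 0x3F808000 (exactly
 * halfway, kept half ends in 0) rounds down to 0x3F80, while 0x3F818000 rounds up to 0x3F82.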
*/ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) { ggml_bf16_t h; union { float f; uint32_t i; } u; u.f = s; if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */ h.bits = (u.i >> 16) | 64; /* force to quiet */ return h; } h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16; return h; } #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x) #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x) static inline int32_t ggml_node_get_use_count(const struct ggml_cgraph * cgraph, int node_idx) { const struct ggml_tensor * node = cgraph->nodes[node_idx]; size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node); if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos)) { return 0; } return cgraph->use_counts[hash_pos]; } // return true if the node's results are only used by N other nodes // and can be fused into their calculations. static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int node_idx, int32_t n_uses) { const struct ggml_tensor * node = cgraph->nodes[node_idx]; // check the use count against how many we're replacing if (ggml_node_get_use_count(cgraph, node_idx) != n_uses) { return false; } // if node is a view, some other node might be using the intermediate result // via the view source. if (node->view_src) { return false; } // If the user requested output for the node, can't fuse if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { return false; } return true; } // Returns true if nodes with indices { node_idxs } are the sequence of ggml_ops in ops[] // and are fusable. Nodes are considered fusable according to this function if: // - all nodes except the last have only one use and are not views/outputs (see ggml_node_has_N_uses). // - all nodes except the last are a src of the following node. // - all nodes are the same shape. 
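//
// Example (hedged sketch): a backend that wants to fuse an RMS_NORM immediately followed by a
// MUL that consumes it could check, for a candidate node index `i`, using the ggml_can_fuse()
// wrapper below:
//
//   const enum ggml_op ops[2] = { GGML_OP_RMS_NORM, GGML_OP_MUL };
//   if (ggml_can_fuse(cgraph, i, ops, 2)) {
//       // safe to emit a single fused kernel for nodes i and i+1
//   }
//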
// TODO: Consider allowing GGML_OP_NONE nodes in between static inline bool ggml_can_fuse_ext(const struct ggml_cgraph * cgraph, const int * node_idxs, const enum ggml_op * ops, int num_ops) { for (int i = 0; i < num_ops; ++i) { if (node_idxs[i] >= cgraph->n_nodes) { return false; } struct ggml_tensor * node = cgraph->nodes[node_idxs[i]]; if (node->op != ops[i]) { return false; } if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idxs[i], 1)) { return false; } if (i > 0) { struct ggml_tensor * prev = cgraph->nodes[node_idxs[i - 1]]; if (node->src[0] != prev && node->src[1] != prev) { return false; } if (!ggml_are_same_shape(node, prev)) { return false; } } } return true; } // same as above, for sequential indices starting at node_idx static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) { assert(num_ops < 32); if (node_idx + num_ops > cgraph->n_nodes) { return false; } int idxs[32]; for (int i = 0; i < num_ops; ++i) { idxs[i] = node_idx + i; } return ggml_can_fuse_ext(cgraph, idxs, ops, num_ops); } GGML_API bool ggml_can_fuse_subgraph_ext(const struct ggml_cgraph * cgraph, const int * node_idxs, int count, const enum ggml_op * ops, const int * outputs, int num_outputs); // Returns true if the subgraph formed by {node_idxs} can be fused // checks whethers all nodes which are not part of outputs can be elided // by checking if their num_uses are confined to the subgraph static inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph, int node_idx, int count, const enum ggml_op * ops, const int * outputs, int num_outputs) { GGML_ASSERT(count < 32); if (node_idx + count > cgraph->n_nodes) { return false; } int idxs[32]; for (int i = 0; i < count; ++i) { idxs[i] = node_idx + i; } return ggml_can_fuse_subgraph_ext(cgraph, idxs, count, ops, outputs, num_outputs); } #ifdef __cplusplus } #endif #ifdef __cplusplus #include #include #include // nicer C++ syntax for ggml_can_fuse inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list ops) { return ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size()); } inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph, int start_idx, std::initializer_list ops, std::initializer_list outputs = {}) { return ggml_can_fuse_subgraph(cgraph, start_idx, ops.size(), ops.begin(), outputs.begin(), outputs.size()); } // Return true if the edges in the graph match expectations. 
inline bool ggml_check_edges(const struct ggml_cgraph * cgraph, int start_idx, std::initializer_list> edges) { for (const auto & edge : edges) { int dst_node = edge[0]; int src_idx = edge[1]; int src_node = edge[2]; if (cgraph->nodes[start_idx + dst_node]->src[src_idx] != cgraph->nodes[start_idx + src_node]) { return false; } } return true; } // expose GGUF internals for test code GGML_API size_t gguf_type_size(enum gguf_type type); GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params); GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, std::vector & buf, bool only_meta); #endif // __cplusplus ggml-org-ggml-3678254/src/ggml-metal/000077500000000000000000000000001512524704700172015ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-metal/CMakeLists.txt000066400000000000000000000130741512524704700217460ustar00rootroot00000000000000find_library(FOUNDATION_LIBRARY Foundation REQUIRED) find_library(METAL_FRAMEWORK Metal REQUIRED) find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) message(STATUS "Metal framework found") ggml_add_backend_library(ggml-metal ggml-metal.cpp ggml-metal-device.m ggml-metal-device.cpp ggml-metal-common.cpp ggml-metal-context.m ggml-metal-ops.cpp ) target_link_libraries(ggml-metal PRIVATE ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK} ) if (GGML_METAL_NDEBUG) add_compile_definitions(GGML_METAL_NDEBUG) endif() # copy metal files to bin directory configure_file(../ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COPYONLY) configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY) configure_file(ggml-metal-impl.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal-impl.h COPYONLY) set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/../ggml-common.h") if (GGML_METAL_EMBED_LIBRARY) enable_language(ASM) add_compile_definitions(GGML_METAL_EMBED_LIBRARY) set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") set(METALLIB_IMPL "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal-impl.h") file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated") # merge ggml-common.h and ggml-metal.metal into a single file set(METALLIB_EMBED_ASM "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.s") set(METALLIB_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal") set(METALLIB_SOURCE_EMBED_TMP "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal.tmp") add_custom_command( OUTPUT "${METALLIB_EMBED_ASM}" COMMAND echo "Embedding Metal library" COMMAND sed -e "/__embed_ggml-common.h__/r ${METALLIB_COMMON}" -e "/__embed_ggml-common.h__/d" < "${METALLIB_SOURCE}" > "${METALLIB_SOURCE_EMBED_TMP}" COMMAND sed -e "/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}" -e "/\#include \"ggml-metal-impl.h\"/d" < "${METALLIB_SOURCE_EMBED_TMP}" > "${METALLIB_SOURCE_EMBED}" COMMAND echo ".section __DATA,__ggml_metallib" > "${METALLIB_EMBED_ASM}" COMMAND echo ".globl _ggml_metallib_start" >> "${METALLIB_EMBED_ASM}" COMMAND echo "_ggml_metallib_start:" >> "${METALLIB_EMBED_ASM}" COMMAND echo .incbin "\"${METALLIB_SOURCE_EMBED}\"" >> "${METALLIB_EMBED_ASM}" COMMAND echo ".globl _ggml_metallib_end" >> "${METALLIB_EMBED_ASM}" COMMAND echo "_ggml_metallib_end:" >> "${METALLIB_EMBED_ASM}" DEPENDS ../ggml-common.h ggml-metal.metal ggml-metal-impl.h COMMENT "Generate assembly for embedded Metal library" VERBATIM ) target_sources(ggml-metal PRIVATE "${METALLIB_EMBED_ASM}") else() if (GGML_METAL_SHADER_DEBUG) # custom command to do the following: # xcrun -sdk macosx metal 
-fno-fast-math -c ggml-metal.metal -o ggml-metal.air # xcrun -sdk macosx metallib ggml-metal.air -o default.metallib # # note: this is the only way I found to disable fast-math in Metal. it's ugly, but at least it works # disabling fast math is needed in order to pass tests/test-backend-ops # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1 # note: unfortunately, we have to call it default.metallib instead of ggml.metallib # ref: https://github.com/ggerganov/whisper.cpp/issues/1720 # note: adding -g causes segmentation fault during compile #set(XC_FLAGS -fno-fast-math -fno-inline -g) set(XC_FLAGS -fno-fast-math -fno-inline) else() set(XC_FLAGS -O3) endif() # Append macOS metal versioning flags if (GGML_METAL_MACOSX_VERSION_MIN) message(STATUS "Adding -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN} flag to metal compilation") list (APPEND XC_FLAGS -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN}) endif() if (GGML_METAL_STD) message(STATUS "Adding -std=${GGML_METAL_STD} flag to metal compilation") list (APPEND XC_FLAGS -std=${GGML_METAL_STD}) endif() add_custom_command( OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o - | xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal DEPENDS ggml-metal.metal ${METALLIB_COMMON} COMMENT "Compiling Metal kernels" ) # FIXME: only add to the ggml-metal target? add_custom_target( ggml-metal-lib ALL DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib ) endif() # GGML_METAL_EMBED_LIBRARY if (NOT GGML_METAL_EMBED_LIBRARY) install( FILES src/ggml-metal/ggml-metal.metal PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ DESTINATION ${CMAKE_INSTALL_BINDIR}) install( FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib DESTINATION ${CMAKE_INSTALL_BINDIR} ) endif() ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-common.cpp000066400000000000000000000325101512524704700232220ustar00rootroot00000000000000#include "ggml-metal-common.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include // represents a memory range (i.e. an interval from a starting address p0 to an ending address p1 in a given buffer pb) // the type indicates whether it is a source range (i.e. ops read data from it) or a destination range (i.e. ops write data to it) struct ggml_mem_range { uint64_t pb; // buffer id uint64_t p0; // begin uint64_t p1; // end ggml_mem_range_type pt; }; struct ggml_mem_ranges { std::vector ranges; int debug = 0; }; ggml_mem_ranges_t ggml_mem_ranges_init(int debug) { auto * res = new ggml_mem_ranges; res->ranges.reserve(256); res->debug = debug; return res; } void ggml_mem_ranges_free(ggml_mem_ranges_t mrs) { delete mrs; } void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs) { mrs->ranges.clear(); } static bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, ggml_mem_range mr) { mrs->ranges.push_back(mr); return true; } static ggml_mem_range ggml_mem_range_from_tensor(const ggml_tensor * tensor, ggml_mem_range_type pt) { // always use the base tensor tensor = tensor->view_src ? 
tensor->view_src : tensor; GGML_ASSERT(!tensor->view_src); ggml_mem_range mr; if (tensor->buffer) { // when the tensor is allocated, use the actual memory address range in the buffer // // take the actual allocated size with ggml_backend_buft_get_alloc_size() // this can be larger than the tensor size if the buffer type allocates extra memory // ref: https://github.com/ggml-org/llama.cpp/pull/15966 mr = { /*.pb =*/ (uint64_t) tensor->buffer, /*.p0 =*/ (uint64_t) tensor->data, /*.p1 =*/ (uint64_t) tensor->data + ggml_backend_buft_get_alloc_size(tensor->buffer->buft, tensor), /*.pt =*/ pt, }; } else { // otherwise, the pointer address is used as an unique id of the memory ranges // that the tensor will be using when it is allocated mr = { /*.pb =*/ (uint64_t) tensor, /*.p0 =*/ 0, // /*.p1 =*/ 1024, // [0, 1024) is a dummy range, not used /*.pt =*/ pt, }; }; return mr; } static ggml_mem_range ggml_mem_range_from_tensor_src(const ggml_tensor * tensor) { return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_SRC); } static ggml_mem_range ggml_mem_range_from_tensor_dst(const ggml_tensor * tensor) { return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_DST); } static bool ggml_mem_ranges_add_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { GGML_ASSERT(tensor); ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor); if (mrs->debug > 2) { GGML_LOG_DEBUG("%s: add src range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1); } return ggml_mem_ranges_add(mrs, mr); } static bool ggml_mem_ranges_add_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { GGML_ASSERT(tensor); ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor); if (mrs->debug > 2) { GGML_LOG_DEBUG("%s: add dst range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1); } return ggml_mem_ranges_add(mrs, mr); } bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (tensor->src[i]) { ggml_mem_ranges_add_src(mrs, tensor->src[i]); } } return ggml_mem_ranges_add_dst(mrs, tensor); } static bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, ggml_mem_range mr) { for (size_t i = 0; i < mrs->ranges.size(); i++) { const auto & cmp = mrs->ranges[i]; // two memory ranges cannot intersect if they are in different buffers if (mr.pb != cmp.pb) { continue; } // intersecting source ranges are allowed if (mr.pt == MEM_RANGE_TYPE_SRC && cmp.pt == MEM_RANGE_TYPE_SRC) { continue; } if (mr.p0 < cmp.p1 && mr.p1 >= cmp.p0) { if (mrs->debug > 2) { GGML_LOG_DEBUG("%s: the %s range buf=%lld, [%lld, %lld) overlaps with a previous %s range buf=%lld, [%lld, %lld)\n", __func__, mr.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst", mr.pb, mr.p0, mr.p1, cmp.pt == MEM_RANGE_TYPE_SRC ? 
"src" : "dst", cmp.pb, cmp.p0, cmp.p1); } return false; } } return true; } static bool ggml_mem_ranges_check_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { GGML_ASSERT(tensor); ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor); const bool res = ggml_mem_ranges_check(mrs, mr); return res; } static bool ggml_mem_ranges_check_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { GGML_ASSERT(tensor); ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor); const bool res = ggml_mem_ranges_check(mrs, mr); return res; } bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (tensor->src[i]) { if (!ggml_mem_ranges_check_src(mrs, tensor->src[i])) { return false; } } } return ggml_mem_ranges_check_dst(mrs, tensor); } struct node_info { ggml_tensor * node; std::vector fused; ggml_op op() const { return node->op; } const ggml_tensor * dst() const { return fused.empty() ? node : fused.back(); } bool is_empty() const { return ggml_op_is_empty(node->op); } void add_fused(ggml_tensor * t) { fused.push_back(t); } }; static std::vector ggml_metal_graph_optimize_reorder(const std::vector & nodes) { // helper to add node src and dst ranges const auto & h_add = [](ggml_mem_ranges_t mrs, const node_info & node) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (node.node->src[i]) { if (!ggml_mem_ranges_add_src(mrs, node.node->src[i])) { return false; } } } // keep track of the sources of the fused nodes as well for (const auto * fused : node.fused) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (fused->src[i]) { if (!ggml_mem_ranges_add_src(mrs, fused->src[i])) { return false; } } } } return ggml_mem_ranges_add_dst(mrs, node.dst()); }; // helper to check if a node can run concurrently with the existing set of nodes const auto & h_check = [](ggml_mem_ranges_t mrs, const node_info & node) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (node.node->src[i]) { if (!ggml_mem_ranges_check_src(mrs, node.node->src[i])) { return false; } } } for (const auto * fused : node.fused) { for (int i = 0; i < GGML_MAX_SRC; i++) { if (fused->src[i]) { if (!ggml_mem_ranges_check_src(mrs, fused->src[i])) { return false; } } } } return ggml_mem_ranges_check_dst(mrs, node.dst()); }; // perform reorders only across these types of ops // can be expanded when needed const auto & h_safe = [](ggml_op op) { switch (op) { case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: case GGML_OP_ROPE: case GGML_OP_NORM: case GGML_OP_RMS_NORM: case GGML_OP_GROUP_NORM: case GGML_OP_SUM_ROWS: case GGML_OP_MUL: case GGML_OP_ADD: case GGML_OP_DIV: case GGML_OP_GLU: case GGML_OP_SCALE: case GGML_OP_GET_ROWS: case GGML_OP_CPY: case GGML_OP_SET_ROWS: return true; default: return ggml_op_is_empty(op); } }; const int n = nodes.size(); std::vector res; res.reserve(n); std::vector used(n, false); // the memory ranges for the set of currently concurrent nodes ggml_mem_ranges_t mrs0 = ggml_mem_ranges_init(0); // the memory ranges for the set of nodes that haven't been processed yet, when looking forward for a node to reorder ggml_mem_ranges_t mrs1 = ggml_mem_ranges_init(0); for (int i0 = 0; i0 < n; i0++) { if (used[i0]) { continue; } const auto & node0 = nodes[i0]; // the node is not concurrent with the existing concurrent set, so we have to "put a barrier" (i.e reset mrs0) // but before we do that, look forward for some other nodes that can be added to the concurrent set mrs0 // // note: we can always add empty nodes to the concurrent set as they don't read nor write anything if 
(!node0.is_empty() && !h_check(mrs0, node0)) { // this will hold the set of memory ranges from the nodes that haven't been processed yet // if a node is not concurrent with this set, we cannot reorder it ggml_mem_ranges_reset(mrs1); // initialize it with the current node h_add(mrs1, node0); // that many nodes forward to search for a concurrent node constexpr int N_FORWARD = 8; for (int i1 = i0 + 1; i1 < i0 + N_FORWARD && i1 < n; i1++) { if (used[i1]) { continue; } const auto & node1 = nodes[i1]; // disallow reordering of certain ops if (!h_safe(node1.op())) { break; } const bool is_empty = node1.is_empty(); // to reorder a node and add it to the concurrent set, it has to be: // + empty or concurrent with all nodes in the existing concurrent set (mrs0) // + concurrent with all nodes prior to it that haven't been processed yet (mrs1) if ((is_empty || h_check(mrs0, node1)) && h_check(mrs1, node1)) { // add the node to the existing concurrent set (i.e. reorder it for early execution) h_add(mrs0, node1); res.push_back(i1); // mark as used, so we skip re-processing it later used[i1] = true; } else { // expand the set of nodes that haven't been processed yet h_add(mrs1, node1); } } // finalize the concurrent set and begin a new one ggml_mem_ranges_reset(mrs0); } // expand the concurrent set with the current node { h_add(mrs0, node0); res.push_back(i0); } } ggml_mem_ranges_free(mrs0); ggml_mem_ranges_free(mrs1); return res; } void ggml_graph_optimize(ggml_cgraph * gf) { constexpr int MAX_FUSE = 16; const int n = gf->n_nodes; enum ggml_op ops[MAX_FUSE]; std::vector nodes; nodes.reserve(gf->n_nodes); // fuse nodes: // we don't want to make reorders that break fusing, so we first pack all fusable tensors // and perform the reorder over the fused nodes. after the reorder is done, we unfuse for (int i = 0; i < n; i++) { node_info node = { /*.node =*/ gf->nodes[i], /*.fused =*/ {}, }; // fuse only ops that start with these operations // can be expanded when needed if (node.op() == GGML_OP_ADD || node.op() == GGML_OP_NORM || node.op() == GGML_OP_RMS_NORM) { ops[0] = node.op(); int f = i + 1; while (f < n && f < i + MAX_FUSE) { // conservatively allow fusing only these ops // can be expanded when needed if (gf->nodes[f]->op != GGML_OP_ADD && gf->nodes[f]->op != GGML_OP_MUL && gf->nodes[f]->op != GGML_OP_NORM && gf->nodes[f]->op != GGML_OP_RMS_NORM) { break; } ops[f - i] = gf->nodes[f]->op; f++; } f -= i; for (; f > 1; f--) { if (ggml_can_fuse(gf, i, ops, f)) { break; } } // add the fused tensors into the node info so we can unfuse them later for (int k = 1; k < f; k++) { ++i; // the .dst() becomes the last fused tensor node.add_fused(gf->nodes[i]); } } nodes.push_back(std::move(node)); } #if 1 // reorder to improve concurrency const auto order = ggml_metal_graph_optimize_reorder(nodes); #else std::vector order(nodes.size()); for (size_t i = 0; i < nodes.size(); i++) { order[i] = i; } #endif // unfuse { int j = 0; for (const auto i : order) { const auto & node = nodes[i]; gf->nodes[j++] = node.node; for (auto * fused : node.fused) { gf->nodes[j++] = fused; } } } } ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-common.h000066400000000000000000000033521512524704700226710ustar00rootroot00000000000000// helper functions for ggml-metal that are too difficult to implement in Objective-C #pragma once #include #ifdef __cplusplus extern "C" { #endif struct ggml_tensor; struct ggml_cgraph; enum ggml_mem_range_type { MEM_RANGE_TYPE_SRC = 0, MEM_RANGE_TYPE_DST = 1, }; // a helper object that can be used for 
reordering operations to improve concurrency // // the fundamental idea is that a set of tasks (either ggml ops, or something else) can run concurrently if they // don't write to a memory that is being read by another task or written to by another task in the set // // with this structure, we can add tasks to the set, setting memory constraints. we can also check if a new task // can be added to the set without violating the constraints (i.e. if it can be executed concurrently with the // tasks already in the set) // typedef struct ggml_mem_ranges * ggml_mem_ranges_t; ggml_mem_ranges_t ggml_mem_ranges_init(int debug); void ggml_mem_ranges_free(ggml_mem_ranges_t mrs); // remove all ranges from the set void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs); // add src or dst ranges to track bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor); // return false if: // - new src range overlaps with any existing dst range // - new dst range overlaps with any existing range (src or dst) bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor); // reorder the nodes in the graph to improve concurrency, while respecting fusion // // note: this implementation is generic and not specific to metal // if it proves to work well, we can start using it for other backends in the future void ggml_graph_optimize(struct ggml_cgraph * gf); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-context.h000066400000000000000000000020441512524704700230620ustar00rootroot00000000000000#pragma once #include "ggml-metal-device.h" #ifdef __cplusplus extern "C" { #endif // // backend context // typedef struct ggml_metal * ggml_metal_t; ggml_metal_t ggml_metal_init(ggml_metal_device_t dev); void ggml_metal_free(ggml_metal_t ctx); void ggml_metal_synchronize(ggml_metal_t ctx); void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); enum ggml_status ggml_metal_graph_compute (ggml_metal_t ctx, struct ggml_cgraph * gf); void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf); void ggml_metal_set_n_cb (ggml_metal_t ctx, int n_cb); void ggml_metal_set_abort_callback (ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data); bool ggml_metal_supports_family (ggml_metal_t ctx, int family); void ggml_metal_capture_next_compute(ggml_metal_t ctx); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-context.m000066400000000000000000000517021512524704700230740ustar00rootroot00000000000000#import "ggml-metal-context.h" #import "ggml-impl.h" #import "ggml-backend-impl.h" #import "ggml-metal-impl.h" #import "ggml-metal-common.h" #import "ggml-metal-ops.h" #import #import #undef MIN #undef MAX #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) // max number of MTLCommandBuffer used to submit a graph for processing #define GGML_METAL_MAX_COMMAND_BUFFERS 8 struct ggml_metal_command_buffer { id obj; }; struct ggml_metal { ggml_metal_device_t dev; ggml_metal_library_t lib; dispatch_queue_t d_queue; // additional, inference-time compiled pipelines ggml_metal_pipelines_t pipelines_ext; bool use_fusion; bool use_concurrency; bool use_graph_optimize; int debug_graph; int debug_fusion; // how many times a given op was fused uint64_t fuse_cnt[GGML_OP_COUNT]; // capture state bool capture_next_compute; bool capture_started; id capture_scope; // command buffer state int n_cb; // number of extra threads used to submit the command buffers int n_nodes_0; // number of nodes submitted by the main thread int n_nodes_1; // remaining number of nodes submitted by the n_cb threads int n_nodes_per_cb; struct ggml_cgraph * gf; // the callback given to the thread pool void (^encode_async)(size_t ith); // n_cb command buffers + 1 used by the main thread struct ggml_metal_command_buffer cmd_bufs[GGML_METAL_MAX_COMMAND_BUFFERS + 1]; // extra command buffers for things like getting, setting and copying tensors NSMutableArray * cmd_bufs_ext; // the last command buffer queued into the Metal queue with operations relevant to the current Metal backend id cmd_buf_last; // abort ggml_metal_graph_compute if callback returns true ggml_abort_callback abort_callback; void * abort_callback_data; }; ggml_metal_t ggml_metal_init(ggml_metal_device_t dev) { GGML_LOG_INFO("%s: allocating\n", __func__); #if TARGET_OS_OSX && !GGML_METAL_NDEBUG // Show all the Metal device instances in the system NSArray * devices = MTLCopyAllDevices(); for (id device in devices) { GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]); } [devices release]; // since it was created by a *Copy* C method #endif // init context ggml_metal_t res = calloc(1, sizeof(struct ggml_metal)); id device = ggml_metal_device_get_obj(dev); GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]); // TODO: would it be better to have one queue for the backend and one queue for the device? // the graph encoders and async ops would use the backend queue while the sync ops would use the device queue? //res->queue = [device newCommandQueue]; [TAG_QUEUE_PER_BACKEND] id queue = ggml_metal_device_get_queue(dev); if (queue == nil) { GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__); return NULL; } res->dev = dev; res->lib = ggml_metal_device_get_library(dev); if (res->lib == NULL) { GGML_LOG_WARN("%s: the device does not have a precompiled Metal library - this is unexpected\n", __func__); GGML_LOG_WARN("%s: will try to compile it on the fly\n", __func__); res->lib = ggml_metal_library_init(dev); if (res->lib == NULL) { GGML_LOG_ERROR("%s: error: failed to initialize the Metal library\n", __func__); free(res); return NULL; } } //const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev); res->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT); res->use_fusion = getenv("GGML_METAL_FUSION_DISABLE") == nil; res->use_concurrency = getenv("GGML_METAL_CONCURRENCY_DISABLE") == nil; { const char * val = getenv("GGML_METAL_GRAPH_DEBUG"); res->debug_graph = val ? atoi(val) : 0; } { const char * val = getenv("GGML_METAL_FUSION_DEBUG"); res->debug_fusion = val ? 
atoi(val) : 0; } res->use_graph_optimize = true; if (getenv("GGML_METAL_GRAPH_OPTIMIZE_DISABLE") != NULL) { res->use_graph_optimize = false; } memset(res->fuse_cnt, 0, sizeof(res->fuse_cnt)); GGML_LOG_INFO("%s: use fusion = %s\n", __func__, res->use_fusion ? "true" : "false"); GGML_LOG_INFO("%s: use concurrency = %s\n", __func__, res->use_concurrency ? "true" : "false"); GGML_LOG_INFO("%s: use graph optimize = %s\n", __func__, res->use_graph_optimize ? "true" : "false"); res->capture_next_compute = false; res->capture_started = false; res->capture_scope = nil; res->gf = nil; res->encode_async = nil; for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) { res->cmd_bufs[i].obj = nil; } res->cmd_bufs_ext = [[NSMutableArray alloc] init]; res->cmd_buf_last = nil; res->pipelines_ext = ggml_metal_pipelines_init(); return res; } void ggml_metal_free(ggml_metal_t ctx) { GGML_LOG_INFO("%s: deallocating\n", __func__); for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) { if (ctx->cmd_bufs[i].obj) { [ctx->cmd_bufs[i].obj release]; } } for (int i = 0; i < (int) ctx->cmd_bufs_ext.count; ++i) { if (ctx->cmd_bufs_ext[i]) { [ctx->cmd_bufs_ext[i] release]; } } [ctx->cmd_bufs_ext removeAllObjects]; [ctx->cmd_bufs_ext release]; if (ctx->pipelines_ext) { ggml_metal_pipelines_free(ctx->pipelines_ext); ctx->pipelines_ext = nil; } if (ctx->debug_fusion > 0) { GGML_LOG_DEBUG("%s: fusion stats:\n", __func__); for (int i = 0; i < GGML_OP_COUNT; i++) { if (ctx->fuse_cnt[i] == 0) { continue; } // note: cannot use ggml_log here GGML_LOG_DEBUG("%s: - %s: %" PRIu64 "\n", __func__, ggml_op_name((enum ggml_op) i), ctx->fuse_cnt[i]); } } Block_release(ctx->encode_async); //[ctx->queue release]; // [TAG_QUEUE_PER_BACKEND] dispatch_release(ctx->d_queue); free(ctx); } void ggml_metal_synchronize(ggml_metal_t ctx) { // wait for any backend operations to finish if (ctx->cmd_buf_last) { [ctx->cmd_buf_last waitUntilCompleted]; ctx->cmd_buf_last = nil; } // check status of all command buffers { const int n_cb = ctx->n_cb; for (int cb_idx = 0; cb_idx <= n_cb; ++cb_idx) { id cmd_buf = ctx->cmd_bufs[cb_idx].obj; if (!cmd_buf) { continue; } MTLCommandBufferStatus status = [cmd_buf status]; if (status != MTLCommandBufferStatusCompleted) { GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, cb_idx, (int) status); if (status == MTLCommandBufferStatusError) { GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]); } GGML_ABORT("fatal error"); } } } // release any completed extra command buffers if (ctx->cmd_bufs_ext.count > 0) { for (size_t i = 0; i < ctx->cmd_bufs_ext.count; ++i) { id cmd_buf = ctx->cmd_bufs_ext[i]; MTLCommandBufferStatus status = [cmd_buf status]; if (status != MTLCommandBufferStatusCompleted) { GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, (int) i, (int) status); if (status == MTLCommandBufferStatusError) { GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]); } GGML_ABORT("fatal error"); } [cmd_buf release]; } [ctx->cmd_bufs_ext removeAllObjects]; } } static struct ggml_metal_buffer_id ggml_metal_get_buffer_id(const struct ggml_tensor * t) { if (!t) { return (struct ggml_metal_buffer_id) { nil, 0 }; } ggml_backend_buffer_t buffer = t->view_src ? 
t->view_src->buffer : t->buffer; return ggml_metal_buffer_get_id(buffer->context, t); } void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { @autoreleasepool { // wrap the source data into a Metal buffer id device = ggml_metal_device_get_obj(ctx->dev); id buf_src = [device newBufferWithBytes:data length:size options:MTLResourceStorageModeShared]; GGML_ASSERT(buf_src); struct ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(tensor); if (bid_dst.metal == nil) { GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name); } bid_dst.offs += offset; // queue the copy operation into the queue of the Metal context // this will be queued at the end, after any currently ongoing GPU operations id queue = ggml_metal_device_get_queue(ctx->dev); id cmd_buf = [queue commandBuffer]; id encoder = [cmd_buf blitCommandEncoder]; [encoder copyFromBuffer:buf_src sourceOffset:0 toBuffer:bid_dst.metal destinationOffset:bid_dst.offs size:size]; [encoder endEncoding]; [cmd_buf commit]; [buf_src release]; // do not wait here for completion //[cmd_buf waitUntilCompleted]; // instead, remember a reference to the command buffer and wait for it later if needed [ctx->cmd_bufs_ext addObject:cmd_buf]; ctx->cmd_buf_last = cmd_buf; [cmd_buf retain]; } } void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { @autoreleasepool { id device = ggml_metal_device_get_obj(ctx->dev); id buf_dst = [device newBufferWithBytesNoCopy:data length:size options:MTLResourceStorageModeShared deallocator:nil]; GGML_ASSERT(buf_dst); struct ggml_metal_buffer_id bid_src = ggml_metal_get_buffer_id(tensor); if (bid_src.metal == nil) { GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name); } bid_src.offs += offset; // queue the copy operation into the queue of the Metal context // this will be queued at the end, after any currently ongoing GPU operations id queue = ggml_metal_device_get_queue(ctx->dev); id cmd_buf = [queue commandBuffer]; id encoder = [cmd_buf blitCommandEncoder]; [encoder copyFromBuffer:bid_src.metal sourceOffset:bid_src.offs toBuffer:buf_dst destinationOffset:0 size:size]; [encoder endEncoding]; [cmd_buf commit]; [buf_dst release]; // do not wait here for completion //[cmd_buf waitUntilCompleted]; // instead, remember a reference to the command buffer and wait for it later if needed [ctx->cmd_bufs_ext addObject:cmd_buf]; ctx->cmd_buf_last = cmd_buf; [cmd_buf retain]; } } enum ggml_status ggml_metal_graph_compute(ggml_metal_t ctx, struct ggml_cgraph * gf) { // number of nodes encoded by the main thread (empirically determined) const int n_main = 64; // number of threads in addition to the main thread const int n_cb = ctx->n_cb; // keep the memory wired ggml_metal_device_rsets_keep_alive(ctx->dev); // submit the ggml compute graph to the GPU by creating command buffers and encoding the ops in them // the first n_nodes_0 are encoded and submitted for processing directly by the calling thread // while these nodes are processing, we start n_cb threads to enqueue the rest of the nodes // each thread creates it's own command buffer and enqueues the ops in parallel // // tests on M1 Pro and M2 Ultra using LLaMA models, show that optimal values for n_cb are 1 or 2 @autoreleasepool { ctx->gf = gf; ctx->n_nodes_0 = MIN(n_main, gf->n_nodes); ctx->n_nodes_1 = gf->n_nodes - ctx->n_nodes_0; ctx->n_nodes_per_cb = (ctx->n_nodes_1 + ctx->n_cb - 
1) / ctx->n_cb; const bool use_capture = ctx->capture_next_compute; if (use_capture) { ctx->capture_next_compute = false; // make sure all previous computations have finished before starting the capture if (ctx->cmd_buf_last) { [ctx->cmd_buf_last waitUntilCompleted]; ctx->cmd_buf_last = nil; } if (!ctx->capture_started) { // create capture scope id device = ggml_metal_device_get_obj(ctx->dev); ctx->capture_scope = [[MTLCaptureManager sharedCaptureManager] newCaptureScopeWithDevice:device]; MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new]; descriptor.captureObject = ctx->capture_scope; descriptor.destination = MTLCaptureDestinationGPUTraceDocument; descriptor.outputURL = [NSURL fileURLWithPath:[NSString stringWithFormat:@"/tmp/perf-metal.gputrace"]]; NSError * error = nil; if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) { GGML_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]); } else { [ctx->capture_scope beginScope]; ctx->capture_started = true; } } } // short-hand id queue = ggml_metal_device_get_queue(ctx->dev); // the main thread commits the first few commands immediately // cmd_buf[n_cb] { id cmd_buf = [queue commandBufferWithUnretainedReferences]; [cmd_buf retain]; if (ctx->cmd_bufs[n_cb].obj) { [ctx->cmd_bufs[n_cb].obj release]; } ctx->cmd_bufs[n_cb].obj = cmd_buf; [cmd_buf enqueue]; ctx->encode_async(n_cb); } // remember the command buffer for the next iteration ctx->cmd_buf_last = ctx->cmd_bufs[n_cb].obj; // prepare the rest of the command buffers asynchronously (optional) // cmd_buf[0.. n_cb) for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) { id cmd_buf = [queue commandBufferWithUnretainedReferences]; [cmd_buf retain]; if (ctx->cmd_bufs[cb_idx].obj) { [ctx->cmd_bufs[cb_idx].obj release]; } ctx->cmd_bufs[cb_idx].obj = cmd_buf; // always enqueue the first two command buffers // enqueue all of the command buffers if we don't need to abort if (cb_idx < 2 || ctx->abort_callback == NULL) { [cmd_buf enqueue]; // update the pointer to the last queued command buffer // this is needed to implement synchronize() ctx->cmd_buf_last = cmd_buf; } } dispatch_apply(n_cb, ctx->d_queue, ctx->encode_async); // for debugging: block until graph is computed //[ctx->cmd_buf_last waitUntilCompleted]; // enter here only when capturing in order to wait for all computation to finish // otherwise, we leave the graph to compute asynchronously if (!use_capture && ctx->capture_started) { // wait for completion and check status of each command buffer // needed to detect if the device ran out-of-memory for example (#1881) { id cmd_buf = ctx->cmd_bufs[n_cb].obj; [cmd_buf waitUntilCompleted]; MTLCommandBufferStatus status = [cmd_buf status]; if (status != MTLCommandBufferStatusCompleted) { GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, n_cb, status); if (status == MTLCommandBufferStatusError) { GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]); } return GGML_STATUS_FAILED; } } for (int i = 0; i < n_cb; ++i) { id cmd_buf = ctx->cmd_bufs[i].obj; [cmd_buf waitUntilCompleted]; MTLCommandBufferStatus status = [cmd_buf status]; if (status != MTLCommandBufferStatusCompleted) { GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status); if (status == MTLCommandBufferStatusError) { GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]); } return GGML_STATUS_FAILED; } id next_buffer = (i + 1 < n_cb ? 
ctx->cmd_bufs[i + 1].obj : nil); if (!next_buffer) { continue; } const bool next_queued = ([next_buffer status] != MTLCommandBufferStatusNotEnqueued); if (next_queued) { continue; } if (ctx->abort_callback && ctx->abort_callback(ctx->abort_callback_data)) { GGML_LOG_INFO("%s: command buffer %d aborted", __func__, i); return GGML_STATUS_ABORTED; } [next_buffer commit]; } [ctx->capture_scope endScope]; [[MTLCaptureManager sharedCaptureManager] stopCapture]; } } return GGML_STATUS_SUCCESS; } void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf) { //const int64_t t_start = ggml_time_us(); if (ctx->use_graph_optimize) { ggml_graph_optimize(gf); } //printf("%s: graph optimize took %.3f ms\n", __func__, (ggml_time_us() - t_start) / 1000.0); } void ggml_metal_set_n_cb(ggml_metal_t ctx, int n_cb) { if (ctx->n_cb != n_cb) { ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS); if (ctx->n_cb > 2) { GGML_LOG_WARN("%s: n_cb = %d, using n_cb > 2 is not recommended and can degrade the performance in some cases\n", __func__, n_cb); } } if (ctx->encode_async) { Block_release(ctx->encode_async); } ctx->encode_async = Block_copy(^(size_t iter) { const int cb_idx = iter; const int n_cb_l = ctx->n_cb; const int n_nodes_0 = ctx->n_nodes_0; const int n_nodes_1 = ctx->n_nodes_1; const int n_nodes_per_cb = ctx->n_nodes_per_cb; int idx_start = 0; int idx_end = n_nodes_0; if (cb_idx < n_cb_l) { idx_start = n_nodes_0 + ( (cb_idx + 0) * n_nodes_per_cb); idx_end = n_nodes_0 + (MIN((cb_idx == n_cb_l - 1) ? n_nodes_1 : (cb_idx + 1) * n_nodes_per_cb, n_nodes_1)); } id cmd_buf = ctx->cmd_bufs[cb_idx].obj; ggml_metal_op_t ctx_op = ggml_metal_op_init( ctx->dev, cmd_buf, ctx->gf, idx_start, idx_end, ctx->use_fusion, ctx->use_concurrency, ctx->capture_next_compute, ctx->debug_graph, ctx->debug_fusion); for (int idx = 0; idx < ggml_metal_op_n_nodes(ctx_op); ++idx) { const int res = ggml_metal_op_encode(ctx_op, idx); if (res == 0) { break; } idx += res - 1; } ggml_metal_op_free(ctx_op); if (cb_idx < 2 || ctx->abort_callback == NULL) { [cmd_buf commit]; } }); } void ggml_metal_set_abort_callback(ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data) { ctx->abort_callback = abort_callback; ctx->abort_callback_data = user_data; } bool ggml_metal_supports_family(ggml_metal_t ctx, int family) { GGML_ASSERT(ctx->dev != nil); id device = ggml_metal_device_get_obj(ctx->dev); return [device supportsFamily:(MTLGPUFamilyApple1 + family - 1)]; } void ggml_metal_capture_next_compute(ggml_metal_t ctx) { ctx->capture_next_compute = true; } ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-device.cpp000066400000000000000000001564231512524704700232030ustar00rootroot00000000000000#include "ggml-metal-device.h" #include "ggml-metal-impl.h" #include "ggml-impl.h" #include #include #include #include struct ggml_metal_device_deleter { void operator()(ggml_metal_device_t ctx) { ggml_metal_device_free(ctx); } }; typedef std::unique_ptr ggml_metal_device_ptr; ggml_metal_device_t ggml_metal_device_get(void) { static ggml_metal_device_ptr ctx { ggml_metal_device_init() }; return ctx.get(); } struct ggml_metal_pipelines { std::unordered_map data; }; ggml_metal_pipelines_t ggml_metal_pipelines_init(void) { ggml_metal_pipelines_t res = new ggml_metal_pipelines(); return res; } void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls) { if (!ppls) { return; } for (auto it = ppls->data.begin(); it != ppls->data.end(); ++it) { ggml_metal_pipeline_free(it->second); } delete ppls; } void 
ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline) { ppls->data[name] = pipeline; } ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name) { if (ppls->data.find(name) == ppls->data.end()) { return nullptr; } return ppls->data[name]; } struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_base(ggml_metal_library_t lib, ggml_op op) { char base[256]; char name[256]; const char * op_str = "undefined"; switch (op) { case GGML_OP_ADD_ID: op_str = "add_id"; break; case GGML_OP_CONCAT: op_str = "concat"; break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_%s", op_str); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cpy(ggml_metal_library_t lib, ggml_type tsrc, ggml_type tdst) { char base[256]; char name[256]; snprintf(base, 256, "kernel_cpy_%s_%s", ggml_type_name(tsrc), ggml_type_name(tdst)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_2d(ggml_metal_library_t lib, const ggml_tensor * op, ggml_op_pool op_pool) { GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32 && op->src[0]->type == op->type); const char * pool_str = "undefined"; switch (op_pool) { case GGML_OP_POOL_AVG: pool_str = "avg"; break; case GGML_OP_POOL_MAX: pool_str = "max"; break; default: GGML_ASSERT(false && "not implemented"); }; char base[256]; char name[256]; snprintf(base, 256, "kernel_pool_2d_%s_%s", pool_str, ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_get_rows(ggml_metal_library_t lib, ggml_type tsrc) { char base[256]; char name[256]; snprintf(base, 256, "kernel_get_rows_%s", ggml_type_name(tsrc)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_set_rows(ggml_metal_library_t lib, ggml_type tidx, ggml_type tdst) { char base[256]; char name[256]; snprintf(base, 256, "kernel_set_rows_%s_%s", ggml_type_name(tdst), ggml_type_name(tidx)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_repeat(ggml_metal_library_t lib, ggml_type tsrc) { char base[256]; char name[256]; snprintf(base, 256, "kernel_repeat_%s", ggml_type_name(tsrc)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } 
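// [editorial illustration - hypothetical helper, not part of the upstream ggml API]
// every pipeline getter in this file follows the same lookup-or-compile pattern: build a "base" kernel name,
// derive a unique cache "name" (optionally encoding specialization parameters), look the pipeline up in the
// library, and compile it only on first use. a minimal sketch of that pattern, assuming only the library calls
// already used throughout this file:
static ggml_metal_pipeline_with_params ggml_metal_library_get_or_compile_example(
        ggml_metal_library_t lib,
        const char *         base,
        const char *         name,
        ggml_metal_cv_t      cv) {
    // fast path: the pipeline has already been compiled and cached under this name
    ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);

    if (!res.pipeline) {
        // slow path: compile the kernel (optionally specialized via the function constants in cv) and cache it
        res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
    }

    return res;
}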
ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_unary(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(ggml_is_contiguous(op->src[0])); char base[256]; char name[256]; const int64_t n = ggml_nelements(op); const char * op_str = "undefined"; switch (op->op) { case GGML_OP_SCALE: op_str = "scale"; break; case GGML_OP_FILL: op_str = "fill"; break; case GGML_OP_CLAMP: op_str = "clamp"; break; case GGML_OP_SQR: op_str = "sqr"; break; case GGML_OP_SQRT: op_str = "sqrt"; break; case GGML_OP_SIN: op_str = "sin"; break; case GGML_OP_COS: op_str = "cos"; break; case GGML_OP_LOG: op_str = "log"; break; case GGML_OP_LEAKY_RELU: op_str = "leaky_relu"; break; case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_TANH: op_str = "tanh"; break; case GGML_UNARY_OP_RELU: op_str = "relu"; break; case GGML_UNARY_OP_SIGMOID: op_str = "sigmoid"; break; case GGML_UNARY_OP_GELU: op_str = "gelu"; break; case GGML_UNARY_OP_GELU_ERF: op_str = "gelu_erf"; break; case GGML_UNARY_OP_GELU_QUICK: op_str = "gelu_quick"; break; case GGML_UNARY_OP_SILU: op_str = "silu"; break; case GGML_UNARY_OP_ELU: op_str = "elu"; break; case GGML_UNARY_OP_NEG: op_str = "neg"; break; case GGML_UNARY_OP_ABS: op_str = "abs"; break; case GGML_UNARY_OP_SGN: op_str = "sgn"; break; case GGML_UNARY_OP_STEP: op_str = "step"; break; case GGML_UNARY_OP_HARDSWISH: op_str = "hardswish"; break; case GGML_UNARY_OP_HARDSIGMOID: op_str = "hardsigmoid"; break; case GGML_UNARY_OP_EXP: op_str = "exp"; break; case GGML_UNARY_OP_SOFTPLUS: op_str = "softplus"; break; case GGML_UNARY_OP_EXPM1: op_str = "expm1"; break; default: GGML_ABORT("fatal error"); } break; default: GGML_ABORT("fatal error"); }; const char * suffix = ""; if (n % 4 == 0) { suffix = "_4"; } snprintf(base, 256, "kernel_%s_%s%s", op_str, ggml_type_name(op->src[0]->type), suffix); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_glu(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(ggml_is_contiguous_1(op->src[0])); char base[256]; char name[256]; const char * op_str = "undefined"; switch (op->op) { case GGML_OP_GLU: switch (ggml_get_glu_op(op)) { case GGML_GLU_OP_REGLU: op_str = "reglu"; break; case GGML_GLU_OP_GEGLU: op_str = "geglu"; break; case GGML_GLU_OP_SWIGLU: op_str = "swiglu"; break; case GGML_GLU_OP_SWIGLU_OAI: op_str = "swiglu_oai"; break; case GGML_GLU_OP_GEGLU_ERF: op_str = "geglu_erf"; break; case GGML_GLU_OP_GEGLU_QUICK: op_str = "geglu_quick"; break; default: GGML_ABORT("fatal error"); } break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_%s_%s", op_str, ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_SUM); char base[256]; char name[256]; snprintf(base, 256, "kernel_op_sum_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } 
return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum_rows(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type)); char base[256]; char name[256]; const char * op_str = "undefined"; switch (op->op) { case GGML_OP_SUM_ROWS: op_str = "sum_rows"; break; case GGML_OP_MEAN: op_str = "mean"; break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_%s_%s", op_str, ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = 32*sizeof(float); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_blk(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->op == GGML_OP_CUMSUM); char base[256]; char name[256]; snprintf(base, 256, "kernel_cumsum_blk_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_add(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->op == GGML_OP_CUMSUM); char base[256]; char name[256]; snprintf(base, 256, "kernel_cumsum_add_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_tri(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->op == GGML_OP_TRI); GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type)); char base[256]; char name[256]; const char * op_str = "tri"; const int ttype = op->op_params[0]; snprintf(base, 256, "kernel_%s_%s_%d", op_str, ggml_type_name(op->src[0]->type), ttype); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_soft_max(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(!op->src[1] || op->src[1]->type == GGML_TYPE_F16 || op->src[1]->type == GGML_TYPE_F32); char base[256]; char name[256]; const char * suffix = ""; if (op->src[0]->ne[0] % 4 == 0) { suffix = "_4"; } const ggml_type tsrc1 = op->src[1] ? 
op->src[1]->type : GGML_TYPE_F32; snprintf(base, 256, "kernel_soft_max_%s%s", ggml_type_name(tsrc1), suffix); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = 32*sizeof(float); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(ggml_is_contiguous(op->src[1])); char base[256]; char name[256]; const char * suffix = ""; if (op->src[1]->ne[0] % 4 == 0) { suffix = "_4"; } snprintf(base, 256, "kernel_ssm_conv_%s_%s%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type), suffix); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv_batched(ggml_metal_library_t lib, const ggml_tensor * op, int ssm_conv_bs) { GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(ggml_is_contiguous(op->src[1])); char base[256]; char name[256]; const char * suffix = ""; if (op->src[1]->ne[0] % 4 == 0) { suffix = "_4"; } snprintf(base, 256, "kernel_ssm_conv_%s_%s_batched%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type), suffix); snprintf(name, 256, "%s_ssm_conv_bs=%d", base, ssm_conv_bs); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_int16(cv, ssm_conv_bs, FC_SSM_CONV + 0); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_scan(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); char base[256]; char name[256]; const int nsg = (ne00 + 31)/32; snprintf(base, 256, "kernel_ssm_scan_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s_nsg=%d", base, nsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } // Shared memory layout: // - sgptg * NW floats for partial sums (nsg * 32) // - sgptg floats for shared_x_dt (nsg) // - sgptg floats for shared_dA (nsg) // Total: nsg * (32 + 2) floats res.smem = (32 + 2)*sizeof(float)*nsg; return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv(ggml_metal_library_t lib, const ggml_tensor * op) { char base[256]; char name[256]; const int64_t C = op->ne[0]; const int64_t H = op->src[0]->ne[1]; switch (op->op) { case GGML_OP_RWKV_WKV6: { GGML_ASSERT(op->src[5]->type == GGML_TYPE_F32); GGML_ASSERT(C % H == 0); GGML_ASSERT(C / H == 64); snprintf(base, 256, "kernel_rwkv_wkv6_%s", ggml_type_name(op->src[0]->type)); } break; case GGML_OP_RWKV_WKV7: { GGML_ASSERT(op->src[6]->type == GGML_TYPE_F32); GGML_ASSERT(C % H == 0); GGML_ASSERT(C / H == 64); snprintf(base, 256, "kernel_rwkv_wkv7_%s", ggml_type_name(op->src[0]->type)); } break; default: GGML_ABORT("fatal error"); } 
snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_ext(ggml_metal_library_t lib, ggml_type tsrc0, ggml_type tsrc1, int nsg, int nxpsg, int r1ptg) { char base[256]; char name[256]; snprintf(base, 256, "kernel_mul_mv_ext_%s_%s_r1_%d", ggml_type_name(tsrc0), ggml_type_name(tsrc1), r1ptg); snprintf(name, 256, "%s_nsg=%d_nxpsg=%d", base, nsg, nxpsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_int16(cv, nsg, FC_MUL_MV + 0); ggml_metal_cv_set_int16(cv, nxpsg, FC_MUL_MV + 1); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm(ggml_metal_library_t lib, const ggml_tensor * op) { char base[256]; char name[256]; const ggml_type tsrc0 = op->src[0]->type; const ggml_type tsrc1 = op->src[1]->type; const bool bc_inp = op->src[0]->ne[0] % 32 != 0; const bool bc_out = op->ne[0] % 64 != 0 || op->ne[1] % 32 != 0; snprintf(base, 256, "kernel_mul_mm_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1)); snprintf(name, 256, "%s_bci=%d_bco=%d", base, bc_inp, bc_out); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_bool(cv, bc_inp, FC_MUL_MM + 0); ggml_metal_cv_set_bool(cv, bc_out, FC_MUL_MM + 1); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } // when the output size is not multiple of 64x32, we need extra smem to prevent out-of-bounds writes res.smem = bc_out ? 8192 : 4096 + 2048; return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); char base[256]; char name[256]; int nsg = 0; // number of simdgroups int nr0 = 0; // number of src0 rows per simdgroup int nr1 = 1; // number of src1 rows per threadgroup size_t smem = 0; // shared memory const ggml_type tsrc0 = op->src[0]->type; const ggml_type tsrc1 = op->src[1]->type; const char * suffix = ""; // use custom matrix x vector kernel switch (tsrc0) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_BF16: { if (ne00 < 32) { nsg = 1; nr0 = 32; nr1 = 1; suffix = "_short"; } else { nsg = std::min(4, (ne00 + 127) / 128); nr0 = 2; nr1 = 1; smem = 32*sizeof(float)*nr0; suffix = ne00 % 4 == 0 ? 
"_4" : ""; } } break; case GGML_TYPE_Q4_0: { nsg = N_SG_Q4_0; nr0 = N_R0_Q4_0; } break; case GGML_TYPE_Q4_1: { nsg = N_SG_Q4_1; nr0 = N_R0_Q4_1; } break; case GGML_TYPE_Q5_0: { nsg = N_SG_Q5_0; nr0 = N_R0_Q5_0; } break; case GGML_TYPE_Q5_1: { nsg = N_SG_Q5_1; nr0 = N_R0_Q5_1; } break; case GGML_TYPE_Q8_0: { nsg = N_SG_Q8_0; nr0 = N_R0_Q8_0; smem = 32*sizeof(float)*N_R0_Q8_0; } break; case GGML_TYPE_MXFP4: { nsg = N_SG_MXFP4; nr0 = N_R0_MXFP4; smem = 32*sizeof(float); } break; case GGML_TYPE_Q2_K: { nsg = N_SG_Q2_K; nr0 = N_R0_Q2_K; } break; case GGML_TYPE_Q3_K: { nsg = N_SG_Q3_K; nr0 = N_R0_Q3_K; } break; case GGML_TYPE_Q4_K: { nsg = N_SG_Q4_K; nr0 = N_R0_Q4_K; } break; case GGML_TYPE_Q5_K: { nsg = N_SG_Q5_K; nr0 = N_R0_Q5_K; } break; case GGML_TYPE_Q6_K: { nsg = N_SG_Q6_K; nr0 = N_R0_Q6_K; } break; case GGML_TYPE_IQ2_XXS: { nsg = N_SG_IQ2_XXS; nr0 = N_R0_IQ2_XXS; smem = 256*8+128; } break; case GGML_TYPE_IQ2_XS: { nsg = N_SG_IQ2_XS; nr0 = N_R0_IQ2_XS; smem = 512*8+128; } break; case GGML_TYPE_IQ3_XXS: { nsg = N_SG_IQ3_XXS; nr0 = N_R0_IQ3_XXS; smem = 256*4+128; } break; case GGML_TYPE_IQ3_S: { nsg = N_SG_IQ3_S; nr0 = N_R0_IQ3_S; smem = 512*4; } break; case GGML_TYPE_IQ2_S: { nsg = N_SG_IQ2_S; nr0 = N_R0_IQ2_S; } break; case GGML_TYPE_IQ1_S: { nsg = N_SG_IQ1_S; nr0 = N_R0_IQ1_S; } break; case GGML_TYPE_IQ1_M: { nsg = N_SG_IQ1_M; nr0 = N_R0_IQ1_M; } break; case GGML_TYPE_IQ4_NL: { nsg = N_SG_IQ4_NL; nr0 = N_R0_IQ4_NL; smem = 32*sizeof(float); } break; case GGML_TYPE_IQ4_XS: { nsg = N_SG_IQ4_XS; nr0 = N_R0_IQ4_XS; smem = 32*sizeof(float); } break; default: { GGML_LOG_ERROR("Asserting on type %d\n", (int) tsrc0); GGML_ABORT("not implemented"); } }; snprintf(base, 256, "kernel_mul_mv_%s_%s%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1), suffix); snprintf(name, 256, "%s_nsg=%d", base, nsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_int16(cv, nsg, FC_MUL_MV + 0); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } res.nr0 = nr0; res.nr1 = nr1; res.nsg = nsg; res.smem = smem; return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id_map0(ggml_metal_library_t lib, int ne02, int ne20) { char base[256]; char name[256]; snprintf(base, 256, "kernel_mul_mm_id_map0_ne20_%d", ne20); snprintf(name, 256, "%s_ne02=%d", base, ne02); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = (size_t) ne02*ne20*sizeof(uint16_t); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id(ggml_metal_library_t lib, const ggml_tensor * op) { char base[256]; char name[256]; const ggml_type tsrc0 = op->src[0]->type; const ggml_type tsrc1 = op->src[1]->type; const bool bc_inp = op->src[0]->ne[0] % 32 != 0; snprintf(base, 256, "kernel_mul_mm_id_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1)); snprintf(name, 256, "%s_bci=%d", base, bc_inp); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_bool(cv, bc_inp, FC_MUL_MM + 0); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } res.smem = 8192; return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_id(ggml_metal_library_t lib, const ggml_tensor * op) 
{ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); char base[256]; char name[256]; int nsg = 0; // number of simdgroups int nr0 = 0; // number of src0 rows per simdgroup int nr1 = 1; // number of src1 rows per threadgroup size_t smem = 0; // shared memory const ggml_type tsrc0 = op->src[0]->type; const ggml_type tsrc1 = op->src[1]->type; const char * suffix = ""; // use custom matrix x vector kernel switch (tsrc0) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_BF16: { nsg = std::min(4, (ne00 + 127) / 128); nr0 = 2; nr1 = 1; smem = 32*sizeof(float)*nr0; suffix = ne00 % 4 == 0 ? "_4" : ""; } break; case GGML_TYPE_Q4_0: { nsg = N_SG_Q4_0; nr0 = N_R0_Q4_0; } break; case GGML_TYPE_Q4_1: { nsg = N_SG_Q4_1; nr0 = N_R0_Q4_1; } break; case GGML_TYPE_Q5_0: { nsg = N_SG_Q5_0; nr0 = N_R0_Q5_0; } break; case GGML_TYPE_Q5_1: { nsg = N_SG_Q5_1; nr0 = N_R0_Q5_1; } break; case GGML_TYPE_Q8_0: { nsg = N_SG_Q8_0; nr0 = N_R0_Q8_0; smem = 32*sizeof(float)*N_R0_Q8_0; } break; case GGML_TYPE_MXFP4: { nsg = N_SG_MXFP4; nr0 = N_R0_MXFP4; smem = 32*sizeof(float); } break; case GGML_TYPE_Q2_K: { nsg = N_SG_Q2_K; nr0 = N_R0_Q2_K; } break; case GGML_TYPE_Q3_K: { nsg = N_SG_Q3_K; nr0 = N_R0_Q3_K; } break; case GGML_TYPE_Q4_K: { nsg = N_SG_Q4_K; nr0 = N_R0_Q4_K; } break; case GGML_TYPE_Q5_K: { nsg = N_SG_Q5_K; nr0 = N_R0_Q5_K; } break; case GGML_TYPE_Q6_K: { nsg = N_SG_Q6_K; nr0 = N_R0_Q6_K; } break; case GGML_TYPE_IQ2_XXS: { nsg = N_SG_IQ2_XXS; nr0 = N_R0_IQ2_XXS; smem = 256*8+128; } break; case GGML_TYPE_IQ2_XS: { nsg = N_SG_IQ2_XS; nr0 = N_R0_IQ2_XS; smem = 512*8+128; } break; case GGML_TYPE_IQ3_XXS: { nsg = N_SG_IQ3_XXS; nr0 = N_R0_IQ3_XXS; smem = 256*4+128; } break; case GGML_TYPE_IQ3_S: { nsg = N_SG_IQ3_S; nr0 = N_R0_IQ3_S; smem = 512*4; } break; case GGML_TYPE_IQ2_S: { nsg = N_SG_IQ2_S; nr0 = N_R0_IQ2_S; } break; case GGML_TYPE_IQ1_S: { nsg = N_SG_IQ1_S; nr0 = N_R0_IQ1_S; } break; case GGML_TYPE_IQ1_M: { nsg = N_SG_IQ1_M; nr0 = N_R0_IQ1_M; } break; case GGML_TYPE_IQ4_NL: { nsg = N_SG_IQ4_NL; nr0 = N_R0_IQ4_NL; smem = 32*sizeof(float); } break; case GGML_TYPE_IQ4_XS: { nsg = N_SG_IQ4_XS; nr0 = N_R0_IQ4_XS; smem = 32*sizeof(float); } break; default: { GGML_LOG_ERROR("Asserting on type %d\n", (int)op->src[2]->type); GGML_ABORT("not implemented"); } }; snprintf(base, 256, "kernel_mul_mv_id_%s_%s%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1), suffix); snprintf(name, 256, "%s_nsg=%d", base, nsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_int16(cv, nsg, FC_MUL_MV + 0); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } res.nr0 = nr0; res.nr1 = nr1; res.nsg = nsg; res.smem = smem; return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argmax(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous_1(op->src[0])); GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type)); char base[256]; char name[256]; snprintf(base, 256, "kernel_argmax_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = 32*(sizeof(float) + sizeof(int32_t)); return res; } ggml_metal_pipeline_with_params 
ggml_metal_library_get_pipeline_argsort(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_ARGSORT); char base[256]; char name[256]; ggml_sort_order order = (ggml_sort_order) op->op_params[0]; const char * order_str = "undefined"; switch (order) { case GGML_SORT_ORDER_ASC: order_str = "asc"; break; case GGML_SORT_ORDER_DESC: order_str = "desc"; break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_argsort_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort_merge(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_ARGSORT); char base[256]; char name[256]; ggml_sort_order order = (ggml_sort_order) op->op_params[0]; const char * order_str = "undefined"; switch (order) { case GGML_SORT_ORDER_ASC: order_str = "asc"; break; case GGML_SORT_ORDER_DESC: order_str = "desc"; break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_argsort_merge_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } // note: reuse the argsort kernel for top_k ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_TOP_K); char base[256]; char name[256]; // note: the top_k kernel is always descending order ggml_sort_order order = GGML_SORT_ORDER_DESC; const char * order_str = "undefined"; switch (order) { case GGML_SORT_ORDER_ASC: order_str = "asc"; break; case GGML_SORT_ORDER_DESC: order_str = "desc"; break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_argsort_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k_merge(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_TOP_K); char base[256]; char name[256]; ggml_sort_order order = GGML_SORT_ORDER_DESC; const char * order_str = "undefined"; switch (order) { case GGML_SORT_ORDER_ASC: order_str = "asc"; break; case GGML_SORT_ORDER_DESC: order_str = "desc"; break; default: GGML_ABORT("fatal error"); }; snprintf(base, 256, "kernel_argsort_merge_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_pad( ggml_metal_library_t lib, const struct ggml_tensor * op, bool has_mask, int32_t ncpsg) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); GGML_UNUSED(op); char base[256]; char name[256]; snprintf(base, 256, "kernel_%s", "flash_attn_ext_pad"); snprintf(name, 
256, "%s_mask=%d_ncpsg=%d", base, has_mask, ncpsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT_PAD + 0); //ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_PAD + 1); //ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT_PAD + 2); //ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT_PAD + 3); //ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_PAD + 20); //ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_PAD + 21); //ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT_PAD + 22); //ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_PAD + 23); //ggml_metal_cv_set_int32(cv, nqptg, FC_FLASH_ATTN_EXT_PAD + 24); ggml_metal_cv_set_int32(cv, ncpsg, FC_FLASH_ATTN_EXT_PAD + 25); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_blk( ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t nqptg, int32_t ncpsg) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); GGML_UNUSED(op); char base[256]; char name[256]; snprintf(base, 256, "kernel_%s", "flash_attn_ext_blk"); snprintf(name, 256, "%s_nqptg=%d_ncpsg=%d", base, nqptg, ncpsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); //ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT_BLK + 0); //ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_BLK + 1); //ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT_BLK + 2); //ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT_BLK + 3); //ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_BLK + 20); //ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_BLK + 21); //ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT_BLK + 22); //ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_BLK + 23); ggml_metal_cv_set_int32(cv, nqptg, FC_FLASH_ATTN_EXT_BLK + 24); ggml_metal_cv_set_int32(cv, ncpsg, FC_FLASH_ATTN_EXT_BLK + 25); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext( ggml_metal_library_t lib, const ggml_tensor * op, bool has_mask, bool has_sinks, bool has_bias, bool has_scap, bool has_kvpad, int32_t nsg) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); char base[256]; char name[256]; const int32_t dk = (int32_t) op->src[1]->ne[0]; const int32_t dv = (int32_t) op->src[2]->ne[0]; const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0]; const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0]; // do bounds checks for the mask? 
const bool bc_mask = op->src[3] && (op->src[3]->ne[1] % 8 != 0); snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d", "flash_attn_ext", ggml_type_name(op->src[1]->type), dk, dv); snprintf(name, 256, "%s_mask=%d_sinks=%d_bias=%d_scap=%d_kvpad=%d_bcm=%d_ns10=%d_ns20=%d_nsg=%d", base, has_mask, has_sinks, has_bias, has_scap, has_kvpad, bc_mask, ns10, ns20, nsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT + 0); ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT + 1); ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT + 2); ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT + 3); ggml_metal_cv_set_bool(cv, has_kvpad, FC_FLASH_ATTN_EXT + 4); ggml_metal_cv_set_bool(cv, bc_mask, FC_FLASH_ATTN_EXT + 10); ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT + 20); ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT + 21); ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT + 22); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec( ggml_metal_library_t lib, const ggml_tensor * op, bool has_mask, bool has_sinks, bool has_bias, bool has_scap, bool has_kvpad, int32_t nsg, int32_t nwg) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); char base[256]; char name[256]; const int32_t dk = (int32_t) op->src[1]->ne[0]; const int32_t dv = (int32_t) op->src[2]->ne[0]; const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0]; const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0]; snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d", "flash_attn_ext_vec", ggml_type_name(op->src[1]->type), dk, dv); snprintf(name, 256, "%s_mask=%d_sink=%d_bias=%d_scap=%d_kvpad=%d_ns10=%d_ns20=%d_nsg=%d_nwg=%d", base, has_mask, has_sinks, has_bias, has_scap, has_kvpad, ns10, ns20, nsg, nwg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT_VEC + 0); ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_VEC + 1); ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT_VEC + 2); ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT_VEC + 3); ggml_metal_cv_set_bool(cv, has_kvpad, FC_FLASH_ATTN_EXT_VEC + 4); ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_VEC + 20); ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_VEC + 21); ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT_VEC + 22); ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_VEC + 23); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce( ggml_metal_library_t lib, const ggml_tensor * op, int32_t dv, int32_t nwg) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); char base[256]; char name[256]; snprintf(base, 256, "kernel_flash_attn_ext_vec_reduce"); snprintf(name, 256, "%s_dv=%d_nwg=%d", base, dv, nwg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_int32(cv, dv, FC_FLASH_ATTN_EXT_VEC_REDUCE + 0); ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_VEC_REDUCE + 1); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; GGML_UNUSED(op); } 
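// [editorial illustration - hypothetical helper, not part of the upstream ggml API]
// the flash-attention getters above specialize a single kernel source through Metal function constants:
// the flags are written at fixed FC_* offsets, the cache name encodes them, and the constant-value object is
// released after compilation. a minimal sketch of that pattern, assuming a placeholder kernel name and the
// same FC_FLASH_ATTN_EXT indices used above (0 = has_mask, 22 = nsg):
static ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_sketch(
        ggml_metal_library_t lib,
        bool    has_mask,
        int32_t nsg) {
    char base[256];
    char name[256];

    snprintf(base, 256, "kernel_flash_attn_ext_sketch");           // placeholder kernel name (illustration only)
    snprintf(name, 256, "%s_mask=%d_nsg=%d", base, has_mask, nsg); // the cache name encodes the specialization

    ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
    if (!res.pipeline) {
        ggml_metal_cv_t cv = ggml_metal_cv_init();

        ggml_metal_cv_set_bool (cv, has_mask, FC_FLASH_ATTN_EXT + 0);
        ggml_metal_cv_set_int32(cv, nsg,      FC_FLASH_ATTN_EXT + 22);

        res = ggml_metal_library_compile_pipeline(lib, base, name, cv);

        ggml_metal_cv_free(cv);
    }

    return res;
}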
ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin( ggml_metal_library_t lib, ggml_op op, int32_t n_fuse, bool row) { char base[256]; char name[256]; const char * op_str = "undefined"; switch (op) { case GGML_OP_ADD: op_str = "add"; break; case GGML_OP_SUB: op_str = "sub"; break; case GGML_OP_MUL: op_str = "mul"; break; case GGML_OP_DIV: op_str = "div"; break; default: GGML_ABORT("fatal error"); }; if (row) { snprintf(base, 256, "kernel_%s_row_c4_fuse_%d", op_str, n_fuse); } else { snprintf(base, 256, "kernel_%s_fuse_%d", op_str, n_fuse); } snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_l2_norm(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_L2_NORM); GGML_ASSERT(op->src[0]->ne[0] % 4 == 0); GGML_ASSERT(ggml_is_contiguous_1(op->src[0])); char base[256]; char name[256]; snprintf(base, 256, "kernel_l2_norm_f32"); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = 32*sizeof(float); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_group_norm(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_GROUP_NORM); GGML_ASSERT(ggml_is_contiguous(op->src[0])); char base[256]; char name[256]; snprintf(base, 256, "kernel_group_norm_f32"); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = 32*sizeof(float); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_norm(ggml_metal_library_t lib, const ggml_tensor * op, int n_fuse) { assert(op->op == GGML_OP_NORM || op->op == GGML_OP_RMS_NORM); GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); char base[256]; char name[256]; const char * suffix = ""; if (op->ne[0] % 4 == 0) { suffix = "_4"; } switch (op->op) { case GGML_OP_NORM: switch (n_fuse) { case 1: snprintf(base, 256, "kernel_norm_f32%s", suffix); break; case 2: snprintf(base, 256, "kernel_norm_mul_f32%s", suffix); break; case 3: snprintf(base, 256, "kernel_norm_mul_add_f32%s", suffix); break; default: GGML_ABORT("fatal error"); } break; case GGML_OP_RMS_NORM: switch (n_fuse) { case 1: snprintf(base, 256, "kernel_rms_norm_f32%s", suffix); break; case 2: snprintf(base, 256, "kernel_rms_norm_mul_f32%s", suffix); break; case 3: snprintf(base, 256, "kernel_rms_norm_mul_add_f32%s", suffix); break; default: GGML_ABORT("fatal error"); } break; default: GGML_ABORT("fatal error"); } snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } res.smem = 32*sizeof(float); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rope(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_ROPE); char base[256]; char name[256]; const int mode = ((const int32_t *) op->op_params)[2]; const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; const bool 
is_vision = mode == GGML_ROPE_TYPE_VISION; if (is_neox) { snprintf(base, 256, "kernel_rope_neox_%s", ggml_type_name(op->src[0]->type)); } else if ((is_mrope || is_imrope) && !is_vision) { GGML_ASSERT(op->src[1]->ne[0]*4 >= op->src[0]->ne[2]); // need at least 4 pos per token snprintf(base, 256, "kernel_rope_multi_%s", ggml_type_name(op->src[0]->type)); } else if (is_vision) { GGML_ASSERT(op->src[1]->ne[0]*4 >= op->src[0]->ne[2]); // need at least 4 pos per token snprintf(base, 256, "kernel_rope_vision_%s", ggml_type_name(op->src[0]->type)); } else { snprintf(base, 256, "kernel_rope_norm_%s", ggml_type_name(op->src[0]->type)); } snprintf(name, 256, "%s_imrope=%d", base, is_imrope ? 1 : 0); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_bool(cv, is_imrope, FC_ROPE + 0); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_im2col(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_IM2COL); GGML_ASSERT(ggml_is_contiguous(op->src[1])); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32); char base[256]; char name[256]; snprintf(base, 256, "kernel_im2col_%s", ggml_type_name(op->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_1d(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_CONV_TRANSPOSE_1D); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(ggml_is_contiguous(op->src[1])); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->type == GGML_TYPE_F32); char base[256]; char name[256]; snprintf(base, 256, "kernel_conv_transpose_1d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_2d(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_CONV_TRANSPOSE_2D); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(ggml_is_contiguous(op->src[1])); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->type == GGML_TYPE_F32); char base[256]; char name[256]; snprintf(base, 256, "kernel_conv_transpose_2d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_2d(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_CONV_2D); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == 
GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->type == GGML_TYPE_F32); char base[256]; char name[256]; snprintf(base, 256, "kernel_conv_2d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_upscale(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_UPSCALE); char base[256]; char name[256]; snprintf(base, 256, "kernel_upscale_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_PAD); char base[256]; char name[256]; snprintf(base, 256, "kernel_pad_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (res.pipeline) { return res; } res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad_reflect_1d(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_PAD_REFLECT_1D); char base[256]; char name[256]; snprintf(base, 256, "kernel_pad_reflect_1d_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_arange(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_ARANGE); char base[256]; char name[256]; snprintf(base, 256, "kernel_arange_%s", ggml_type_name(op->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_TIMESTEP_EMBEDDING); char base[256]; char name[256]; snprintf(base, 256, "kernel_timestep_embedding_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_adamw(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_OPT_STEP_ADAMW); char base[256]; char name[256]; snprintf(base, 256, "kernel_opt_step_adamw_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params 
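// When a kernel has compile-time variants, the getters bake the variant into the cached name
// (e.g. "_nsg=%d", "_imrope=%d") and pass the value through MTLFunctionConstantValues, so each
// specialization gets its own cache entry. A minimal sketch of that pattern, assuming a
// hypothetical kernel "kernel_example" and a placeholder function-constant base FC_EXAMPLE;
// illustration only, kept out of the build.
#if 0
ggml_metal_pipeline_with_params get_pipeline_example(ggml_metal_library_t lib, int nsg) {
    char base[256];
    char name[256];

    snprintf(base, 256, "kernel_example");
    snprintf(name, 256, "%s_nsg=%d", base, nsg);

    ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
    if (!res.pipeline) {
        ggml_metal_cv_t cv = ggml_metal_cv_init();
        ggml_metal_cv_set_int16(cv, nsg, FC_EXAMPLE + 0); // FC_EXAMPLE is a placeholder index base
        res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
        ggml_metal_cv_free(cv);
    }

    res.nsg = nsg;
    return res;
}
#endif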
ggml_metal_library_get_pipeline_opt_step_sgd(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_OPT_STEP_SGD); char base[256]; char name[256]; snprintf(base, 256, "kernel_opt_step_sgd_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_memset(ggml_metal_library_t lib, const ggml_tensor * op) { GGML_ASSERT(op->type == GGML_TYPE_I64); char base[256]; char name[256]; snprintf(base, 256, "kernel_memset_%s", ggml_type_name(op->type)); snprintf(name, 256, "%s", base); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); } return res; } ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_count_equal(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_COUNT_EQUAL); GGML_TENSOR_LOCALS(int64_t, ne0, op->src[0], ne); GGML_ASSERT(op->src[0]->type == op->src[1]->type); GGML_ASSERT(op->src[0]->type == GGML_TYPE_I32); GGML_ASSERT(op->type == GGML_TYPE_I64); // note: the kernel only supports i32 output due to metal atomic add only supporting atomic_int GGML_ASSERT(ggml_nelements(op->src[0]) < (1LL << 31)); char base[256]; char name[256]; int nsg = 1; while (32*nsg < ne00 && nsg < 32) { nsg *= 2; } snprintf(base, 256, "kernel_count_equal_%s", ggml_type_name(op->src[0]->type)); snprintf(name, 256, "%s_nsg=%d", base, nsg); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { ggml_metal_cv_t cv = ggml_metal_cv_init(); ggml_metal_cv_set_int16(cv, nsg, FC_COUNT_EQUAL + 0); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); ggml_metal_cv_free(cv); } res.smem = 32 * sizeof(int32_t); res.nsg = nsg; return res; } ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-device.h000066400000000000000000000346131512524704700226440ustar00rootroot00000000000000#pragma once #include "ggml.h" #ifdef __cplusplus extern "C" { #endif struct ggml_metal_buffer_id { void * metal; // id size_t offs; }; typedef struct ggml_metal_device * ggml_metal_device_t; // // MTLFunctionConstantValues wrapper // typedef struct ggml_metal_cv * ggml_metal_cv_t; ggml_metal_cv_t ggml_metal_cv_init(void); void ggml_metal_cv_free(ggml_metal_cv_t cv); void ggml_metal_cv_set_int16(ggml_metal_cv_t cv, int16_t value, int32_t idx); void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx); void ggml_metal_cv_set_bool (ggml_metal_cv_t cv, bool value, int32_t idx); // // MTLComputePipelineState wrapper // typedef struct ggml_metal_pipeline * ggml_metal_pipeline_t; ggml_metal_pipeline_t ggml_metal_pipeline_init(void); void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline); // a collection of pipelines typedef struct ggml_metal_pipelines * ggml_metal_pipelines_t; ggml_metal_pipelines_t ggml_metal_pipelines_init(void); void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls); void ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline); ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name); struct ggml_metal_pipeline_with_params { ggml_metal_pipeline_t pipeline; int nsg; int nr0; int nr1; size_t smem; }; int 
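// A minimal sketch of how a caller is expected to consume ggml_metal_pipeline_with_params when
// encoding a kernel, assuming a pipeline returned by one of the ggml_metal_library_get_pipeline_*
// getters declared below and an already created encoder; buffer indices and grid sizes are
// placeholders. Illustration only, kept out of the build.
#if 0
void encode_with_params_example(ggml_metal_encoder_t enc, struct ggml_metal_pipeline_with_params ppl,
                                struct ggml_metal_buffer_id src, struct ggml_metal_buffer_id dst, int nrows) {
    ggml_metal_encoder_set_pipeline(enc, ppl);
    ggml_metal_encoder_set_buffer  (enc, src, 0);
    ggml_metal_encoder_set_buffer  (enc, dst, 1);

    if (ppl.smem > 0) {
        // the getters report the required threadgroup memory via .smem
        ggml_metal_encoder_set_threadgroup_memory_size(enc, ppl.smem, 0);
    }

    const int ntptg = ggml_metal_pipeline_max_theads_per_threadgroup(ppl);
    ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, ntptg, 1, 1);
}
#endif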
ggml_metal_pipeline_max_theads_per_threadgroup(struct ggml_metal_pipeline_with_params pipeline); // // MTLCommandBuffer wrapper // typedef void * ggml_metal_cmd_buf_t; // // MTLComputeCommandEncoder wrapper // typedef struct ggml_metal_encoder * ggml_metal_encoder_t; ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent); void ggml_metal_encoder_free(ggml_metal_encoder_t encoder); void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name); void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder); void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, struct ggml_metal_pipeline_with_params pipeline); void ggml_metal_encoder_set_bytes (ggml_metal_encoder_t encoder, void * data, size_t size, int idx); void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx); void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx); void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2); void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder); void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder); // // MTLLibrary wrapper // typedef struct ggml_metal_library * ggml_metal_library_t; ggml_metal_library_t ggml_metal_library_init (ggml_metal_device_t dev); ggml_metal_library_t ggml_metal_library_init_from_source(ggml_metal_device_t dev, const char * source, bool verbose); void ggml_metal_library_free(ggml_metal_library_t lib); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline (ggml_metal_library_t lib, const char * name); struct ggml_metal_pipeline_with_params ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_base (ggml_metal_library_t lib, enum ggml_op op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cpy (ggml_metal_library_t lib, enum ggml_type tsrc, enum ggml_type tdst); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_2d (ggml_metal_library_t lib, const struct ggml_tensor * op, enum ggml_op_pool op_pool); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_get_rows (ggml_metal_library_t lib, enum ggml_type tsrc); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_set_rows (ggml_metal_library_t lib, enum ggml_type tidx, enum ggml_type tdst); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_repeat (ggml_metal_library_t lib, enum ggml_type tsrc); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_unary (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_glu (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum_rows (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_blk (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_add (ggml_metal_library_t lib, const struct 
ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_tri (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_soft_max (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv_batched (ggml_metal_library_t lib, const struct ggml_tensor * op, int ssm_conv_bs); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_scan (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_ext (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1, int nsg, int nxpsg, int r1ptg); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id_map0 (ggml_metal_library_t lib, int ne02, int ne20); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_id (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argmax (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort_merge (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k_merge (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin (ggml_metal_library_t lib, enum ggml_op op, int32_t n_fuse, bool row); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_l2_norm (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_group_norm (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_norm (ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t n_fuse); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rope (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_im2col (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_1d (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_2d (ggml_metal_library_t lib, 
const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_2d (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_upscale (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad_reflect_1d (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_arange (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_adamw (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_sgd (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_memset (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_count_equal (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_pad( ggml_metal_library_t lib, const struct ggml_tensor * op, bool has_mask, int32_t ncpsg); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_blk( ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t nqptg, int32_t ncpsg); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext( ggml_metal_library_t lib, const struct ggml_tensor * op, bool has_mask, bool has_sinks, bool has_bias, bool has_scap, bool has_kvpad, int32_t nsg); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec( ggml_metal_library_t lib, const struct ggml_tensor * op, bool has_mask, bool has_sinks, bool has_bias, bool has_scap, bool has_kvpad, int32_t nsg, int32_t nwg); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce( ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t dv, int32_t nwg); // MTLResidencySet wrapper typedef void * ggml_metal_rset_t; // a collection of residency sets (non-owning) typedef struct ggml_metal_rsets * ggml_metal_rsets_t; ggml_metal_rsets_t ggml_metal_rsets_init(void); void ggml_metal_rsets_free(ggml_metal_rsets_t rsets); // // device // struct ggml_metal_device_props { char name[128]; size_t max_buffer_size; size_t max_working_set_size; size_t max_theadgroup_memory_size; bool has_simdgroup_reduction; bool has_simdgroup_mm; bool has_unified_memory; bool has_bfloat; bool has_tensor; bool use_residency_sets; bool use_shared_buffers; bool supports_gpu_family_apple7; }; ggml_metal_device_t ggml_metal_device_init(void); void ggml_metal_device_free(ggml_metal_device_t dev); // return a singleton that is automatically destroyed when the program exits ggml_metal_device_t ggml_metal_device_get(void); void * ggml_metal_device_get_obj (ggml_metal_device_t dev); // id void * ggml_metal_device_get_queue(ggml_metal_device_t dev); // id ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev); void 
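// A minimal sketch of the expected device/library bootstrap, assuming a caller that only wants
// to query memory and op support; ggml_metal_device_get() returns the process-wide singleton,
// so nothing is freed here. Illustration only, kept out of the build.
#if 0
void device_probe_example(const struct ggml_tensor * some_op) {
    ggml_metal_device_t dev = ggml_metal_device_get();

    const struct ggml_metal_device_props * props = ggml_metal_device_get_props(dev);
    (void) props; // e.g. props->has_simdgroup_mm, props->has_bfloat, props->max_buffer_size, ...

    size_t free = 0, total = 0;
    ggml_metal_device_get_memory(dev, &free, &total);

    if (ggml_metal_device_supports_op(dev, some_op)) {
        ggml_metal_library_t lib = ggml_metal_device_get_library(dev);
        (void) lib; // pipelines are then obtained via the ggml_metal_library_get_pipeline_*() getters
    }
}
#endif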
ggml_metal_device_rsets_add(ggml_metal_device_t dev, ggml_metal_rset_t rset); void ggml_metal_device_rsets_rm (ggml_metal_device_t dev, ggml_metal_rset_t rset); void ggml_metal_device_rsets_keep_alive(ggml_metal_device_t dev); void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total); bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op); const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev); // // device buffers // typedef struct ggml_metal_buffer * ggml_metal_buffer_t; ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared); ggml_metal_buffer_t ggml_metal_buffer_map (ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size); void ggml_metal_buffer_free (ggml_metal_buffer_t buf); void * ggml_metal_buffer_get_base (ggml_metal_buffer_t buf); bool ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf); void ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size); void ggml_metal_buffer_set_tensor (ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void ggml_metal_buffer_get_tensor (ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); void ggml_metal_buffer_clear (ggml_metal_buffer_t buf, uint8_t value); // finds the Metal buffer that contains the tensor data on the GPU device // the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the // Metal buffer based on the host memory pointer // struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-device.m000066400000000000000000001657551512524704700226650ustar00rootroot00000000000000#import "ggml-metal-device.h" #import "ggml-impl.h" #include #include #include #ifndef TARGET_OS_VISION #define TARGET_OS_VISION 0 #endif // create residency sets only on macOS >= 15.0 #if !TARGET_CPU_X86_64 && TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 150000 || \ TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED >= 180000 || \ TARGET_OS_TV && __TV_OS_VERSION_MAX_ALLOWED >= 180000 || \ TARGET_OS_VISION && __VISION_OS_VERSION_MAX_ALLOWED >= 200000 #define GGML_METAL_HAS_RESIDENCY_SETS 1 #endif // overload of MTLGPUFamilyMetalX (not available in some environments) static const NSInteger MTLGPUFamilyMetal3_GGML = 5001; static const NSInteger MTLGPUFamilyMetal4_GGML = 5002; // virtual address for GPU memory allocations static atomic_uintptr_t g_addr_device = 0x000000400ULL; #if !GGML_METAL_EMBED_LIBRARY // Here to assist with NSBundle Path Hack @interface GGMLMetalClass : NSObject @end @implementation GGMLMetalClass @end #endif // // MTLFunctionConstantValues wrapper // struct ggml_metal_cv { MTLFunctionConstantValues * obj; }; ggml_metal_cv_t ggml_metal_cv_init(void) { ggml_metal_cv_t res = calloc(1, sizeof(struct ggml_metal_cv)); res->obj = [[MTLFunctionConstantValues alloc] init]; return res; } void ggml_metal_cv_free(ggml_metal_cv_t cv) { [cv->obj release]; free(cv); } void ggml_metal_cv_set_int16(ggml_metal_cv_t cv, int16_t value, int32_t idx) { [cv->obj setConstantValue:&value type:MTLDataTypeShort atIndex:idx]; } void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx) { [cv->obj setConstantValue:&value type:MTLDataTypeInt 
atIndex:idx]; } void ggml_metal_cv_set_bool(ggml_metal_cv_t cv, bool value, int32_t idx) { [cv->obj setConstantValue:&value type:MTLDataTypeBool atIndex:idx]; } // // MTLComputePipelineState wrapper // struct ggml_metal_pipeline { id obj; }; ggml_metal_pipeline_t ggml_metal_pipeline_init(void) { ggml_metal_pipeline_t res = calloc(1, sizeof(struct ggml_metal_pipeline)); *res = (struct ggml_metal_pipeline) { /*.obj =*/ nil, }; return res; } void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline) { [pipeline->obj release]; free(pipeline); } int ggml_metal_pipeline_max_theads_per_threadgroup(struct ggml_metal_pipeline_with_params pipeline) { return pipeline.pipeline->obj.maxTotalThreadsPerThreadgroup; } struct ggml_metal_library { id obj; id device; ggml_metal_pipelines_t pipelines; // cache of compiled pipelines NSLock * lock; }; ggml_metal_library_t ggml_metal_library_init(ggml_metal_device_t dev) { id library = nil; id device = ggml_metal_device_get_obj(dev); // load library // // - first check if the library is embedded // - then check if the library is in the bundle // - if not found, load the source and compile it // - if that fails, return NULL // // TODO: move to a function { const int64_t t_start = ggml_time_us(); NSError * error = nil; NSString * src = nil; #if GGML_METAL_EMBED_LIBRARY GGML_LOG_INFO("%s: using embedded metal library\n", __func__); extern const char ggml_metallib_start[]; extern const char ggml_metallib_end[]; src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding]; #else #ifdef SWIFT_PACKAGE NSBundle * bundle = SWIFTPM_MODULE_BUNDLE; #else NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; #endif NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"]; if (path_lib == nil) { // Try to find the resource in the directory where the current binary located. NSString * bin_cur = [[NSProcessInfo processInfo] arguments][0]; NSString * bin_dir = [bin_cur stringByDeletingLastPathComponent]; NSString * path_lib_default = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]]; if ([[NSFileManager defaultManager] isReadableFileAtPath:path_lib_default]) { GGML_LOG_INFO("%s: found '%s'\n", __func__, [path_lib_default UTF8String]); NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:path_lib_default error:&error]; if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) { // Optionally, if this is a symlink, try to resolve it. path_lib_default = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:path_lib_default error:&error]; if (path_lib_default && [path_lib_default length] > 0 && ![[path_lib_default substringToIndex:1] isEqualToString:@"/"]) { // It is a relative path, adding the binary directory as directory prefix. path_lib_default = [NSString pathWithComponents:@[bin_dir, path_lib_default]]; } if (!path_lib_default || ![[NSFileManager defaultManager] isReadableFileAtPath:path_lib_default]) { // Link to the resource could not be resolved. path_lib_default = nil; } else { GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [path_lib_default UTF8String]); } } } else { // The resource couldn't be found in the binary's directory. 
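// The lookup order implemented in this function is: embedded library, bundled default.metallib,
// a default.metallib next to the binary (resolving symlinks), and finally compiling
// ggml-metal.metal from source. A minimal sketch of steering that last step with the
// GGML_METAL_PATH_RESOURCES override, assuming a hypothetical launcher and a placeholder
// directory; it only takes effect when no precompiled default.metallib is found.
#if 0
#include <stdlib.h> // setenv

static void force_metal_source_dir_example(void) {
    // must be set before ggml_metal_library_init() runs for the device
    setenv("GGML_METAL_PATH_RESOURCES", "/path/to/dir/containing/ggml-metal.metal", 1);
}
#endif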
path_lib_default = nil; } path_lib = path_lib_default; } if (path_lib != nil) { // pre-compiled library found NSURL * libURL = [NSURL fileURLWithPath:path_lib]; GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]); library = [device newLibraryWithURL:libURL error:&error]; if (error) { GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return nil; } } else { GGML_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); NSString * path_source; NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"]; GGML_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil"); if (path_resource) { path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"]; } else { path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; } if (path_source == nil) { GGML_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__); path_source = @"ggml-metal.metal"; } GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]); src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error]; if (error) { GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return nil; } } #endif if (!library) { @autoreleasepool { // dictionary of preprocessor macros NSMutableDictionary * prep = [NSMutableDictionary dictionary]; if (ggml_metal_device_get_props(dev)->has_bfloat) { [prep setObject:@"1" forKey:@"GGML_METAL_HAS_BF16"]; } if (ggml_metal_device_get_props(dev)->has_tensor) { [prep setObject:@"1" forKey:@"GGML_METAL_HAS_TENSOR"]; } #if GGML_METAL_EMBED_LIBRARY [prep setObject:@"1" forKey:@"GGML_METAL_EMBED_LIBRARY"]; #endif MTLCompileOptions * options = [MTLCompileOptions new]; options.preprocessorMacros = prep; //[options setFastMathEnabled:false]; library = [device newLibraryWithSource:src options:options error:&error]; if (error) { GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return nil; } #if !__has_feature(objc_arc) [options release]; #endif } } #if GGML_METAL_EMBED_LIBRARY [src release]; #endif // GGML_METAL_EMBED_LIBRARY GGML_LOG_INFO("%s: loaded in %.3f sec\n", __func__, (ggml_time_us() - t_start) / 1e6); } ggml_metal_library_t res = calloc(1, sizeof(struct ggml_metal_library)); res->obj = library; res->device = device; res->pipelines = ggml_metal_pipelines_init(); res->lock = [NSLock new]; return res; } ggml_metal_library_t ggml_metal_library_init_from_source(ggml_metal_device_t dev, const char * source, bool verbose) { if (source == NULL) { GGML_LOG_ERROR("%s: source is NULL\n", __func__); return NULL; } id device = ggml_metal_device_get_obj(dev); id library = nil; NSError * error = nil; const int64_t t_start = ggml_time_us(); NSString * src = [[NSString alloc] initWithBytes:source length:strlen(source) encoding:NSUTF8StringEncoding]; if (!src) { GGML_LOG_ERROR("%s: failed to create NSString from source\n", __func__); return NULL; } @autoreleasepool { NSMutableDictionary * prep = [NSMutableDictionary dictionary]; MTLCompileOptions * options = [MTLCompileOptions new]; options.preprocessorMacros = prep; library = [device newLibraryWithSource:src options:options error:&error]; if (error) { if (verbose) { GGML_LOG_ERROR("%s: error compiling source: %s\n", __func__, [[error description] UTF8String]); } else { GGML_LOG_ERROR("%s: error compiling source\n", __func__); } 
library = nil; } [options release]; } [src release]; if (!library) { if (verbose) { GGML_LOG_ERROR("%s: failed to create Metal library from source\n", __func__); } return NULL; } if (verbose) { GGML_LOG_INFO("%s: compiled in %.3f sec\n", __func__, (ggml_time_us() - t_start) / 1e6); } ggml_metal_library_t res = calloc(1, sizeof(struct ggml_metal_library)); if (!res) { GGML_LOG_ERROR("%s: calloc failed\n", __func__); return NULL; } res->obj = library; res->device = device; res->pipelines = ggml_metal_pipelines_init(); res->lock = [NSLock new]; return res; } void ggml_metal_library_free(ggml_metal_library_t lib) { if (!lib) { return; } if (lib->obj) { [lib->obj release]; } ggml_metal_pipelines_free(lib->pipelines); [lib->lock release]; free(lib); } struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline(ggml_metal_library_t lib, const char * name) { [lib->lock lock]; struct ggml_metal_pipeline_with_params res = { /*.pipeline =*/ nil, /*.nr0 =*/ 0, /*.nr1 =*/ 0, /*.nsg =*/ 0, /*.smem =*/ 0, }; res.pipeline = ggml_metal_pipelines_get(lib->pipelines, name); [lib->lock unlock]; return res; } struct ggml_metal_pipeline_with_params ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv) { struct ggml_metal_pipeline_with_params res = { /*.pipeline =*/ nil, /*.nr0 =*/ 0, /*.nr1 =*/ 0, /*.nsg =*/ 0, /*.smem =*/ 0, }; [lib->lock lock]; res.pipeline = ggml_metal_pipelines_get(lib->pipelines, name); if (res.pipeline) { [lib->lock unlock]; return res; } @autoreleasepool { NSError * error = nil; NSString * base_func = [NSString stringWithUTF8String:base]; GGML_LOG_DEBUG("%s: compiling pipeline: base = '%s', name = '%s'\n", __func__, base, name); id mtl_function; if (!cv) { mtl_function = [lib->obj newFunctionWithName:base_func]; } else { mtl_function = [lib->obj newFunctionWithName:base_func constantValues:cv->obj error:&error]; } if (!mtl_function) { [lib->lock unlock]; GGML_LOG_ERROR("%s: failed to compile pipeline: base = '%s', name = '%s'\n", __func__, base, name); if (error) { GGML_LOG_ERROR("%s: %s\n", __func__, [[error description] UTF8String]); } return res; } id obj = [lib->device newComputePipelineStateWithFunction:mtl_function error:&error]; [mtl_function release]; if (!obj) { [lib->lock unlock]; GGML_LOG_ERROR("%s: failed to create pipeline state: base = '%s', name = '%s'\n", __func__, base, name); if (error) { GGML_LOG_ERROR("%s: %s\n", __func__, [[error description] UTF8String]); } return res; } GGML_LOG_DEBUG("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, name, (void *) obj, (int) obj.maxTotalThreadsPerThreadgroup, (int) obj.threadExecutionWidth); if (obj.maxTotalThreadsPerThreadgroup == 0 || obj.threadExecutionWidth == 0) { [obj release]; [lib->lock unlock]; GGML_LOG_ERROR("%s: incompatible pipeline %s\n", __func__, name); return res; } res.pipeline = ggml_metal_pipeline_init(); res.pipeline->obj = obj; ggml_metal_pipelines_add(lib->pipelines, name, res.pipeline); } [lib->lock unlock]; return res; } // // MTLComputeCommandEncoder wrapper // struct ggml_metal_encoder { id obj; }; ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent) { ggml_metal_encoder_t res = calloc(1, sizeof(struct ggml_metal_encoder)); id cmd_buf = (id) cmd_buf_raw; if (concurrent) { res->obj = [cmd_buf computeCommandEncoderWithDispatchType: MTLDispatchTypeConcurrent]; } else { res->obj = [cmd_buf computeCommandEncoder]; } [res->obj retain]; return res; } void 
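// A minimal sketch of the encoder lifecycle around the wrappers implemented below: create it
// from a command buffer, optionally group work for debugging, separate dependent dispatches
// with a memory barrier, then end encoding and free the wrapper. The two encode_*() helpers
// are placeholders for whatever the caller actually records. Illustration only, kept out of the build.
#if 0
extern void encode_first_op (ggml_metal_encoder_t enc); // placeholder
extern void encode_second_op(ggml_metal_encoder_t enc); // placeholder

void encoder_lifecycle_example(ggml_metal_cmd_buf_t cmd_buf) {
    ggml_metal_encoder_t enc = ggml_metal_encoder_init(cmd_buf, /*concurrent =*/ true);

    ggml_metal_encoder_debug_group_push(enc, "node 0");
    encode_first_op(enc);
    ggml_metal_encoder_debug_group_pop(enc);

    // make the results of the first dispatch visible to the next one
    ggml_metal_encoder_memory_barrier(enc);

    encode_second_op(enc);

    ggml_metal_encoder_end_encoding(enc);
    ggml_metal_encoder_free(enc);
}
#endif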
ggml_metal_encoder_free(ggml_metal_encoder_t encoder) { [encoder->obj release]; free(encoder); } void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name) { [encoder->obj pushDebugGroup:[NSString stringWithCString:name encoding:NSUTF8StringEncoding]]; } void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder) { [encoder->obj popDebugGroup]; } void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, struct ggml_metal_pipeline_with_params pipeline) { [encoder->obj setComputePipelineState:pipeline.pipeline->obj]; } void ggml_metal_encoder_set_bytes(ggml_metal_encoder_t encoder, void * data, size_t size, int idx) { [encoder->obj setBytes:data length:size atIndex:idx]; } void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx) { [encoder->obj setBuffer:buffer.metal offset:buffer.offs atIndex:idx]; } void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx) { [encoder->obj setThreadgroupMemoryLength:size atIndex:idx]; } void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2) { [encoder->obj dispatchThreadgroups:MTLSizeMake(tg0, tg1, tg2) threadsPerThreadgroup:MTLSizeMake(tptg0, tptg1, tptg2)]; } void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder) { [encoder->obj memoryBarrierWithScope:MTLBarrierScopeBuffers]; } void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder) { [encoder->obj endEncoding]; } struct ggml_metal_device { id mtl_device; // a single global queue shared by all Metal backends // technically not needed for devices with unified memory, but enables discrete GPUs support // ref: https://github.com/ggml-org/llama.cpp/pull/15906 id mtl_queue; ggml_metal_rsets_t rsets; ggml_metal_library_t library; struct ggml_metal_device_props props; }; // // MTLResidenceSet wrapper // struct ggml_metal_rsets { NSLock * lock; NSMutableArray * data; // number of seconds since the last graph computation // keep the residency sets wired for that amount of time to avoid being collected by the OS int keep_alive_s; // background heartbeat thread to keep the residency sets alive atomic_bool d_stop; atomic_int d_loop; dispatch_group_t d_group; }; ggml_metal_rsets_t ggml_metal_rsets_init(void) { ggml_metal_rsets_t res = calloc(1, sizeof(struct ggml_metal_rsets)); res->lock = [[NSLock alloc] init]; res->data = [[NSMutableArray alloc] init]; // by default keep the memory wired for 3 minutes res->keep_alive_s = 3*60; const char * GGML_METAL_RESIDENCY_KEEP_ALIVE_S = getenv("GGML_METAL_RESIDENCY_KEEP_ALIVE_S"); if (GGML_METAL_RESIDENCY_KEEP_ALIVE_S) { res->keep_alive_s = atoi(GGML_METAL_RESIDENCY_KEEP_ALIVE_S); } if (res->keep_alive_s <= 0) { res->keep_alive_s = 3*60; } GGML_LOG_INFO("%s: creating a residency set collection (keep_alive = %d s)\n", __func__, res->keep_alive_s); atomic_store_explicit(&res->d_stop, false, memory_order_relaxed); atomic_store_explicit(&res->d_loop, 2*res->keep_alive_s, memory_order_relaxed); res->d_group = dispatch_group_create(); // start a background thread that periodically requests residency for all the currently active sets in the collection // the requests stop after a certain amount of time (keep_alive_s) of inactivity dispatch_queue_t d_queue = dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0); dispatch_group_async(res->d_group, d_queue, ^{ #if defined(GGML_METAL_HAS_RESIDENCY_SETS) if (@available(macOS 15.0, iOS 18.0, 
tvOS 18.0, visionOS 2.0, *)) { while (!atomic_load_explicit(&res->d_stop, memory_order_relaxed)) { if (atomic_load_explicit(&res->d_loop, memory_order_relaxed) > 0) { [res->lock lock]; for (int i = 0; i < (int) res->data.count; ++i) { [res->data[i] requestResidency]; } atomic_fetch_sub_explicit(&res->d_loop, 1, memory_order_relaxed); [res->lock unlock]; } // half a second usleep(500 * 1000); } } #endif }); return res; } void ggml_metal_rsets_free(ggml_metal_rsets_t rsets) { if (rsets == NULL) { return; } // note: if you hit this assert, most likely you haven't deallocated all Metal resources before exiting GGML_ASSERT([rsets->data count] == 0); atomic_store_explicit(&rsets->d_stop, true, memory_order_relaxed); dispatch_group_wait(rsets->d_group, DISPATCH_TIME_FOREVER); dispatch_release(rsets->d_group); [rsets->data release]; [rsets->lock release]; free(rsets); } ggml_metal_device_t ggml_metal_device_init(void) { ggml_metal_device_t dev = calloc(1, sizeof(struct ggml_metal_device)); assert(dev != NULL); if (dev->mtl_device == nil) { dev->mtl_device = MTLCreateSystemDefaultDevice(); if (dev->mtl_device) { dev->mtl_queue = [dev->mtl_device newCommandQueue]; if (dev->mtl_queue == nil) { GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__); } dev->props.has_simdgroup_reduction = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7]; dev->props.has_simdgroup_reduction |= [dev->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML]; dev->props.has_simdgroup_mm = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7]; dev->props.has_unified_memory = dev->mtl_device.hasUnifiedMemory; dev->props.has_bfloat = [dev->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML]; dev->props.has_bfloat |= [dev->mtl_device supportsFamily:MTLGPUFamilyApple6]; if (getenv("GGML_METAL_BF16_DISABLE") != NULL) { dev->props.has_bfloat = false; } dev->props.has_tensor = [dev->mtl_device supportsFamily:MTLGPUFamilyMetal4_GGML]; if (getenv("GGML_METAL_TENSOR_DISABLE") != NULL) { dev->props.has_tensor = false; } // note: disable the tensor API by default for old chips because with the current implementation it is not useful // - M2 Ultra: ~5% slower // - M4, M4 Max: no significant difference // // TODO: try to update the tensor API kernels to at least match the simdgroup performance if (getenv("GGML_METAL_TENSOR_ENABLE") == NULL && ![[dev->mtl_device name] containsString:@"M5"] && ![[dev->mtl_device name] containsString:@"M6"] && ![[dev->mtl_device name] containsString:@"A19"] && ![[dev->mtl_device name] containsString:@"A20"]) { GGML_LOG_WARN("%s: tensor API disabled for pre-M5 and pre-A19 devices\n", __func__); dev->props.has_tensor = false; } // double-check that the tensor API compiles if (dev->props.has_tensor) { const char * src_tensor_f16 = "\n" "#include \n" "#include \n" "#include \n" " \n" "using namespace metal; \n" "using namespace mpp::tensor_ops; \n" " \n" "kernel void dummy_kernel( \n" " tensor> A [[buffer(0)]], \n" " tensor> B [[buffer(1)]], \n" " device float * C [[buffer(2)]], \n" " uint2 tgid [[threadgroup_position_in_grid]]) \n" "{ \n" " auto tA = A.slice(0, (int)tgid.y); \n" " auto tB = B.slice((int)tgid.x, 0); \n" " \n" " matmul2d< \n" " matmul2d_descriptor(8, 8, dynamic_extent), \n" " execution_simdgroups<4>> mm; \n" " \n" " auto cT = mm.get_destination_cooperative_tensor(); \n" " \n" " auto sA = tA.slice(0, 0); \n" " auto sB = tB.slice(0, 0); \n" " mm.run(sB, sA, cT); \n" " \n" " auto tC = tensor, tensor_inline>(C, dextents(4, 4)); \n" " \n" " cT.store(tC); \n" "}"; GGML_LOG_INFO("%s: testing 
tensor API for f16 support\n", __func__); ggml_metal_library_t lib = ggml_metal_library_init_from_source(dev, src_tensor_f16, false); if (lib == NULL) { GGML_LOG_WARN("%s: - the tensor API is not supported in this environment - disabling\n", __func__); dev->props.has_tensor = false; } else { struct ggml_metal_pipeline_with_params ppl = ggml_metal_library_compile_pipeline(lib, "dummy_kernel", "dummy_kernel", nil); if (!ppl.pipeline) { GGML_LOG_WARN("%s: - the tensor API is not supported in this environment - disabling\n", __func__); dev->props.has_tensor = false; } ggml_metal_library_free(lib); } } // try to compile a dummy kernel to determine if the tensor API is supported for bfloat if (dev->props.has_tensor && dev->props.has_bfloat) { const char * src_tensor_bf16 = "\n" "#include \n" "#include \n" "#include \n" " \n" "using namespace metal; \n" "using namespace mpp::tensor_ops; \n" " \n" "kernel void dummy_kernel( \n" " tensor> A [[buffer(0)]], \n" " tensor> B [[buffer(1)]], \n" " device float * C [[buffer(2)]], \n" " uint2 tgid [[threadgroup_position_in_grid]]) \n" "{ \n" " auto tA = A.slice(0, (int)tgid.y); \n" " auto tB = B.slice((int)tgid.x, 0); \n" " \n" " matmul2d< \n" " matmul2d_descriptor(8, 8, dynamic_extent), \n" " execution_simdgroups<4>> mm; \n" " \n" " auto cT = mm.get_destination_cooperative_tensor(); \n" " \n" " auto sA = tA.slice(0, 0); \n" " auto sB = tB.slice(0, 0); \n" " mm.run(sB, sA, cT); \n" " \n" " auto tC = tensor, tensor_inline>(C, dextents(4, 4)); \n" " \n" " cT.store(tC); \n" "}"; GGML_LOG_INFO("%s: testing tensor API for bfloat support\n", __func__); ggml_metal_library_t lib = ggml_metal_library_init_from_source(dev, src_tensor_bf16, false); if (lib == NULL) { GGML_LOG_WARN("%s: - the tensor API does not support bfloat - disabling bfloat support\n", __func__); dev->props.has_bfloat = false; } else { struct ggml_metal_pipeline_with_params ppl = ggml_metal_library_compile_pipeline(lib, "dummy_kernel", "dummy_kernel", nil); if (!ppl.pipeline) { GGML_LOG_WARN("%s: - the tensor API does not support bfloat - disabling bfloat support\n", __func__); dev->props.has_bfloat = false; } ggml_metal_library_free(lib); } } dev->props.use_residency_sets = true; #if defined(GGML_METAL_HAS_RESIDENCY_SETS) dev->props.use_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == nil; #endif dev->props.use_shared_buffers = dev->props.has_unified_memory; #if TARGET_OS_OSX // In case of eGPU, shared memory may be preferable. 
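// The feature detection in this function can be overridden from the environment before the
// device is initialized. A minimal sketch, assuming a hypothetical launcher; the variables
// shown are ones actually read in this file. Illustration only, kept out of the build.
#if 0
static void metal_env_overrides_example(void) {
    setenv("GGML_METAL_TENSOR_ENABLE",          "1", 1); // opt in to the tensor API on older chips
    setenv("GGML_METAL_SHARED_BUFFERS_DISABLE", "1", 1); // force private GPU buffers
    setenv("GGML_METAL_NO_RESIDENCY",           "1", 1); // disable residency sets
    // ... then create the device:
    // ggml_metal_device_t dev = ggml_metal_device_init();
}
#endif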
dev->props.use_shared_buffers |= [dev->mtl_device location] == MTLDeviceLocationExternal; #endif if (getenv("GGML_METAL_SHARED_BUFFERS_DISABLE") != NULL) { dev->props.use_shared_buffers = false; } if (getenv("GGML_METAL_SHARED_BUFFERS_ENABLE") != NULL) { dev->props.use_shared_buffers = true; } dev->props.supports_gpu_family_apple7 = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7]; dev->props.max_buffer_size = dev->mtl_device.maxBufferLength; dev->props.max_working_set_size = dev->mtl_device.recommendedMaxWorkingSetSize; dev->props.max_theadgroup_memory_size = dev->mtl_device.maxThreadgroupMemoryLength; strncpy(dev->props.name, [[dev->mtl_device name] UTF8String], sizeof(dev->props.name) - 1); dev->library = ggml_metal_library_init(dev); if (!dev->library) { GGML_LOG_ERROR("%s: error: failed to create library\n", __func__); } if (dev->props.use_residency_sets) { dev->rsets = ggml_metal_rsets_init(); } else { dev->rsets = nil; } // print MTL GPU family: GGML_LOG_INFO("%s: GPU name: %s\n", __func__, dev->props.name); // determine max supported GPU family // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf { for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) { if ([dev->mtl_device supportsFamily:i]) { GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i); break; } } for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) { if ([dev->mtl_device supportsFamily:i]) { GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i); break; } } for (int i = MTLGPUFamilyMetal3_GGML + 5; i >= MTLGPUFamilyMetal3_GGML; --i) { if ([dev->mtl_device supportsFamily:i]) { GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3_GGML + 3, i); break; } } } GGML_LOG_INFO("%s: simdgroup reduction = %s\n", __func__, dev->props.has_simdgroup_reduction ? "true" : "false"); GGML_LOG_INFO("%s: simdgroup matrix mul. = %s\n", __func__, dev->props.has_simdgroup_mm ? "true" : "false"); GGML_LOG_INFO("%s: has unified memory = %s\n", __func__, dev->props.has_unified_memory ? "true" : "false"); GGML_LOG_INFO("%s: has bfloat = %s\n", __func__, dev->props.has_bfloat ? "true" : "false"); GGML_LOG_INFO("%s: has tensor = %s\n", __func__, dev->props.has_tensor ? "true" : "false"); GGML_LOG_INFO("%s: use residency sets = %s\n", __func__, dev->props.use_residency_sets ? "true" : "false"); GGML_LOG_INFO("%s: use shared buffers = %s\n", __func__, dev->props.use_shared_buffers ? 
"true" : "false"); #if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15) if (@available(macOS 10.12, iOS 16.0, *)) { GGML_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, dev->props.max_working_set_size / 1e6); } #endif } } return dev; } void ggml_metal_device_free(ggml_metal_device_t dev) { assert(dev != NULL); ggml_metal_rsets_free(dev->rsets); ggml_metal_library_free(dev->library); dev->library = NULL; if (dev->mtl_queue) { [dev->mtl_queue release]; dev->mtl_queue = nil; } if (dev->mtl_device) { [dev->mtl_device release]; dev->mtl_device = nil; } free(dev); } void * ggml_metal_device_get_obj(ggml_metal_device_t dev) { return dev->mtl_device; } void * ggml_metal_device_get_queue(ggml_metal_device_t dev) { return dev->mtl_queue; } ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev) { return dev->library; } void ggml_metal_device_rsets_add(ggml_metal_device_t dev, ggml_metal_rset_t rset) { if (rset == nil) { return; } GGML_ASSERT(dev->rsets); [dev->rsets->lock lock]; [dev->rsets->data addObject:rset]; [dev->rsets->lock unlock]; } void ggml_metal_device_rsets_rm(ggml_metal_device_t dev, ggml_metal_rset_t rset) { if (rset == nil) { return; } GGML_ASSERT(dev->rsets); [dev->rsets->lock lock]; [dev->rsets->data removeObject:rset]; [dev->rsets->lock unlock]; } void ggml_metal_device_rsets_keep_alive(ggml_metal_device_t dev) { if (dev->rsets == NULL) { return; } atomic_store_explicit(&dev->rsets->d_loop, 2*dev->rsets->keep_alive_s, memory_order_relaxed); } void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total) { if (@available(macOS 10.12, iOS 16.0, *)) { *total = dev->mtl_device.recommendedMaxWorkingSetSize; *free = *total - dev->mtl_device.currentAllocatedSize; } else { *free = 0; *total = 0; } } bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op) { const bool has_simdgroup_mm = dev->props.has_simdgroup_mm; const bool has_simdgroup_reduction = dev->props.has_simdgroup_reduction; const bool has_bfloat = dev->props.has_bfloat; if (!has_bfloat) { if (op->type == GGML_TYPE_BF16) { return false; } for (size_t i = 0, n = 3; i < n; ++i) { if (op->src[i] != NULL && op->src[i]->type == GGML_TYPE_BF16) { return false; } } } switch (op->op) { case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_SGN: case GGML_UNARY_OP_STEP: case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_EXP: case GGML_UNARY_OP_SOFTPLUS: case GGML_UNARY_OP_EXPM1: return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; default: return false; } case GGML_OP_GLU: switch (ggml_get_glu_op(op)) { case GGML_GLU_OP_REGLU: case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_SWIGLU: case GGML_GLU_OP_SWIGLU_OAI: case GGML_GLU_OP_GEGLU_ERF: case GGML_GLU_OP_GEGLU_QUICK: return ggml_is_contiguous_1(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; default: return false; } case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_TRANSPOSE: case GGML_OP_PERMUTE: case GGML_OP_CONCAT: return true; case GGML_OP_ADD: case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: case GGML_OP_ADD_ID: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_ACC: case GGML_OP_REPEAT: case GGML_OP_SCALE: case 
GGML_OP_FILL: case GGML_OP_CONV_TRANSPOSE_1D: return true; case GGML_OP_CONV_TRANSPOSE_2D: return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]) && (op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32) && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; case GGML_OP_CLAMP: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: case GGML_OP_LOG: return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SUM: return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]); case GGML_OP_TRI: return ggml_is_contiguous_rows(op->src[0]); case GGML_OP_SUM_ROWS: case GGML_OP_CUMSUM: case GGML_OP_MEAN: case GGML_OP_SOFT_MAX: case GGML_OP_GROUP_NORM: return has_simdgroup_reduction && ggml_is_contiguous_rows(op->src[0]); case GGML_OP_L2_NORM: return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0])); case GGML_OP_COUNT_EQUAL: return has_simdgroup_reduction && op->src[0]->type == GGML_TYPE_I32 && op->src[1]->type == GGML_TYPE_I32 && op->type == GGML_TYPE_I64; case GGML_OP_ARGMAX: return has_simdgroup_reduction; case GGML_OP_NORM: case GGML_OP_RMS_NORM: return has_simdgroup_reduction && (ggml_is_contiguous_rows(op->src[0])); case GGML_OP_ROPE: return true; case GGML_OP_IM2COL: return ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_F32 && (op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32); case GGML_OP_CONV_2D: return ggml_is_contiguous(op->src[0]) && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32 && (op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); case GGML_OP_POOL_1D: return false; case GGML_OP_UPSCALE: return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST && !(op->op_params[0] & GGML_SCALE_FLAG_ANTIALIAS); case GGML_OP_POOL_2D: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_PAD: // TODO: add circular padding support for metal, see https://github.com/ggml-org/llama.cpp/pull/16985 if (ggml_get_op_params_i32(op, 8) != 0) { return false; } return (ggml_get_op_params_i32(op, 0) == 0) && (ggml_get_op_params_i32(op, 2) == 0) && (ggml_get_op_params_i32(op, 4) == 0) && (ggml_get_op_params_i32(op, 6) == 0); case GGML_OP_PAD_REFLECT_1D: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_ARGSORT: case GGML_OP_TOP_K: case GGML_OP_ARANGE: return true; case GGML_OP_FLASH_ATTN_EXT: // for new head sizes, add checks here if (op->src[0]->ne[0] != 32 && op->src[0]->ne[0] != 40 && op->src[0]->ne[0] != 48 && op->src[0]->ne[0] != 64 && op->src[0]->ne[0] != 72 && op->src[0]->ne[0] != 80 && op->src[0]->ne[0] != 96 && op->src[0]->ne[0] != 112 && op->src[0]->ne[0] != 128 && op->src[0]->ne[0] != 192 && op->src[0]->ne[0] != 256) { return false; } if (op->src[0]->ne[0] == 576) { // DeepSeek sizes // TODO: disabled for now, until optmized return false; } if (op->src[1]->type != op->src[2]->type) { return false; } return has_simdgroup_mm; // TODO: over-restricted for vec-kernels case GGML_OP_SSM_CONV: case GGML_OP_SSM_SCAN: return has_simdgroup_reduction; case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: return true; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: return has_simdgroup_reduction; case GGML_OP_CPY: case GGML_OP_DUP: case GGML_OP_CONT: { switch (op->src[0]->type) { case GGML_TYPE_F32: switch (op->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_BF16: case 
GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_IQ4_NL: case GGML_TYPE_I32: return true; default: return false; } case GGML_TYPE_F16: switch (op->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: return true; default: return false; } case GGML_TYPE_BF16: switch (op->type) { case GGML_TYPE_F32: case GGML_TYPE_BF16: return true; default: return false; } case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: switch (op->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: return true; default: return false; } case GGML_TYPE_I32: return op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_I32; default: return false; }; } case GGML_OP_GET_ROWS: return true; case GGML_OP_SET_ROWS: { if (op->src[0]->type != GGML_TYPE_F32) { return false; } switch (op->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_BF16: case GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_IQ4_NL: return true; default: return false; }; } case GGML_OP_OPT_STEP_ADAMW: case GGML_OP_OPT_STEP_SGD: return has_simdgroup_reduction; default: return false; } } const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev) { return &dev->props; } // // device buffers // // max memory buffers that can be mapped to the device #define GGML_METAL_MAX_BUFFERS 64 struct ggml_metal_buffer_wrapper { void * data; size_t size; id metal; }; struct ggml_metal_buffer { void * all_data; size_t all_size; // if false, the Metal buffer data is allocated in private GPU memory and is not shared with the host bool is_shared; bool owned; // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap int n_buffers; struct ggml_metal_buffer_wrapper buffers[GGML_METAL_MAX_BUFFERS]; bool use_residency_sets; // optional MTLResidencySet // note: cannot use explicity "id" here because it is not available on certain OSes id rset; // pointers to global device ggml_metal_device_t dev; }; static void ggml_metal_log_allocated_size(id device, size_t size_aligned) { #ifndef GGML_METAL_NDEBUG #if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15) if (@available(macOS 10.12, iOS 16.0, *)) { GGML_LOG_DEBUG("%s: allocated buffer, size = %8.2f MiB, (%8.2f / %8.2f)\n", __func__, size_aligned / 1024.0 / 1024.0, device.currentAllocatedSize / 1024.0 / 1024.0, device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) { GGML_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__); } } else { GGML_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f)\n", __func__, size_aligned / 1024.0 / 1024.0, device.currentAllocatedSize / 1024.0 / 1024.0); } #endif #endif GGML_UNUSED(device); GGML_UNUSED(size_aligned); } // rset init static bool ggml_metal_buffer_rset_init(ggml_metal_buffer_t buf) { buf->rset = nil; if (!buf->use_residency_sets) { return true; } #if defined(GGML_METAL_HAS_RESIDENCY_SETS) if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) { MTLResidencySetDescriptor * desc = [[MTLResidencySetDescriptor alloc] init]; desc.label = @"ggml_metal"; desc.initialCapacity = buf->n_buffers; NSError * error; buf->rset = [buf->dev->mtl_device newResidencySetWithDescriptor:desc error:&error]; if (error) { GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); [desc release]; 
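// A minimal usage sketch for the buffer API implemented below, assuming the device singleton
// and a tensor whose data is to live in this buffer; the buffer size and tensor placement are
// placeholders (normally handled by the backend buffer interface). Illustration only, kept out of the build.
#if 0
void buffer_usage_example(struct ggml_tensor * t, const void * host_data) {
    ggml_metal_device_t dev = ggml_metal_device_get();

    // shared = true requests host-visible memory when the device supports it;
    // otherwise the data is placed in private GPU memory
    ggml_metal_buffer_t buf = ggml_metal_buffer_init(dev, 16u*1024u*1024u, true);

    t->data = ggml_metal_buffer_get_base(buf); // placeholder tensor placement
    ggml_metal_buffer_set_tensor(buf, t, host_data, 0, ggml_nbytes(t));

    struct ggml_metal_buffer_id bid = ggml_metal_buffer_get_id(buf, t);
    (void) bid; // passed to ggml_metal_encoder_set_buffer() when encoding kernels

    ggml_metal_buffer_free(buf);
}
#endif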
return false; } [desc release]; for (int i = 0; i < buf->n_buffers; i++) { [buf->rset addAllocation:buf->buffers[i].metal]; } [buf->rset commit]; [buf->rset requestResidency]; return true; } #endif return true; } // rset free static void ggml_metal_buffer_rset_free(ggml_metal_buffer_t buf) { #if defined(GGML_METAL_HAS_RESIDENCY_SETS) if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) { if (buf->rset) { [buf->rset endResidency]; [buf->rset removeAllAllocations]; [buf->rset release]; } } #else GGML_UNUSED(buf); #endif } static void * ggml_metal_host_malloc(size_t n) { void * data = NULL; #if TARGET_OS_OSX kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE); if (err != KERN_SUCCESS) { GGML_LOG_ERROR("%s: error: vm_allocate failed\n", __func__); return NULL; } #else const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n); if (result != 0) { GGML_LOG_ERROR("%s: error: posix_memalign failed\n", __func__); return NULL; } #endif return data; } ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared) { ggml_metal_buffer_t res = calloc(1, sizeof(struct ggml_metal_buffer)); res->dev = dev; const size_t size_page = sysconf(_SC_PAGESIZE); size_t size_aligned = size; if ((size_aligned % size_page) != 0) { size_aligned += (size_page - (size_aligned % size_page)); } const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev); shared = shared && props_dev->use_shared_buffers; // allocate shared buffer if the device supports it and it is required by the buffer type if (shared) { res->all_data = ggml_metal_host_malloc(size_aligned); res->is_shared = true; } else { // use virtual address from g_addr_device counter res->all_data = (void *) atomic_fetch_add_explicit(&g_addr_device, size_aligned, memory_order_relaxed); res->is_shared = false; } res->all_size = size_aligned; res->owned = true; res->n_buffers = 1; if (res->all_data != NULL) { res->buffers[0].size = size; res->buffers[0].metal = nil; if (size_aligned > 0) { if (props_dev->use_shared_buffers && shared) { res->buffers[0].metal = [res->dev->mtl_device newBufferWithBytesNoCopy:res->all_data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; } else { res->buffers[0].metal = [res->dev->mtl_device newBufferWithLength:size_aligned options:MTLResourceStorageModePrivate]; } } res->buffers[0].data = res->all_data; } if (size_aligned > 0 && (res->all_data == NULL || res->buffers[0].metal == nil)) { GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); free(res); return NULL; } res->use_residency_sets = props_dev->use_residency_sets; if (!ggml_metal_buffer_rset_init(res)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(res); return NULL; } ggml_metal_device_rsets_add(dev, res->rset); //ggml_metal_log_allocated_size(device, size_aligned); return res; } ggml_metal_buffer_t ggml_metal_buffer_map(ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size) { ggml_metal_buffer_t res = calloc(1, sizeof(struct ggml_metal_buffer)); res->dev = dev; res->all_data = ptr; res->all_size = size; res->is_shared = true; res->owned = false; res->n_buffers = 0; const size_t size_page = sysconf(_SC_PAGESIZE); // page-align the data ptr { const uintptr_t offs = (uintptr_t) ptr % size_page; ptr = (void *) ((char *) ptr - offs); size += offs; } size_t size_aligned = size; if ((size_aligned % size_page) != 
0) { size_aligned += (size_page - (size_aligned % size_page)); } const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev); // the buffer fits into the max buffer size allowed by the device if (size_aligned <= props_dev->max_buffer_size) { res->buffers[res->n_buffers].data = ptr; res->buffers[res->n_buffers].size = size; res->buffers[res->n_buffers].metal = nil; if (size_aligned > 0) { res->buffers[res->n_buffers].metal = [res->dev->mtl_device newBufferWithBytesNoCopy:ptr length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; if (res->buffers[res->n_buffers].metal == nil) { GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); free(res); return NULL; } } ggml_metal_log_allocated_size(res->dev->mtl_device, size_aligned); ++res->n_buffers; } else { // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into // one of the views const size_t size_ovlp = ((max_tensor_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case const size_t size_step = props_dev->max_buffer_size - size_ovlp; const size_t size_view = props_dev->max_buffer_size; for (size_t i = 0; i < size; i += size_step) { const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i); res->buffers[res->n_buffers].data = (void *) ((uint8_t *) ptr + i); res->buffers[res->n_buffers].size = size_step_aligned; res->buffers[res->n_buffers].metal = nil; if (size_step_aligned > 0) { res->buffers[res->n_buffers].metal = [res->dev->mtl_device newBufferWithBytesNoCopy:(void *) ((uint8_t *) ptr + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil]; if (res->buffers[res->n_buffers].metal == nil) { GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0); free(res); return NULL; } } ggml_metal_log_allocated_size(res->dev->mtl_device, size_step_aligned); if (i + size_step < size) { GGML_LOG_INFO("\n"); } ++res->n_buffers; } } res->use_residency_sets = props_dev->use_residency_sets; if (!ggml_metal_buffer_rset_init(res)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(res); return NULL; } ggml_metal_device_rsets_add(dev, res->rset); return res; } void ggml_metal_buffer_free(ggml_metal_buffer_t buf) { ggml_metal_device_rsets_rm(buf->dev, buf->rset); for (int i = 0; i < buf->n_buffers; i++) { [buf->buffers[i].metal release]; } ggml_metal_buffer_rset_free(buf); if (buf->is_shared && buf->owned) { #if TARGET_OS_OSX vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)buf->all_data, buf->all_size); #else free(buf->all_data); #endif } free(buf); } void * ggml_metal_buffer_get_base(ggml_metal_buffer_t buf) { return buf->all_data; } bool ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf) { return buf->is_shared; } void ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { if (buf->is_shared) { memset((char *) tensor->data + offset, value, size); return; } @autoreleasepool { // dst struct ggml_metal_buffer_id bid_dst = ggml_metal_buffer_get_id(buf, tensor); bid_dst.offs += offset; id cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences]; { id encoder = [cmd_buf blitCommandEncoder]; [encoder fillBuffer:bid_dst.metal range:NSMakeRange(bid_dst.offs, bid_dst.offs + size) value:value]; [encoder endEncoding]; } [cmd_buf commit]; 
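// Illustrative sketch (not part of the original source): the view-splitting arithmetic
// used by ggml_metal_buffer_map() above. Consecutive views overlap by a bit more than
// max_tensor_size (rounded up to a page, plus one extra page), so any tensor of at most
// max_tensor_size is guaranteed to lie entirely inside one view. Hypothetical debug
// helper; the real tail view uses the page-aligned total size.
#if 0
static void ggml_metal_dbg_print_views(size_t size, size_t size_page,
                                       size_t max_buffer_size, size_t max_tensor_size) {
    const size_t size_ovlp = ((max_tensor_size + size_page - 1) / size_page + 1) * size_page;
    const size_t size_step = max_buffer_size - size_ovlp; // stride between view starts
    const size_t size_view = max_buffer_size;             // size of each full view
    for (size_t i = 0; i < size; i += size_step) {
        const size_t len = (i + size_view <= size) ? size_view : (size - i);
        printf("view: offs = %zu, size = %zu\n", i, len);  // requires <stdio.h>
    }
}
#endif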
[cmd_buf waitUntilCompleted]; } } void ggml_metal_buffer_set_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { if (buf->is_shared) { memcpy((char *) tensor->data + offset, data, size); return; } @autoreleasepool { // src void * data_ptr = (void *)(uintptr_t) data; // "const cast" the src data id buf_src = [buf->dev->mtl_device newBufferWithBytesNoCopy:data_ptr length:size options:MTLResourceStorageModeShared deallocator:nil]; GGML_ASSERT(buf_src); // dst struct ggml_metal_buffer_id bid_dst = ggml_metal_buffer_get_id(buf, tensor); bid_dst.offs += offset; // note: for experimentation purposes, here we use a semaphore to wait for the copy to complete // this is alternative to waitUntilCompleted, which should be faster, but don't seem to make much difference dispatch_semaphore_t completion_semaphore = dispatch_semaphore_create(0); id cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences]; { id encoder = [cmd_buf blitCommandEncoder]; [encoder copyFromBuffer:buf_src sourceOffset:0 toBuffer:bid_dst.metal destinationOffset:bid_dst.offs size:size]; [encoder endEncoding]; } [cmd_buf addCompletedHandler:^(id cb) { // TODO: can check for errors here GGML_UNUSED(cb); dispatch_semaphore_signal(completion_semaphore); }]; [cmd_buf commit]; dispatch_semaphore_wait(completion_semaphore, DISPATCH_TIME_FOREVER); dispatch_release(completion_semaphore); //[cmd_buf waitUntilCompleted]; } } void ggml_metal_buffer_get_tensor(ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { if (buf->is_shared) { memcpy(data, (const char *) tensor->data + offset, size); return; } @autoreleasepool { // src struct ggml_metal_buffer_id bid_src = ggml_metal_buffer_get_id(buf, tensor); bid_src.offs += offset; // dst id buf_dst = [buf->dev->mtl_device newBufferWithBytesNoCopy:data length:size options:MTLResourceStorageModeShared deallocator:nil]; GGML_ASSERT(buf_dst); id cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences]; { id encoder = [cmd_buf blitCommandEncoder]; [encoder copyFromBuffer:bid_src.metal sourceOffset:bid_src.offs toBuffer:buf_dst destinationOffset:0 size:size]; [encoder endEncoding]; } [cmd_buf commit]; [cmd_buf waitUntilCompleted]; } } void ggml_metal_buffer_clear(ggml_metal_buffer_t buf, uint8_t value) { if (buf->is_shared) { memset(buf->all_data, value, buf->all_size); return; } @autoreleasepool { id cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences]; { id encoder = [cmd_buf blitCommandEncoder]; [encoder fillBuffer:buf->buffers[0].metal range:NSMakeRange(0, buf->buffers[0].size) value:value]; [encoder endEncoding]; } [cmd_buf commit]; [cmd_buf waitUntilCompleted]; } } struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t) { struct ggml_metal_buffer_id res = { nil, 0 }; const int64_t tsize = ggml_nbytes(t); // find the view that contains the tensor fully for (int i = 0; i < buf->n_buffers; ++i) { const int64_t ioffs = (int64_t) t->data - (int64_t) buf->buffers[i].data; //GGML_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf->buffers[i].size); if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf->buffers[i].size) { res.metal = buf->buffers[i].metal; res.offs = (size_t) ioffs; //GGML_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs); return res; } } GGML_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, 
t->name); return res; } ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-impl.h000066400000000000000000000424441512524704700223470ustar00rootroot00000000000000#ifndef GGML_METAL_IMPL #define GGML_METAL_IMPL // kernel parameters for mat-vec threadgroups // // N_R0: number of src0 rows to process per simdgroup // N_SG: number of simdgroups per threadgroup // // TODO: for optimal performance, become function of the device and work size #define N_R0_Q4_0 4 #define N_SG_Q4_0 2 #define N_R0_Q4_1 4 #define N_SG_Q4_1 2 #define N_R0_Q5_0 4 #define N_SG_Q5_0 2 #define N_R0_Q5_1 4 #define N_SG_Q5_1 2 #define N_R0_Q8_0 2 #define N_SG_Q8_0 4 #define N_R0_MXFP4 2 #define N_SG_MXFP4 2 #define N_R0_Q2_K 4 #define N_SG_Q2_K 2 #define N_R0_Q3_K 2 #define N_SG_Q3_K 2 #define N_R0_Q4_K 2 #define N_SG_Q4_K 2 #define N_R0_Q5_K 2 #define N_SG_Q5_K 2 #define N_R0_Q6_K 2 #define N_SG_Q6_K 2 #define N_R0_IQ1_S 4 #define N_SG_IQ1_S 2 #define N_R0_IQ1_M 4 #define N_SG_IQ1_M 2 #define N_R0_IQ2_XXS 4 #define N_SG_IQ2_XXS 2 #define N_R0_IQ2_XS 4 #define N_SG_IQ2_XS 2 #define N_R0_IQ2_S 4 #define N_SG_IQ2_S 2 #define N_R0_IQ3_XXS 4 #define N_SG_IQ3_XXS 2 #define N_R0_IQ3_S 4 #define N_SG_IQ3_S 2 #define N_R0_IQ4_NL 2 #define N_SG_IQ4_NL 2 #define N_R0_IQ4_XS 2 #define N_SG_IQ4_XS 2 // function constants offsets #define FC_FLASH_ATTN_EXT_PAD 100 #define FC_FLASH_ATTN_EXT_BLK 200 #define FC_FLASH_ATTN_EXT 300 #define FC_FLASH_ATTN_EXT_VEC 400 #define FC_FLASH_ATTN_EXT_VEC_REDUCE 500 #define FC_MUL_MV 600 #define FC_MUL_MM 700 #define FC_ROPE 800 #define FC_SSM_CONV 900 #define FC_COUNT_EQUAL 1000 // op-specific constants #define OP_FLASH_ATTN_EXT_NQPTG 8 #define OP_FLASH_ATTN_EXT_NCPSG 64 #define OP_FLASH_ATTN_EXT_VEC_NQPTG 1 #define OP_FLASH_ATTN_EXT_VEC_NCPSG 32 // kernel argument structs // // - element counters (e.g. ne00) typically use int32_t to reduce register usage // however, be careful from int overflows when using those in the kernel implementation // // - strides (e.g. 
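// Illustrative sketch (not part of the original source): the N_R0_*/N_SG_* constants
// above size the mat-vec threadgroups -- each simdgroup processes N_R0 rows of src0 and
// a threadgroup holds N_SG simdgroups, so one threadgroup covers N_R0*N_SG rows.
// Assumed grid-size computation, shown only for illustration (the actual dispatch lives
// in ggml-metal-ops.cpp and may differ):
#if 0
static int mv_n_threadgroups(int64_t ne01, int n_r0, int n_sg) {
    const int rows_per_tg = n_r0 * n_sg;
    return (int) ((ne01 + rows_per_tg - 1) / rows_per_tg); // ceil-divide the src0 rows
}
#endif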
nb00) use uint64_t typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne10; int32_t ne11; int32_t ne12; int32_t ne13; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; int32_t dim; } ggml_metal_kargs_concat; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne10; int32_t ne11; int32_t ne12; int32_t ne13; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; uint64_t offs; uint64_t o1[8]; } ggml_metal_kargs_bin; typedef struct { int64_t ne0; int64_t ne1; size_t nb01; size_t nb02; size_t nb11; size_t nb21; } ggml_metal_kargs_add_id; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_repeat; typedef struct { float scale; float bias; } ggml_metal_kargs_scale; typedef struct { float val; } ggml_metal_kargs_fill; typedef struct { float min; float max; } ggml_metal_kargs_clamp; typedef struct { int64_t nk0; int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t ne0; int64_t ne1; int64_t ne2; int64_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_cpy; typedef struct { int64_t ne10; int64_t ne11; int64_t ne12; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; uint64_t nb1; uint64_t nb2; uint64_t nb3; uint64_t offs; bool inplace; } ggml_metal_kargs_set; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; int32_t n_past; int32_t n_dims; int32_t n_ctx_orig; float freq_base; float freq_scale; float ext_factor; float attn_factor; float beta_fast; float beta_slow; int32_t sect_0; int32_t sect_1; int32_t sect_2; int32_t sect_3; bool src2; } ggml_metal_kargs_rope; typedef struct { int32_t ne11; int32_t ne_12_2; // assume K and V are same shape int32_t ne_12_3; uint64_t nb11; uint64_t nb12; uint64_t nb13; uint64_t nb21; uint64_t nb22; uint64_t nb23; int32_t ne31; int32_t ne32; int32_t ne33; uint64_t nb31; uint64_t nb32; uint64_t nb33; } ggml_metal_kargs_flash_attn_ext_pad; typedef struct { int32_t ne01; int32_t ne30; int32_t ne31; int32_t ne32; int32_t ne33; uint64_t nb31; uint64_t nb32; uint64_t nb33; } ggml_metal_kargs_flash_attn_ext_blk; typedef struct { int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne11; int32_t ne_12_2; // assume K and V are same shape int32_t ne_12_3; int32_t ns10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ns20; uint64_t nb21; uint64_t nb22; uint64_t nb23; int32_t ne31; int32_t ne32; int32_t ne33; uint64_t nb31; uint64_t nb32; uint64_t nb33; int32_t ne1; int32_t ne2; int32_t ne3; float scale; float max_bias; float m0; float m1; int32_t n_head_log2; float logit_softcap; } ggml_metal_kargs_flash_attn_ext; typedef struct { int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne11; 
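// Illustrative sketch (not part of the original source): the argument structs in this
// header follow one convention -- element counts (ne*) are narrowed to int32_t to reduce
// register pressure in the kernels, while byte strides (nb*) stay uint64_t. A host-side
// fill for ggml_metal_kargs_repeat could look like the hypothetical helper below; the
// real code fills these structs with designated initializers in ggml-metal-ops.cpp.
#if 0
static void fill_repeat_args(ggml_metal_kargs_repeat * args,
                             const struct ggml_tensor * src, const struct ggml_tensor * dst) {
    args->ne00 = (int32_t) src->ne[0]; args->ne01 = (int32_t) src->ne[1];
    args->ne02 = (int32_t) src->ne[2]; args->ne03 = (int32_t) src->ne[3];
    args->nb00 = src->nb[0]; args->nb01 = src->nb[1];
    args->nb02 = src->nb[2]; args->nb03 = src->nb[3];
    args->ne0  = (int32_t) dst->ne[0]; args->ne1  = (int32_t) dst->ne[1];
    args->ne2  = (int32_t) dst->ne[2]; args->ne3  = (int32_t) dst->ne[3];
    args->nb0  = dst->nb[0]; args->nb1  = dst->nb[1];
    args->nb2  = dst->nb[2]; args->nb3  = dst->nb[3];
}
#endif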
int32_t ne_12_2; // assume K and V are same shape int32_t ne_12_3; int32_t ns10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ns20; uint64_t nb21; uint64_t nb22; uint64_t nb23; int32_t ne31; int32_t ne32; int32_t ne33; uint64_t nb31; uint64_t nb32; uint64_t nb33; int32_t ne1; int32_t ne2; int32_t ne3; float scale; float max_bias; float m0; float m1; int32_t n_head_log2; float logit_softcap; } ggml_metal_kargs_flash_attn_ext_vec; typedef struct { int32_t nrows; } ggml_metal_kargs_flash_attn_ext_vec_reduce; typedef struct { int32_t ne00; int32_t ne02; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne12; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ne0; int32_t ne1; int16_t r2; int16_t r3; } ggml_metal_kargs_mul_mm; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne10; int32_t ne11; int32_t ne12; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ne0; int32_t ne1; int32_t nr0; int16_t r2; int16_t r3; } ggml_metal_kargs_mul_mv; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne10; int32_t ne11; int32_t ne12; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ne0; int32_t ne1; int16_t r2; int16_t r3; } ggml_metal_kargs_mul_mv_ext; typedef struct { int32_t ne02; int32_t ne10; int32_t ne11; // n_expert_used (bcast) uint64_t nb11; uint64_t nb12; int32_t ne21; // n_tokens int32_t ne20; // n_expert_used uint64_t nb21; } ggml_metal_kargs_mul_mm_id_map0; typedef struct { int32_t ne00; int32_t ne02; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne11; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; int32_t ne20; int32_t ne21; int32_t ne0; int32_t ne1; int16_t r2; int16_t r3; } ggml_metal_kargs_mul_mm_id; typedef struct { int32_t nei0; int32_t nei1; uint64_t nbi1; int32_t ne00; int32_t ne01; int32_t ne02; uint64_t nb00; uint64_t nb01; uint64_t nb02; int32_t ne10; int32_t ne11; int32_t ne12; int32_t ne13; uint64_t nb10; uint64_t nb11; uint64_t nb12; int32_t ne0; int32_t ne1; uint64_t nb1; int32_t nr0; } ggml_metal_kargs_mul_mv_id; // NORM // RMS_NORM typedef struct { int32_t ne00; int32_t ne00_t; uint64_t nb1; uint64_t nb2; uint64_t nb3; float eps; int32_t nef1[3]; int32_t nef2[3]; int32_t nef3[3]; uint64_t nbf1[3]; uint64_t nbf2[3]; uint64_t nbf3[3]; } ggml_metal_kargs_norm; typedef struct { int32_t ne00; int32_t ne00_4; uint64_t nb01; float eps; } ggml_metal_kargs_l2_norm; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; uint64_t nb00; uint64_t nb01; uint64_t nb02; int32_t ngrp; float eps; } ggml_metal_kargs_group_norm; typedef struct { int32_t IC; int32_t IL; int32_t K; int32_t s0; uint64_t nb0; uint64_t nb1; } ggml_metal_kargs_conv_transpose_1d; typedef struct { int32_t IC; int32_t IH; int32_t IW; int32_t KH; int32_t KW; int32_t OC; int32_t s0; uint64_t nb0; uint64_t nb1; uint64_t nb2; } ggml_metal_kargs_conv_transpose_2d; typedef struct { uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; int32_t IW; int32_t IH; int32_t KW; int32_t KH; int32_t IC; int32_t OC; int32_t OW; int32_t OH; int32_t N; int32_t s0; int32_t s1; int32_t p0; int32_t p1; int32_t d0; int32_t d1; } ggml_metal_kargs_conv_2d; typedef struct { uint64_t ofs0; uint64_t ofs1; int32_t IW; int32_t IH; int32_t CHW; int32_t s0; int32_t s1; int32_t p0; int32_t p1; 
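// Illustrative sketch (not part of the original source): r2/r3 in the mul_mm/mul_mv
// argument structs above are the usual ggml broadcast ratios along dims 2 and 3, i.e.
// how many times src0's batch dims are repeated to match src1. Assumed derivation,
// shown only to make the convention explicit:
#if 0
static void mul_mat_broadcast_ratios(const int64_t * ne_src0, const int64_t * ne_src1,
                                     int16_t * r2, int16_t * r3) {
    *r2 = (int16_t) (ne_src1[2] / ne_src0[2]); // ggml_mul_mat guarantees divisibility
    *r3 = (int16_t) (ne_src1[3] / ne_src0[3]);
}
#endif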
int32_t d0; int32_t d1; int32_t N; int32_t KH; int32_t KW; int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources } ggml_metal_kargs_im2col; typedef struct{ int32_t ne00; uint64_t nb01; int32_t ne10; uint64_t nb11; int32_t ne0; uint64_t nb1; int32_t i00; int32_t i10; float alpha; float limit; } ggml_metal_kargs_glu; typedef struct { uint64_t np; } ggml_metal_kargs_sum; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t ne0; int64_t ne1; int64_t ne2; int64_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_sum_rows; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t net0; int64_t net1; int64_t net2; int64_t net3; uint64_t nbt0; uint64_t nbt1; uint64_t nbt2; uint64_t nbt3; bool outb; } ggml_metal_kargs_cumsum_blk; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t net0; int64_t net1; int64_t net2; int64_t net3; uint64_t nbt0; uint64_t nbt1; uint64_t nbt2; uint64_t nbt3; } ggml_metal_kargs_cumsum_add; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne11; int32_t ne12; int32_t ne13; uint64_t nb11; uint64_t nb12; uint64_t nb13; uint64_t nb1; uint64_t nb2; uint64_t nb3; float scale; float max_bias; float m0; float m1; int32_t n_head_log2; } ggml_metal_kargs_soft_max; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; uint64_t nb00; uint64_t nb01; uint64_t nb02; int64_t ne10; int64_t ne11; uint64_t nb10; uint64_t nb11; int64_t ne0; int64_t ne1; int64_t ne2; uint64_t nb0; uint64_t nb1; uint64_t nb2; } ggml_metal_kargs_ssm_conv; typedef struct { int64_t d_state; int64_t d_inner; int64_t n_head; int64_t n_group; int64_t n_seq_tokens; int64_t n_seqs; uint64_t s_off; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t ns12; uint64_t nb13; uint64_t nb20; uint64_t nb21; uint64_t ns21; uint64_t nb22; int64_t ne30; uint64_t nb31; uint64_t nb41; uint64_t nb42; uint64_t ns42; uint64_t nb43; uint64_t nb51; uint64_t nb52; uint64_t ns52; uint64_t nb53; uint64_t nb0; } ggml_metal_kargs_ssm_scan; typedef struct { int32_t ne00t; int32_t ne00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne10; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_get_rows; typedef struct { int32_t nk0; int32_t ne01; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne11; int32_t ne12; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_set_rows; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t ne0; int64_t ne1; int64_t ne2; int64_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; float sf0; float sf1; float sf2; float sf3; } ggml_metal_kargs_upscale; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t ne0; int64_t ne1; int64_t ne2; int64_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_pad; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int64_t ne0; int64_t ne1; int64_t 
ne2; int64_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; int32_t p0; int32_t p1; } ggml_metal_kargs_pad_reflect_1d; typedef struct { uint64_t nb1; int dim; int max_period; } ggml_metal_kargs_timestep_embedding; typedef struct { float slope; } ggml_metal_kargs_leaky_relu; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; uint64_t nb0; uint64_t nb1; uint64_t nb2; uint64_t nb3; } ggml_metal_kargs_tri; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; int32_t top_k; } ggml_metal_kargs_argsort; typedef struct { int64_t ne00; int64_t ne01; int64_t ne02; int64_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; int32_t ne0; int32_t ne1; int32_t ne2; int32_t ne3; int32_t top_k; int32_t len; } ggml_metal_kargs_argsort_merge; typedef struct { int64_t ne0; float start; float step; } ggml_metal_kargs_arange; typedef struct { int64_t val; } ggml_metal_kargs_memset; typedef struct { int32_t ne00; int32_t ne01; int32_t ne02; int32_t ne03; uint64_t nb00; uint64_t nb01; uint64_t nb02; uint64_t nb03; uint64_t nb10; uint64_t nb11; uint64_t nb12; uint64_t nb13; } ggml_metal_kargs_count_equal; typedef struct { int32_t k0; int32_t k1; int32_t s0; int32_t s1; int32_t p0; int32_t p1; int64_t IH; int64_t IW; int64_t OH; int64_t OW; int64_t np; } ggml_metal_kargs_pool_2d; typedef struct { int64_t ne00; uint64_t nb01; } ggml_metal_kargs_argmax; typedef struct { int64_t np; } ggml_metal_kargs_opt_step_adamw; typedef struct { int64_t np; } ggml_metal_kargs_opt_step_sgd; #endif // GGML_METAL_IMPL ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-ops.cpp000066400000000000000000004264341512524704700225470ustar00rootroot00000000000000#include "ggml-metal-ops.h" #include "ggml.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-metal-impl.h" #include "ggml-metal-common.h" #include "ggml-metal-device.h" #include #include #include #include static ggml_metal_buffer_id ggml_metal_get_buffer_id(const ggml_tensor * t) { if (!t) { return { nullptr, 0 }; } ggml_backend_buffer_t buffer = t->view_src ? 
t->view_src->buffer : t->buffer; ggml_metal_buffer_t ctx = (ggml_metal_buffer_t) buffer->context; return ggml_metal_buffer_get_id(ctx, t); } struct ggml_metal_op { ggml_metal_op( ggml_metal_device_t dev, ggml_metal_cmd_buf_t cmd_buf, ggml_cgraph * gf, int idx_start, int idx_end, bool use_fusion, bool use_concurrency, bool use_capture, int debug_graph, int debug_fusion) { this->dev = dev; this->lib = ggml_metal_device_get_library(dev); this->enc = ggml_metal_encoder_init(cmd_buf, use_concurrency); this->mem_ranges = ggml_mem_ranges_init(debug_graph); this->idx_start = idx_start; this->idx_end = idx_end; this->use_fusion = use_fusion; this->use_concurrency = use_concurrency; this->use_capture = use_capture; this->debug_graph = debug_graph; this->debug_fusion = debug_fusion; this->gf = gf; idxs.reserve(gf->n_nodes); // filter empty nodes // TODO: this can be removed when the allocator starts filtering them earlier // https://github.com/ggml-org/llama.cpp/pull/16130#issuecomment-3327905830 for (int i = idx_start; i < idx_end; i++) { if (!ggml_op_is_empty(gf->nodes[i]->op) && !ggml_is_empty(gf->nodes[i])) { idxs.push_back(i); } } } ~ggml_metal_op() { ggml_metal_encoder_end_encoding(this->enc); ggml_metal_encoder_free(this->enc); ggml_mem_ranges_free(this->mem_ranges); } int n_nodes() const { return idxs.size(); } ggml_tensor * node(int i) const { assert(i >= 0 && i < (int) idxs.size()); return ggml_graph_node(gf, idxs[i]); } bool can_fuse(int i0, const ggml_op * ops, int n_ops) const { assert(use_fusion); assert(i0 >= 0 && i0 < n_nodes()); if (i0 + n_ops > n_nodes()) { return false; } return ggml_can_fuse_ext(gf, idxs.data() + i0, ops, n_ops); } ggml_metal_device_t dev; ggml_metal_library_t lib; ggml_metal_encoder_t enc; ggml_mem_ranges_t mem_ranges; bool use_fusion; bool use_concurrency; bool use_capture; int debug_graph; int debug_fusion; private: ggml_cgraph * gf; int idx_start; int idx_end; // non-empty node indices std::vector idxs; }; ggml_metal_op_t ggml_metal_op_init( ggml_metal_device_t dev, ggml_metal_cmd_buf_t cmd_buf, ggml_cgraph * gf, int idx_start, int idx_end, bool use_fusion, bool use_concurrency, bool use_capture, int debug_graph, int debug_fusion) { ggml_metal_op_t res = new ggml_metal_op( dev, cmd_buf, gf, idx_start, idx_end, use_fusion, use_concurrency, use_capture, debug_graph, debug_fusion); return res; } void ggml_metal_op_free(ggml_metal_op_t ctx) { delete ctx; } int ggml_metal_op_n_nodes(ggml_metal_op_t ctx) { return ctx->n_nodes(); } static bool ggml_metal_op_concurrency_reset(ggml_metal_op_t ctx) { if (!ctx->mem_ranges) { return true; } ggml_metal_encoder_memory_barrier(ctx->enc); ggml_mem_ranges_reset(ctx->mem_ranges); return true; } static bool ggml_metal_op_concurrency_check(ggml_metal_op_t ctx, const ggml_tensor * node) { if (!ctx->mem_ranges) { return false; } return ggml_mem_ranges_check(ctx->mem_ranges, node); } static bool ggml_metal_op_concurrency_add(ggml_metal_op_t ctx, const ggml_tensor * node) { if (!ctx->mem_ranges) { return true; } return ggml_mem_ranges_add(ctx->mem_ranges, node); } static int ggml_metal_op_encode_impl(ggml_metal_op_t ctx, int idx) { struct ggml_tensor * node = ctx->node(idx); //GGML_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, idx, ggml_op_name(node->op)); if (ggml_is_empty(node)) { return 1; } switch (node->op) { case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_TRANSPOSE: case GGML_OP_PERMUTE: { // noop -> next node if (ctx->debug_graph > 0) { GGML_LOG_DEBUG("%s: node[%5d] - %-12s %s\n", __func__, 
idx, ggml_op_name(node->op), "(noop)"); } } return 1; default: { } break; } if (!ggml_metal_device_supports_op(ctx->dev, node)) { GGML_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(node)); GGML_ABORT("unsupported op"); } int n_fuse = 1; // check if the current node can run concurrently with other nodes before it // the condition is that: // - the current node cannot write to any previous src or dst ranges // - the current node cannot read from any previous dst ranges // // if the condition is not satisfied, we put a memory barrier and clear all ranges // otherwise, we add the new ranges to the encoding context and process the node concurrently // { const bool is_concurrent = ggml_metal_op_concurrency_check(ctx, node); if (!is_concurrent) { ggml_metal_op_concurrency_reset(ctx); } if (ctx->debug_graph > 0) { GGML_LOG_DEBUG("%s: node[%5d] - %-12s %-12s %s\n", __func__, idx, ggml_op_name(node->op), ggml_get_name(node), is_concurrent ? "(concurrent)" : ""); } if (ctx->debug_graph > 1) { GGML_TENSOR_LOCALS( int64_t, ne0, node->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, node->src[0], nb); GGML_TENSOR_LOCALS( int64_t, ne1, node->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, node->src[1], nb); GGML_TENSOR_LOCALS( int64_t, ne2, node->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, node->src[2], nb); GGML_TENSOR_LOCALS( int64_t, ne3, node->src[3], ne); GGML_TENSOR_LOCALS(uint64_t, nb3, node->src[3], nb); GGML_TENSOR_LOCALS( int64_t, ne, node, ne); GGML_TENSOR_LOCALS(uint64_t, nb, node, nb); if (node->src[0]) { GGML_LOG_DEBUG("%s: src0 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[0]->type), ne00, ne01, ne02, ne03, nb00, nb01, nb02, nb03, ggml_is_contiguous(node->src[0]), node->src[0]->name); } if (node->src[1]) { GGML_LOG_DEBUG("%s: src1 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[1]->type), ne10, ne11, ne12, ne13, nb10, nb11, nb12, nb13, ggml_is_contiguous(node->src[1]), node->src[1]->name); } if (node->src[2]) { GGML_LOG_DEBUG("%s: src2 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[2]->type), ne20, ne21, ne22, ne23, nb20, nb21, nb22, nb23, ggml_is_contiguous(node->src[2]), node->src[2]->name); } if (node->src[3]) { GGML_LOG_DEBUG("%s: src3 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[3]->type), ne30, ne31, ne32, ne33, nb30, nb31, nb32, nb33, ggml_is_contiguous(node->src[3]), node->src[3]->name); } if (node) { GGML_LOG_DEBUG("%s: node - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(node->type), ne0, ne1, ne2, ne3, nb0, nb1, nb2, nb3, node->name); } } } switch (node->op) { case GGML_OP_CONCAT: { n_fuse = ggml_metal_op_concat(ctx, idx); } break; case GGML_OP_ADD: case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: { n_fuse = ggml_metal_op_bin(ctx, idx); } break; case GGML_OP_ADD_ID: { n_fuse = ggml_metal_op_add_id(ctx, idx); } break; case GGML_OP_REPEAT: { n_fuse = ggml_metal_op_repeat(ctx, idx); } break; case GGML_OP_ACC: { n_fuse = ggml_metal_op_acc(ctx, idx); } break; case GGML_OP_SCALE: { n_fuse = ggml_metal_op_scale(ctx, idx); } break; case GGML_OP_FILL: { n_fuse = ggml_metal_op_fill(ctx, idx); } break; case GGML_OP_CLAMP: { n_fuse = ggml_metal_op_clamp(ctx, idx); } break; case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: case 
GGML_OP_LOG: case GGML_OP_UNARY: { n_fuse = ggml_metal_op_unary(ctx, idx); } break; case GGML_OP_GLU: { n_fuse = ggml_metal_op_glu(ctx, idx); } break; case GGML_OP_SUM: { n_fuse = ggml_metal_op_sum(ctx, idx); } break; case GGML_OP_SUM_ROWS: case GGML_OP_MEAN: { n_fuse = ggml_metal_op_sum_rows(ctx, idx); } break; case GGML_OP_CUMSUM: { n_fuse = ggml_metal_op_cumsum(ctx, idx); } break; case GGML_OP_SOFT_MAX: { n_fuse = ggml_metal_op_soft_max(ctx, idx); } break; case GGML_OP_SSM_CONV: { n_fuse = ggml_metal_op_ssm_conv(ctx, idx); } break; case GGML_OP_SSM_SCAN: { n_fuse = ggml_metal_op_ssm_scan(ctx, idx); } break; case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: { n_fuse = ggml_metal_op_rwkv(ctx, idx); } break; case GGML_OP_MUL_MAT: { n_fuse = ggml_metal_op_mul_mat(ctx, idx); } break; case GGML_OP_MUL_MAT_ID: { n_fuse = ggml_metal_op_mul_mat_id(ctx, idx); } break; case GGML_OP_GET_ROWS: { n_fuse = ggml_metal_op_get_rows(ctx, idx); } break; case GGML_OP_SET_ROWS: { n_fuse = ggml_metal_op_set_rows(ctx, idx); } break; case GGML_OP_L2_NORM: { n_fuse = ggml_metal_op_l2_norm(ctx, idx); } break; case GGML_OP_GROUP_NORM: { n_fuse = ggml_metal_op_group_norm(ctx, idx); } break; case GGML_OP_NORM: case GGML_OP_RMS_NORM: { n_fuse = ggml_metal_op_norm(ctx, idx); } break; case GGML_OP_ROPE: { n_fuse = ggml_metal_op_rope(ctx, idx); } break; case GGML_OP_IM2COL: { n_fuse = ggml_metal_op_im2col(ctx, idx); } break; case GGML_OP_CONV_2D: { n_fuse = ggml_metal_op_conv_2d(ctx, idx); } break; case GGML_OP_CONV_TRANSPOSE_1D: { n_fuse = ggml_metal_op_conv_transpose_1d(ctx, idx); } break; case GGML_OP_CONV_TRANSPOSE_2D: { n_fuse = ggml_metal_op_conv_transpose_2d(ctx, idx); } break; case GGML_OP_UPSCALE: { n_fuse = ggml_metal_op_upscale(ctx, idx); } break; case GGML_OP_PAD: { n_fuse = ggml_metal_op_pad(ctx, idx); } break; case GGML_OP_PAD_REFLECT_1D: { n_fuse = ggml_metal_op_pad_reflect_1d(ctx, idx); } break; case GGML_OP_ARANGE: { n_fuse = ggml_metal_op_arange(ctx, idx); } break; case GGML_OP_TIMESTEP_EMBEDDING: { n_fuse = ggml_metal_op_timestep_embedding(ctx, idx); } break; case GGML_OP_ARGSORT: { n_fuse = ggml_metal_op_argsort(ctx, idx); } break; case GGML_OP_TOP_K: { n_fuse = ggml_metal_op_top_k(ctx, idx); } break; case GGML_OP_LEAKY_RELU: { n_fuse = ggml_metal_op_leaky_relu(ctx, idx); } break; case GGML_OP_TRI: { n_fuse = ggml_metal_op_tri(ctx, idx); } break; case GGML_OP_FLASH_ATTN_EXT: { n_fuse = ggml_metal_op_flash_attn_ext(ctx, idx); } break; case GGML_OP_DUP: case GGML_OP_CPY: case GGML_OP_CONT: { n_fuse = ggml_metal_op_cpy(ctx, idx); } break; case GGML_OP_POOL_2D: { n_fuse = ggml_metal_op_pool_2d(ctx, idx); } break; case GGML_OP_ARGMAX: { n_fuse = ggml_metal_op_argmax(ctx, idx); } break; case GGML_OP_OPT_STEP_ADAMW: { n_fuse = ggml_metal_op_opt_step_adamw(ctx, idx); } break; case GGML_OP_OPT_STEP_SGD: { n_fuse = ggml_metal_op_opt_step_sgd(ctx, idx); } break; case GGML_OP_COUNT_EQUAL: { n_fuse = ggml_metal_op_count_equal(ctx, idx); } break; default: { GGML_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(node->op)); GGML_ABORT("fatal error"); } } if (ctx->debug_graph > 0) { if (n_fuse > 1) { GGML_LOG_DEBUG("%s: fuse %d ops\n", __func__, n_fuse); } } // update the mem ranges in the encoding context for (int i = 0; i < n_fuse; ++i) { if (!ggml_metal_op_concurrency_add(ctx, ctx->node(idx + i))) { ggml_metal_op_concurrency_reset(ctx); } } return n_fuse; } int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx) { if (ctx->use_capture) { 
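// Illustrative sketch (not part of the original source): ggml_metal_op_encode() returns
// the number of graph nodes it consumed (n_fuse >= 1), so a driver loop advances by that
// amount. Hypothetical caller, assuming `ctx` came from ggml_metal_op_init():
#if 0
static void encode_all_nodes(ggml_metal_op_t ctx) {
    for (int i = 0; i < ggml_metal_op_n_nodes(ctx); ) {
        i += ggml_metal_op_encode(ctx, i); // advance past all fused nodes
    }
}
#endif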
ggml_metal_encoder_debug_group_push(ctx->enc, ggml_op_desc(ctx->node(idx))); } int res = ggml_metal_op_encode_impl(ctx, idx); if (idx + res > ctx->n_nodes()) { GGML_ABORT("fusion error: nodes spanning multiple encoders have been fused. this indicates a bug in the fusion logic %s", "https://github.com/ggml-org/llama.cpp/pull/14849"); } if (ctx->use_capture) { ggml_metal_encoder_debug_group_pop(ctx->enc); } return res; } int ggml_metal_op_concat(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int32_t dim = ((const int32_t *) op->op_params)[0]; ggml_metal_kargs_concat args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.ne13 =*/ ne13, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.dim =*/ dim, }; auto pipeline = ggml_metal_library_get_pipeline_base(lib, GGML_OP_CONCAT); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); const int nth = std::min(1024, ne0); ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1); return 1; } int ggml_metal_op_repeat(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_repeat(lib, op->type); ggml_metal_kargs_repeat args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0); ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1); return 1; } int ggml_metal_op_acc(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); 
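// Illustrative sketch (not part of the original source): the element-wise ops above
// (CONCAT, REPEAT, ...) dispatch one threadgroup per destination row -- a grid of
// (ne1, ne2, ne3) threadgroups with up to `nth` threads each -- and the threads stride
// over the ne0 elements of that row. A rough CPU-side picture of the assumed indexing,
// with an identity copy standing in for the actual per-element op:
#if 0
static void row_op_reference(float * dst, const float * src,
                             int ne0, int ne1, int ne2, int ne3, int nth) {
    for (int i3 = 0; i3 < ne3; ++i3)
    for (int i2 = 0; i2 < ne2; ++i2)
    for (int i1 = 0; i1 < ne1; ++i1)            // one threadgroup per (i1, i2, i3)
    for (int tid = 0; tid < nth; ++tid)         // threads within the threadgroup
    for (int i0 = tid; i0 < ne0; i0 += nth) {   // each thread strides over the row
        const size_t idx = (((size_t) i3*ne2 + i2)*ne1 + i1)*ne0 + i0;
        dst[idx] = src[idx];
    }
}
#endif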
GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(ggml_is_contiguous(op->src[1])); const size_t pnb1 = ((const int32_t *) op->op_params)[0]; const size_t pnb2 = ((const int32_t *) op->op_params)[1]; const size_t pnb3 = ((const int32_t *) op->op_params)[2]; const size_t offs = ((const int32_t *) op->op_params)[3]; const bool inplace = (bool) ((const int32_t *) op->op_params)[4]; if (!inplace) { // run a separete kernel to cpy src->dst // not sure how to avoid this // TODO: make a simpler cpy_bytes kernel //const id pipeline = ctx->pipelines[GGML_METAL_PIPELINE_TYPE_CPY_F32_F32].obj; auto pipeline = ggml_metal_library_get_pipeline_cpy(lib, op->src[0]->type, op->type); ggml_metal_kargs_cpy args = { /*.nk0 =*/ ne00, /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); ggml_metal_op_concurrency_reset(ctx); } ggml_metal_kargs_bin args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ pnb1, /*.nb02 =*/ pnb2, /*.nb03 =*/ pnb3, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.ne13 =*/ ne13, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ pnb1, /*.nb2 =*/ pnb2, /*.nb3 =*/ pnb3, /*.offs =*/ offs, /*.o1 =*/ { 0 }, }; auto pipeline = ggml_metal_library_get_pipeline_bin(lib, GGML_OP_ADD, 1, false); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00); ggml_metal_encoder_dispatch_threadgroups(enc, ne11, ne12, ne13, nth, 1, 1); return 1; } int ggml_metal_op_scale(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float scale; float bias; memcpy(&scale, ((const int32_t *) op->op_params) + 0, sizeof(float)); memcpy(&bias, ((const int32_t *) op->op_params) + 1, sizeof(float)); ggml_metal_kargs_scale args = { /*.scale =*/ scale, /*.bias =*/ bias, }; int64_t n = ggml_nelements(op); if (n % 4 == 0) { n /= 4; } auto pipeline = ggml_metal_library_get_pipeline_unary(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, 
ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); return 1; } int ggml_metal_op_fill(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const float val = ggml_get_op_params_f32(op, 0); ggml_metal_kargs_fill args = { /*.val =*/ val }; int64_t n = ggml_nelements(op); if (n % 4 == 0) { n /= 4; } auto pipeline = ggml_metal_library_get_pipeline_unary(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); return 1; } int ggml_metal_op_clamp(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float min; float max; memcpy(&min, ((const int32_t *) op->op_params) + 0, sizeof(float)); memcpy(&max, ((const int32_t *) op->op_params) + 1, sizeof(float)); ggml_metal_kargs_clamp args = { /*.min =*/ min, /*.max =*/ max, }; int64_t n = ggml_nelements(op); if (n % 4 == 0) { n /= 4; } auto pipeline = ggml_metal_library_get_pipeline_unary(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); return 1; } int ggml_metal_op_unary(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); int64_t n = ggml_nelements(op); if (n % 4 == 0) { n /= 4; } auto pipeline = ggml_metal_library_get_pipeline_unary(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 1); ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); return 1; } int ggml_metal_op_glu(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); if (op->src[1]) { GGML_ASSERT(ggml_are_same_shape(op->src[0], op->src[1])); } auto pipeline = ggml_metal_library_get_pipeline_glu(lib, op); const int32_t swp = ggml_get_op_params_i32(op, 1); const float alpha = 
ggml_get_op_params_f32(op, 2); const float limit = ggml_get_op_params_f32(op, 3); const int32_t i00 = swp ? ne0 : 0; const int32_t i10 = swp ? 0 : ne0; ggml_metal_kargs_glu args = { /*.ne00 =*/ ne00, /*.nb01 =*/ nb01, /*.ne10 =*/ op->src[1] ? ne10 : ne00, /*.nb11 =*/ op->src[1] ? nb11 : nb01, /*.ne0 =*/ ne0, /*.nb1 =*/ nb1, /*.i00 =*/ op->src[1] ? 0 : i00, /*.i10 =*/ op->src[1] ? 0 : i10, /*.alpha=*/ alpha, /*.limit=*/ limit }; const int64_t nrows = ggml_nrows(op->src[0]); const int32_t nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00/2); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); if (op->src[1]) { ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); } else { ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 2); } ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_sum(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; const uint64_t n = (uint64_t) ggml_nelements(op->src[0]); ggml_metal_kargs_sum args = { /*.np =*/ n, }; auto pipeline = ggml_metal_library_get_pipeline_sum(lib, op); int nth = 32; // SIMD width while (nth < (int) n && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); nth = std::min(nth, (int) n); const int nsg = (nth + 31) / 32; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, nsg * sizeof(float), 0); ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_sum_rows(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); ggml_metal_kargs_sum_rows args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; auto pipeline = ggml_metal_library_get_pipeline_sum_rows(lib, op); int nth = 32; // SIMD width while (nth < ne00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); nth = std::min(nth, ne00); const size_t smem = pipeline.smem; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); return 1; } int 
ggml_metal_op_cumsum(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline_blk = ggml_metal_library_get_pipeline_cumsum_blk(lib, op); int nth = 1; while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_blk)) { nth *= 2; } GGML_ASSERT(ne00 <= nth*nth); const int64_t net0 = (ne00 + nth - 1) / nth; const int64_t net1 = ne01; const int64_t net2 = ne02; const int64_t net3 = ne03; const uint64_t nbt0 = sizeof(float); const uint64_t nbt1 = net0*nbt0; const uint64_t nbt2 = net1*nbt1; const uint64_t nbt3 = net2*nbt2; const size_t smem = GGML_PAD(32*sizeof(float), 16); ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); ggml_metal_buffer_id bid_tmp = bid_dst; bid_tmp.offs += ggml_nbytes(op); { ggml_metal_kargs_cumsum_blk args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.net0 =*/ net0, /*.net1 =*/ net1, /*.net2 =*/ net2, /*.net3 =*/ net3, /*.nbt0 =*/ nbt0, /*.nbt1 =*/ nbt1, /*.nbt2 =*/ nbt2, /*.nbt3 =*/ nbt3, /*.outb =*/ ne00 > nth, }; ggml_metal_encoder_set_pipeline(enc, pipeline_blk); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_tmp, 2); ggml_metal_encoder_set_buffer (enc, bid_dst, 3); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, net0*ne01, ne02, ne03, nth, 1, 1); } if (ne00 > nth) { ggml_metal_op_concurrency_reset(ctx); { ggml_metal_kargs_cumsum_blk args = { /*.ne00 =*/ net0, /*.ne01 =*/ net1, /*.ne02 =*/ net2, /*.ne03 =*/ net3, /*.nb00 =*/ nbt0, /*.nb01 =*/ nbt1, /*.nb02 =*/ nbt2, /*.nb03 =*/ nbt3, /*.net0 =*/ net0, /*.net1 =*/ net1, /*.net2 =*/ net2, /*.net3 =*/ net3, /*.nbt0 =*/ nbt0, /*.nbt1 =*/ nbt1, /*.nbt2 =*/ nbt2, /*.nbt3 =*/ nbt3, /*.outb =*/ false, }; ggml_metal_encoder_set_pipeline(enc, pipeline_blk); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_tmp, 1); ggml_metal_encoder_set_buffer (enc, bid_tmp, 2); ggml_metal_encoder_set_buffer (enc, bid_tmp, 3); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, net1, net2, net3, nth, 1, 1); } ggml_metal_op_concurrency_reset(ctx); { auto pipeline_add = ggml_metal_library_get_pipeline_cumsum_add(lib, op); ggml_metal_kargs_cumsum_add args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.net0 =*/ net0, /*.net1 =*/ net1, /*.net2 =*/ net2, /*.net3 =*/ net3, /*.nbt0 =*/ nbt0, /*.nbt1 =*/ nbt1, /*.nbt2 =*/ nbt2, /*.nbt3 =*/ nbt3, }; ggml_metal_encoder_set_pipeline(enc, pipeline_add); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_tmp, 1); ggml_metal_encoder_set_buffer (enc, bid_dst, 2); ggml_metal_encoder_dispatch_threadgroups(enc, net0*ne01, ne02, ne03, nth, 1, 1); } } return 1; } int ggml_metal_op_get_rows(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); 
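// Illustrative sketch (not part of the original source): a plain 1-D CPU reference for
// the blocked cumulative sum encoded above. Phase 1 scans each block of `nth` elements
// and stores the block totals, phase 2 scans the totals themselves, phase 3 adds each
// block's prefix back in -- the same three dispatches issued by ggml_metal_op_cumsum().
#if 0
static void cumsum_blocked_ref(float * x, float * totals /* scratch, one per block */, int n, int nth) {
    const int nblk = (n + nth - 1) / nth;
    for (int b = 0; b < nblk; ++b) {                        // phase 1: in-block inclusive scan
        float acc = 0.0f;
        const int end = (b + 1)*nth < n ? (b + 1)*nth : n;
        for (int i = b*nth; i < end; ++i) { acc += x[i]; x[i] = acc; }
        totals[b] = acc;
    }
    for (int b = 1; b < nblk; ++b) {
        totals[b] += totals[b - 1];                         // phase 2: scan the block totals
    }
    for (int b = 1; b < nblk; ++b) {                        // phase 3: add the preceding blocks' sum
        const int end = (b + 1)*nth < n ? (b + 1)*nth : n;
        for (int i = b*nth; i < end; ++i) { x[i] += totals[b - 1]; }
    }
}
#endif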
ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_get_rows(lib, op->src[0]->type); ggml_metal_kargs_get_rows args = { /*.ne00t =*/ ggml_is_quantized(op->src[0]->type) ? ne00/16 : ne00, /*.ne00 =*/ ne00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne10 =*/ ne10, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; const int nth = std::min(args.ne00t, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); const int nw0 = (args.ne00t + nth - 1)/nth; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, nw0*ne10, ne11, ne12, nth, 1, 1); return 1; } int ggml_metal_op_set_rows(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_set_rows(lib, op->src[1]->type, op->type); const int32_t nk0 = ne0/ggml_blck_size(op->type); int nth = 32; // SIMD width while (nth < nk0 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } int nrptg = 1; if (nth > nk0) { nrptg = (nth + nk0 - 1)/nk0; nth = nk0; if (nrptg*nth > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nrptg--; } } nth = std::min(nth, nk0); ggml_metal_kargs_set_rows args = { /*.nk0 =*/ nk0, /*.ne01 =*/ ne01, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nrptg - 1)/nrptg, ne02, ne03, nth, nrptg, 1); return 1; } int ggml_metal_op_soft_max(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float scale; float max_bias; memcpy(&scale, ((const int32_t *) 
op->op_params) + 0, sizeof(scale)); memcpy(&max_bias, ((const int32_t *) op->op_params) + 1, sizeof(max_bias)); const uint32_t n_head = op->src[0]->ne[2]; const int32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); // softmax ggml_metal_kargs_soft_max args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.ne13 =*/ ne13, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.scale =*/ scale, /*.max_bias =*/ max_bias, /*.m0 =*/ m0, /*.m1 =*/ m1, /*.n_head_log2 =*/ n_head_log2, }; auto pipeline = ggml_metal_library_get_pipeline_soft_max(lib, op); int nth = 32; // SIMD width if (ne00%4 == 0) { while (nth < ne00/4 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } } else { while (nth < ne00 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } } const size_t smem = pipeline.smem; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1); if (op->src[1]) { ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2); } else { ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 2); } if (op->src[2]) { ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[2]), 3); } else { ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 3); } ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 4); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); return 1; } int ggml_metal_op_ssm_conv(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); ggml_metal_kargs_ssm_conv args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, }; // Use batched kernel for prefill (ne1 > 1) to reduce threadgroup dispatch overhead const bool use_batched = (ne1 > 1); if (use_batched) { // Determine the smallest power of 2 that's >= ne1, but <= 256 int BATCH_SIZE; if (ne1 > 128) BATCH_SIZE = 256; else if (ne1 > 64 ) BATCH_SIZE = 128; else if (ne1 > 32 ) BATCH_SIZE = 64; else if (ne1 > 16 ) BATCH_SIZE = 32; else if (ne1 > 8 ) BATCH_SIZE = 16; else if (ne1 > 4 ) BATCH_SIZE = 8; else BATCH_SIZE = 2; auto pipeline = ggml_metal_library_get_pipeline_ssm_conv_batched(lib, op, BATCH_SIZE); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3); // Dispatch: ne01 rows, ceil(ne1/BATCH_SIZE) token batches, ne02 sequences // Each 
threadgroup has BATCH_SIZE threads, each handling one token const int n_token_batches = (ne1 + BATCH_SIZE - 1) / BATCH_SIZE; ggml_metal_encoder_dispatch_threadgroups(enc, ne01, n_token_batches, ne02, BATCH_SIZE, 1, 1); } else { auto pipeline = ggml_metal_library_get_pipeline_ssm_conv(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne1, ne02, 1, 1, 1); } return 1; } int ggml_metal_op_ssm_scan(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne); GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb); GGML_TENSOR_LOCALS( int32_t, ne4, op->src[4], ne); GGML_TENSOR_LOCALS(uint64_t, nb4, op->src[4], nb); GGML_TENSOR_LOCALS( int32_t, ne5, op->src[5], ne); GGML_TENSOR_LOCALS(uint64_t, nb5, op->src[5], nb); GGML_TENSOR_LOCALS( int32_t, ne6, op->src[6], ne); GGML_TENSOR_LOCALS(uint64_t, nb6, op->src[6], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const ggml_tensor * src3 = op->src[3]; const ggml_tensor * src4 = op->src[4]; const ggml_tensor * src5 = op->src[5]; const ggml_tensor * src6 = op->src[6]; GGML_ASSERT(src3); GGML_ASSERT(src4); GGML_ASSERT(src5); GGML_ASSERT(src6); const int64_t d_state = ne00; const int64_t d_inner = ne01; const int64_t n_head = ne02; const int64_t n_group = ne41; const int64_t n_seq_tokens = ne12; const int64_t n_seqs = ne13; ggml_metal_kargs_ssm_scan args = { /*.d_state =*/ d_state, /*.d_inner =*/ d_inner, /*.n_head =*/ n_head, /*.n_group =*/ n_group, /*.n_seq_tokens =*/ n_seq_tokens, /*.n_seqs =*/ n_seqs, /*.s_off =*/ ggml_nelements(op->src[1]) * sizeof(float), /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.ns12 =*/ nb12/nb10, /*.nb13 =*/ nb13, /*.nb20 =*/ nb20, /*.nb21 =*/ nb21, /*.ns21 =*/ nb21/nb20, /*.nb22 =*/ nb22, /*.ne30 =*/ ne30, /*.nb31 =*/ nb31, /*.nb41 =*/ nb41, /*.nb42 =*/ nb42, /*.ns42 =*/ nb42/nb40, /*.nb43 =*/ nb43, /*.nb51 =*/ nb51, /*.nb52 =*/ nb52, /*.ns52 =*/ nb52/nb50, /*.nb53 =*/ nb53, /*.nb0 =*/ nb0, }; auto pipeline = ggml_metal_library_get_pipeline_ssm_scan(lib, op); GGML_ASSERT(d_state <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); const size_t smem = pipeline.smem; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), 3); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), 4); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), 5); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[5]), 6); 
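// note: the remaining bindings below complete the operand list - buffers 1..7 hold src0..src6
//       (presumably the scan state, x, dt, A, B, C and the sequence ids) and buffer 8 is dst;
//       the dispatch uses one threadgroup per (inner element, head, sequence) with d_state
//       threads each, which is why d_state must fit in a single threadgroup (see the assert above)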
ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[6]), 7); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 8); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, d_inner, n_head, n_seqs, d_state, 1, 1); return 1; } int ggml_metal_op_rwkv(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int64_t B = op->op == GGML_OP_RWKV_WKV6 ? op->src[5]->ne[1] : op->src[6]->ne[1]; const int64_t T = op->src[0]->ne[2]; const int64_t C = op->ne[0]; const int64_t H = op->src[0]->ne[1]; auto pipeline = ggml_metal_library_get_pipeline_rwkv(lib, op); int ida = 0; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[5]), ida++); if (op->op == GGML_OP_RWKV_WKV7) { ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[6]), ida++); } ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), ida++); ggml_metal_encoder_set_bytes (enc, (void *) &B, sizeof(B), ida++); ggml_metal_encoder_set_bytes (enc, (void *) &T, sizeof(T), ida++); ggml_metal_encoder_set_bytes (enc, (void *) &C, sizeof(C), ida++); ggml_metal_encoder_set_bytes (enc, (void *) &H, sizeof(H), ida++); ggml_metal_encoder_dispatch_threadgroups(enc, B * H, 1, 1, C/H, 1, 1); return 1; } int ggml_metal_op_cpy(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_cpy(lib, op->src[0]->type, op->type); GGML_ASSERT(ne00 % ggml_blck_size(op->src[0]->type) == 0); int64_t nk0 = ne00; if (ggml_is_quantized(op->src[0]->type)) { nk0 = ne00/16; } else if (ggml_is_quantized(op->type)) { nk0 = ne00/ggml_blck_size(op->type); } int nth = std::min(nk0, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); // when rows are small, we can batch them together in a single threadgroup int nrptg = 1; // TODO: relax this constraint in the future if (ggml_blck_size(op->src[0]->type) == 1 && ggml_blck_size(op->type) == 1) { if (nth > nk0) { nrptg = (nth + nk0 - 1)/nk0; nth = nk0; if (nrptg*nth > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nrptg--; } } } nth = std::min(nth, nk0); ggml_metal_kargs_cpy args = { /*.nk0 =*/ nk0, /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; const int nw0 = nrptg == 1 ? 
(nk0 + nth - 1)/nth : 1; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, nw0*(ne01 + nrptg - 1)/nrptg, ne02, ne03, nth, nrptg, 1); return 1; } int ggml_metal_op_pool_2d(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int32_t * opts = op->op_params; ggml_op_pool op_pool = (ggml_op_pool) opts[0]; const int32_t k0 = opts[1]; const int32_t k1 = opts[2]; const int32_t s0 = opts[3]; const int32_t s1 = opts[4]; const int32_t p0 = opts[5]; const int32_t p1 = opts[6]; const int64_t IH = op->src[0]->ne[1]; const int64_t IW = op->src[0]->ne[0]; const int64_t N = op->ne[3]; const int64_t OC = op->ne[2]; const int64_t OH = op->ne[1]; const int64_t OW = op->ne[0]; const int64_t np = N * OC * OH * OW; ggml_metal_kargs_pool_2d args_pool_2d = { /* .k0 = */ k0, /* .k1 = */ k1, /* .s0 = */ s0, /* .s1 = */ s1, /* .p0 = */ p0, /* .p1 = */ p1, /* .IH = */ IH, /* .IW = */ IW, /* .OH = */ OH, /* .OW = */ OW, /* .np = */ np }; auto pipeline = ggml_metal_library_get_pipeline_pool_2d(lib, op, op_pool); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), (int) np); const int ntg = (np + nth - 1) / nth; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args_pool_2d, sizeof(args_pool_2d), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, ntg, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_mul_mat(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); GGML_ASSERT(ne00 == ne10); GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % ne03 == 0); const int16_t r2 = ne12/ne02; const int16_t r3 = ne13/ne03; // find the break-even point where the matrix-matrix kernel becomes more efficient compared // to the matrix-vector kernel const int ne11_mm_min = 8; // first try to use small-batch mat-mv kernels // these should be efficient for BS [2, ~8] if (op->src[1]->type == GGML_TYPE_F32 && (ne00%128 == 0) && ( ( ( op->src[0]->type == GGML_TYPE_F32 || // TODO: helper function op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_Q4_1 || op->src[0]->type == GGML_TYPE_Q5_0 || op->src[0]->type == GGML_TYPE_Q5_1 || op->src[0]->type == GGML_TYPE_Q8_0 || op->src[0]->type == GGML_TYPE_MXFP4 || op->src[0]->type == GGML_TYPE_IQ4_NL || false) && (ne11 >= 2 && ne11 <= 8) ) || ( ( op->src[0]->type == GGML_TYPE_Q4_K || op->src[0]->type == GGML_TYPE_Q5_K || op->src[0]->type == GGML_TYPE_Q6_K || false) && (ne11 >= 
4 && ne11 <= 8) ) ) ) { // TODO: determine the optimal parameters based on grid utilization // I still don't know why we should not always use the maximum available threads: // // nsg = pipeline.maxTotalThreadsPerThreadgroup / 32 // // my current hypothesis is that the work grid is not evenly divisible for different nsg // values and there can be some tail effects when nsg is high. need to confirm this // const int nsg = 2; // num simdgroups per threadgroup // num threads along row per simdgroup int16_t nxpsg = 0; if (ne00 % 256 == 0 && ne11 < 3) { nxpsg = 16; } else if (ne00 % 128 == 0) { nxpsg = 8; } else { nxpsg = 4; } const int16_t nypsg = 32/nxpsg; // num threads along col per simdgroup (i.e. a simdgroup processes that many src0 rows at a time) const int16_t r0ptg = nypsg*nsg; // num src0 rows per threadgroup int16_t r1ptg = 4; // num src1 rows per threadgroup // note: not sure how optimal are those across all different hardware. there might be someting cleverer switch (ne11) { case 2: r1ptg = 2; break; case 3: case 6: r1ptg = 3; break; case 4: case 7: case 8: r1ptg = 4; break; case 5: r1ptg = 5; break; default: GGML_ABORT("unsupported ne11"); }; auto pipeline = ggml_metal_library_get_pipeline_mul_mv_ext(lib, op->src[0]->type, op->src[1]->type, nsg, nxpsg, r1ptg); ggml_metal_kargs_mul_mv_ext args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.r2 =*/ r2, /*.r3 =*/ r3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + r0ptg - 1)/r0ptg), ((ne11 + r1ptg - 1)/r1ptg), ne12*ne13, 32, nsg, 1); } else if ( !ggml_is_transposed(op->src[0]) && !ggml_is_transposed(op->src[1]) && // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel props_dev->has_simdgroup_mm && ne00 >= 64 && ne11 > ne11_mm_min) { //GGML_LOG_INFO("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12); // some Metal matrix data types require aligned pointers // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5) //switch (op->src[0]->type) { // case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break; // case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break; // case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8 == 0); break; // default: break; //} auto pipeline = ggml_metal_library_get_pipeline_mul_mm(lib, op); ggml_metal_kargs_mul_mm args = { /*.ne00 =*/ ne00, /*.ne02 =*/ ne02, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne12 =*/ ne12, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.r2 =*/ r2, /*.r3 =*/ r3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, 
ggml_metal_get_buffer_id(op), 3); const size_t smem = pipeline.smem; ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, ((ne11 + 31)/32), ((ne01 + 63)/64), ne12*ne13, 128, 1, 1); } else { auto pipeline = ggml_metal_library_get_pipeline_mul_mv(lib, op); const int nr0 = pipeline.nr0; const int nr1 = pipeline.nr1; const int nsg = pipeline.nsg; const size_t smem = pipeline.smem; ggml_metal_kargs_mul_mv args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.nr0 =*/ nr0, /*.r2 =*/ r2, /*.r3 =*/ r3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); if (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_BF16 || op->src[0]->type == GGML_TYPE_Q8_0) { ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + nr0 - 1)/(nr0)), ((ne11 + nr1 - 1)/nr1), ne12*ne13, 32, nsg, 1); } else { ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + nr0*nsg - 1)/(nr0*nsg)), ((ne11 + nr1 - 1)/nr1), ne12*ne13, 32, nsg, 1); } } return 1; } size_t ggml_metal_op_mul_mat_id_extra_tpe(const ggml_tensor * op) { assert(op->op == GGML_OP_MUL_MAT_ID); const int64_t ne02 = op->src[0]->ne[2]; // n_expert return ggml_type_size(GGML_TYPE_I32)*ne02; } size_t ggml_metal_op_mul_mat_id_extra_ids(const ggml_tensor * op) { assert(op->op == GGML_OP_MUL_MAT_ID); const int64_t ne02 = op->src[0]->ne[2]; // n_expert const int64_t ne21 = op->src[2]->ne[1]; // n_token return ggml_type_size(GGML_TYPE_I32)*ne02*ne21; } int ggml_metal_op_mul_mat_id(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); // src2 = ids GGML_ASSERT(op->src[2]->type == GGML_TYPE_I32); GGML_ASSERT(!ggml_is_transposed(op->src[0])); GGML_ASSERT(!ggml_is_transposed(op->src[1])); GGML_ASSERT(ne03 == 1); GGML_ASSERT(ne13 == 1); ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]); ggml_metal_buffer_id bid_src2 = ggml_metal_get_buffer_id(op->src[2]); ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); const uint32_t r2 = 1; const uint32_t r3 = 1; // find the break-even point where the matrix-matrix kernel becomes more efficient compared // to the matrix-vector kernel // ne20 = n_used_experts // ne21 = n_rows (batch size) const int ne21_mm_id_min = 32; if (props_dev->has_simdgroup_mm && ne00 >= 64 && (ne21 >= ne21_mm_id_min)) { // some Metal matrix 
data types require aligned pointers // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5) //switch (op->src[0]->type) { // case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break; // case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break; // case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8 == 0); break; // default: break; //} // extra buffers for intermediate id mapping ggml_metal_buffer_id bid_tpe = bid_dst; bid_tpe.offs += ggml_nbytes(op); ggml_metal_buffer_id bid_ids = bid_tpe; bid_ids.offs += ggml_metal_op_mul_mat_id_extra_tpe(op); { ggml_metal_kargs_mul_mm_id_map0 args = { ne02, ne10, ne11, // n_expert_used (bcast) nb11, nb12, ne21, // n_tokens ne20, // n_expert_used nb21, }; auto pipeline = ggml_metal_library_get_pipeline_mul_mm_id_map0(lib, ne02, ne20); const size_t smem = pipeline.smem; GGML_ASSERT(ne02 <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); GGML_ASSERT(smem <= props_dev->max_theadgroup_memory_size); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src2, 1); ggml_metal_encoder_set_buffer (enc, bid_tpe, 2); ggml_metal_encoder_set_buffer (enc, bid_ids, 3); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, ne02, 1, 1); } // this barrier is always needed because the next kernel has to wait for the id maps to be computed ggml_metal_op_concurrency_reset(ctx); { auto pipeline = ggml_metal_library_get_pipeline_mul_mm_id(lib, op); ggml_metal_kargs_mul_mm_id args = { /*.ne00 =*/ ne00, /*.ne02 =*/ ne02, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne11 =*/ ne11, // n_expert_used (bcast) /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne20 =*/ ne20, // n_expert_used /*.ne21 =*/ ne21, // n_tokens /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.r2 =*/ r2, /*.r3 =*/ r3, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_src1, 2); ggml_metal_encoder_set_buffer (enc, bid_tpe, 3); ggml_metal_encoder_set_buffer (enc, bid_ids, 4); ggml_metal_encoder_set_buffer (enc, bid_dst, 5); const size_t smem = pipeline.smem; ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, (ne21 + 31)/32, (ne01 + 63)/64, ne02, 128, 1, 1); } } else { auto pipeline = ggml_metal_library_get_pipeline_mul_mv_id(lib, op); const int nr0 = pipeline.nr0; const int nr1 = pipeline.nr1; const int nsg = pipeline.nsg; const size_t smem = pipeline.smem; ggml_metal_kargs_mul_mv_id args = { /*.nei0 =*/ ne20, /*.nei1 =*/ ne21, /*.nbi1 =*/ nb21, /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.ne13 =*/ ne13, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.nb1 =*/ nb1, /*.nr0 =*/ nr0, }; if (ggml_is_quantized(op->src[0]->type)) { GGML_ASSERT(ne00 >= nsg*nr0); } ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer(enc, bid_src0, 1); ggml_metal_encoder_set_buffer(enc, bid_src1, 2); ggml_metal_encoder_set_buffer(enc, bid_dst, 3); ggml_metal_encoder_set_buffer(enc, bid_src2, 4); const int64_t _ne1 = 1; const int64_t ne123 = ne20*ne21; 
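// note: in this matrix-vector (mul_mv_id) path each (expert, token) pair is mapped to its own
//       slice of the dispatch grid: _ne1 = 1 row of src1 per slice and ne123 = ne20*ne21
//       (n_expert_used * n_tokens) slices along z, mirroring the ne12*ne13 batching of the
//       plain mul_mv kernel above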
ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); if (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_BF16 || op->src[0]->type == GGML_TYPE_Q8_0) { ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nr0 - 1)/(nr0), (_ne1 + nr1 - 1)/nr1, ne123, 32, nsg, 1); } else { ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nr0*nsg - 1)/(nr0*nsg), (_ne1 + nr1 - 1)/nr1, ne123, 32, nsg, 1); } } return 1; } int ggml_metal_op_add_id(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[2]->type == GGML_TYPE_I32); GGML_ASSERT(op->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); ggml_metal_kargs_add_id args = { /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb11 =*/ nb11, /*.nb21 =*/ nb21, }; auto pipeline = ggml_metal_library_get_pipeline_base(lib, GGML_OP_ADD_ID); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), 3); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 4); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, 1, nth, 1, 1); return 1; } bool ggml_metal_op_flash_attn_ext_use_vec(const ggml_tensor * op) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); const int64_t ne00 = op->src[0]->ne[0]; // head size const int64_t ne01 = op->src[0]->ne[1]; // batch size // use vec kernel if the batch size is small and if the head size is supported return (ne01 < 20) && (ne00 % 32 == 0); } size_t ggml_metal_op_flash_attn_ext_extra_pad(const ggml_tensor * op) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne); GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb); size_t res = 0; const bool has_mask = op->src[3] != nullptr; if (ggml_metal_op_flash_attn_ext_use_vec(op)) { // note: always reserve the padding space to avoid graph reallocations //const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_VEC_NCPSG != 0; const bool has_kvpad = true; if (has_kvpad) { res += OP_FLASH_ATTN_EXT_VEC_NCPSG*( nb11*ne12*ne13 + nb21*ne22*ne23 + (has_mask ? ggml_type_size(GGML_TYPE_F16)*ne31*ne32*ne33 : 0)); } } else { //const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_NCPSG != 0; const bool has_kvpad = true; if (has_kvpad) { res += OP_FLASH_ATTN_EXT_NCPSG*( nb11*ne12*ne13 + nb21*ne22*ne23 + (has_mask ? 
ggml_type_size(GGML_TYPE_F16)*ne31*ne32*ne33 : 0)); } } return res; } size_t ggml_metal_op_flash_attn_ext_extra_blk(const ggml_tensor * op) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); //GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); //GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); //GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); //GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); //GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne); GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb); size_t res = 0; const bool has_mask = op->src[3] != nullptr; if (!has_mask) { return res; } const bool is_vec = ggml_metal_op_flash_attn_ext_use_vec(op); // this optimization is not useful for the vector kernels // note: always reserve the blk buffer to avoid graph reallocations //if (is_vec) { // return res; //} const int nqptg = is_vec ? OP_FLASH_ATTN_EXT_VEC_NQPTG : OP_FLASH_ATTN_EXT_NQPTG; const int ncpsg = is_vec ? OP_FLASH_ATTN_EXT_VEC_NCPSG : OP_FLASH_ATTN_EXT_NCPSG; const int64_t ne1 = (ne01 + nqptg - 1)/nqptg; const int64_t ne0 = (ne30 + ncpsg - 1)/ncpsg; res += GGML_PAD(ggml_type_size(GGML_TYPE_I8)*ne0*ne1*ne32*ne33, 32); return res; } size_t ggml_metal_op_flash_attn_ext_extra_tmp(const ggml_tensor * op) { assert(op->op == GGML_OP_FLASH_ATTN_EXT); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); //GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); //GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); //GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne); //GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb); size_t res = 0; // note: always reserve the temp buffer to avoid graph reallocations //if (ggml_metal_op_flash_attn_ext_use_vec(op)) { if (true) { const int64_t nwg = 32; const int64_t ne01_max = std::min(ne01, 32); // temp buffer for writing the results from each workgroup // - ne20: the size of the Value head // - + 2: the S and M values for each intermediate result res += ggml_type_size(GGML_TYPE_F32)*(ne01_max*ne02*ne03*nwg*(ne20 + 2)); } return res; } int ggml_metal_op_flash_attn_ext(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne); GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS( int32_t, nb, op, nb); GGML_ASSERT(ne00 % 4 == 0); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == op->src[2]->type); //GGML_ASSERT(ggml_are_same_shape (src1, src2)); GGML_ASSERT(ne11 == ne21); GGML_ASSERT(ne12 == ne22); GGML_ASSERT(!op->src[3] || op->src[3]->type == GGML_TYPE_F16); GGML_ASSERT(!op->src[3] || op->src[3]->ne[1] >= op->src[0]->ne[1] && "the Flash-Attention Metal kernel requires the mask to be at least n_queries big"); float scale; float max_bias; float logit_softcap; memcpy(&scale, ((const int32_t *) op->op_params) + 0, 
sizeof(scale)); memcpy(&max_bias, ((const int32_t *) op->op_params) + 1, sizeof(max_bias)); memcpy(&logit_softcap, ((const int32_t *) op->op_params) + 2, sizeof(logit_softcap)); if (logit_softcap != 0.0f) { scale /= logit_softcap; } const bool has_mask = op->src[3] != NULL; const bool has_sinks = op->src[4] != NULL; const bool has_bias = max_bias != 0.0f; const bool has_scap = logit_softcap != 0.0f; const uint32_t n_head = op->src[0]->ne[2]; const int32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); GGML_ASSERT(ne01 < 65536); ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]); ggml_metal_buffer_id bid_src2 = ggml_metal_get_buffer_id(op->src[2]); ggml_metal_buffer_id bid_src3 = has_mask ? ggml_metal_get_buffer_id(op->src[3]) : bid_src0; ggml_metal_buffer_id bid_src4 = has_sinks ? ggml_metal_get_buffer_id(op->src[4]) : bid_src0; ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); ggml_metal_buffer_id bid_pad = bid_dst; bid_pad.offs += ggml_nbytes(op); ggml_metal_buffer_id bid_blk = bid_pad; bid_blk.offs += ggml_metal_op_flash_attn_ext_extra_pad(op); ggml_metal_buffer_id bid_tmp = bid_blk; bid_tmp.offs += ggml_metal_op_flash_attn_ext_extra_blk(op); if (!ggml_metal_op_flash_attn_ext_use_vec(op)) { // half8x8 kernel const int nqptg = OP_FLASH_ATTN_EXT_NQPTG; // queries per threadgroup const int ncpsg = OP_FLASH_ATTN_EXT_NCPSG; // cache values per simdgroup GGML_ASSERT(nqptg <= 32); GGML_ASSERT(nqptg % 8 == 0); GGML_ASSERT(ncpsg % 32 == 0); bool need_sync = false; const bool has_kvpad = ne11 % ncpsg != 0; if (has_kvpad) { assert(ggml_metal_op_flash_attn_ext_extra_pad(op) != 0); ggml_metal_kargs_flash_attn_ext_pad args0 = { /*.ne11 =*/ne11, /*.ne_12_2 =*/ne12, /*.ne_12_3 =*/ne13, /*.nb11 =*/nb11, /*.nb12 =*/nb12, /*.nb13 =*/nb13, /*.nb21 =*/nb21, /*.nb22 =*/nb22, /*.nb23 =*/nb23, /*.ne31 =*/ne31, /*.ne32 =*/ne32, /*.ne33 =*/ne33, /*.nb31 =*/nb31, /*.nb32 =*/nb32, /*.nb33 =*/nb33, }; auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_pad(lib, op, has_mask, ncpsg); ggml_metal_encoder_set_pipeline(enc, pipeline0); ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0); ggml_metal_encoder_set_buffer (enc, bid_src1, 1); ggml_metal_encoder_set_buffer (enc, bid_src2, 2); ggml_metal_encoder_set_buffer (enc, bid_src3, 3); ggml_metal_encoder_set_buffer (enc, bid_pad, 4); assert(ne12 == ne22); assert(ne13 == ne23); ggml_metal_encoder_dispatch_threadgroups(enc, ncpsg, std::max(ne12, ne32), std::max(ne13, ne33), 32, 1, 1); need_sync = true; } if (has_mask) { assert(ggml_metal_op_flash_attn_ext_extra_blk(op) != 0); ggml_metal_kargs_flash_attn_ext_blk args0 = { /*.ne01 =*/ ne01, /*.ne30 =*/ ne30, /*.ne31 =*/ ne31, /*.ne32 =*/ ne32, /*.ne33 =*/ ne33, /*.nb31 =*/ nb31, /*.nb32 =*/ nb32, /*.nb33 =*/ nb33, }; auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_blk(lib, op, nqptg, ncpsg); ggml_metal_encoder_set_pipeline(enc, pipeline0); ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0); ggml_metal_encoder_set_buffer (enc, bid_src3, 1); ggml_metal_encoder_set_buffer (enc, bid_blk, 2); const int32_t nblk1 = ((ne01 + nqptg - 1)/nqptg); const int32_t nblk0 = ((ne30 + ncpsg - 1)/ncpsg); ggml_metal_encoder_dispatch_threadgroups(enc, nblk0, nblk1, ne32*ne33, 32, 1, 1); need_sync = true; } if (need_sync) { ggml_metal_op_concurrency_reset(ctx); } const 
int is_q = ggml_is_quantized(op->src[1]->type) ? 1 : 0; // 2*(2*ncpsg) // ncpsg soft_max values + ncpsg mask values // // 16*32*(nsg) // the shared memory needed for the simdgroups to load the KV cache // each thread loads (dequantizes) 16 head elements, there are 32 threads in th SG // #define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(ne00 + 2*GGML_PAD(ne20, 64) + 2*(2*ncpsg)) + is_q*(16*32*(nsg)))*(sizeof(float)/2), 16)) //int64_t nsgmax = 4; // //if (is_q) { // nsgmax = 2; // while (true) { // const size_t smem = FATTN_SMEM(nsgmax); // if (smem > props_dev->max_theadgroup_memory_size) { // break; // } // nsgmax *= 2; // } // nsgmax /= 2; //} // simdgroups per threadgroup (a.k.a. warps) //nsg = ne01 <= nqptg ? MAX(4, MIN(nsgmax, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))) : 4; int32_t nsg = 4; const size_t smem = FATTN_SMEM(nsg); ggml_metal_kargs_flash_attn_ext args = { /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne11 =*/ ne11, /*.ne_12_2 =*/ ne12, /*.ne_12_3 =*/ ne13, /*.ns10 =*/ int32_t(nb11/nb10), /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ns20 =*/ int32_t(nb21/nb20), /*.nb21 =*/ nb21, /*.nb22 =*/ nb22, /*.nb23 =*/ nb23, /*.ne31 =*/ ne31, /*.ne32 =*/ ne32, /*.ne33 =*/ ne33, /*.nb31 =*/ nb31, /*.nb32 =*/ nb32, /*.nb33 =*/ nb33, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.scale =*/ scale, /*.max_bias =*/ max_bias, /*.m0 =*/ m0, /*.m1 =*/ m1, /*.n_head_log2 =*/ n_head_log2, /*.logit_softcap =*/ logit_softcap, }; auto pipeline = ggml_metal_library_get_pipeline_flash_attn_ext(lib, op, has_mask, has_sinks, has_bias, has_scap, has_kvpad, nsg); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_src1, 2); ggml_metal_encoder_set_buffer (enc, bid_src2, 3); ggml_metal_encoder_set_buffer (enc, bid_src3, 4); ggml_metal_encoder_set_buffer (enc, bid_src4, 5); ggml_metal_encoder_set_buffer (enc, bid_pad, 6); ggml_metal_encoder_set_buffer (enc, bid_blk, 7); ggml_metal_encoder_set_buffer (enc, bid_dst, 8); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03, 32, nsg, 1); #undef FATTN_SMEM } else { // half4x4 kernel const int nqptg = OP_FLASH_ATTN_EXT_VEC_NQPTG; // queries per threadgroup const int ncpsg = OP_FLASH_ATTN_EXT_VEC_NCPSG; // cache values per simdgroup !! sync with kernel template arguments !! 
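// note: nkpsg below is presumably the number of KV cache values each simdgroup processes per
//       iteration (currently identical to ncpsg); it feeds the nsg/nwg selection further down,
//       where nsg is doubled until roughly 2*nwg*nsg*nkpsg covers the KV length ne11 (capped at nsg = 4)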
const int nkpsg = 1*ncpsg; GGML_ASSERT(nqptg <= 32); GGML_ASSERT(nqptg % 1 == 0); GGML_ASSERT(ncpsg % 32 == 0); bool need_sync = false; const bool has_kvpad = ne11 % ncpsg != 0; if (has_kvpad) { assert(ggml_metal_op_flash_attn_ext_extra_pad(op) != 0); ggml_metal_kargs_flash_attn_ext_pad args0 = { /*.ne11 =*/ne11, /*.ne_12_2 =*/ne12, /*.ne_12_3 =*/ne13, /*.nb11 =*/nb11, /*.nb12 =*/nb12, /*.nb13 =*/nb13, /*.nb21 =*/nb21, /*.nb22 =*/nb22, /*.nb23 =*/nb23, /*.ne31 =*/ne31, /*.ne32 =*/ne32, /*.ne33 =*/ne33, /*.nb31 =*/nb31, /*.nb32 =*/nb32, /*.nb33 =*/nb33, }; auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_pad(lib, op, has_mask, ncpsg); ggml_metal_encoder_set_pipeline(enc, pipeline0); ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0); ggml_metal_encoder_set_buffer (enc, bid_src1, 1); ggml_metal_encoder_set_buffer (enc, bid_src2, 2); ggml_metal_encoder_set_buffer (enc, bid_src3, 3); ggml_metal_encoder_set_buffer (enc, bid_pad, 4); assert(ne12 == ne22); assert(ne13 == ne23); ggml_metal_encoder_dispatch_threadgroups(enc, ncpsg, std::max(ne12, ne32), std::max(ne13, ne33), 32, 1, 1); need_sync = true; } if (need_sync) { ggml_metal_op_concurrency_reset(ctx); } // ne00 + 2*ncpsg*(nsg) // for each query, we load it as f16 in shared memory (ne00) // and store the soft_max values and the mask // // ne20*(nsg) // each simdgroup has a full f32 head vector in shared mem to accumulate results // #define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(GGML_PAD(ne00, 128) + 4*ncpsg*(nsg)) + 2*GGML_PAD(ne20, 128)*(nsg))*(sizeof(float)/2), 16)) int64_t nsgmax = 2; while (true) { const size_t smem = FATTN_SMEM(nsgmax); // avoid using more than half of the threadgroup memory - can cause slow downs especially for large head sizes if (smem > props_dev->max_theadgroup_memory_size/2) { break; } nsgmax *= 2; } nsgmax /= 2; // simdgroups per threadgroup (a.k.a. 
warps) //const int64_t nsgt = MAX(2, MIN(nsgmax, MIN((ne11 + nkpsg - 1)/(nkpsg), (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))); const int64_t nsgt = MAX(2, MIN(nsgmax, MIN((ne11 + nkpsg - 1)/(nkpsg), (int64_t) 1024/32))); int64_t nsg = 1; while (nsg <= nsgt) { nsg *= 2; } nsg /= 2; // workgroups // each workgroup handles nsg*nkpsg cache values int32_t nwg = 1; if (false) { // for small KV caches, we could launch a single workgroup and write the results directly to dst/ // however, this does not lead to significant improvement, so disabled nwg = 1; nsg = 4; } else { nwg = 32; nsg = 1; while (2*nwg*nsg*nkpsg < ne11 && nsg < 4) { nsg *= 2; } } ggml_metal_kargs_flash_attn_ext_vec args = { /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne11 =*/ ne11, /*.ne_12_2 =*/ ne12, /*.ne_12_3 =*/ ne13, /*.ns10 =*/ int32_t(nb11/nb10), /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ns20 =*/ int32_t(nb21/nb20), /*.nb21 =*/ nb21, /*.nb22 =*/ nb22, /*.nb23 =*/ nb23, /*.ne31 =*/ ne31, /*.ne32 =*/ ne32, /*.ne33 =*/ ne33, /*.nb31 =*/ nb31, /*.nb32 =*/ nb32, /*.nb33 =*/ nb33, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.scale =*/ scale, /*.max_bias =*/ max_bias, /*.m0 =*/ m0, /*.m1 =*/ m1, /*.n_head_log2 =*/ n_head_log2, /*.logit_softcap =*/ logit_softcap, }; auto pipeline = ggml_metal_library_get_pipeline_flash_attn_ext_vec(lib, op, has_mask, has_sinks, has_bias, has_scap, has_kvpad, nsg, nwg); GGML_ASSERT(nsg*32 <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_src1, 2); ggml_metal_encoder_set_buffer (enc, bid_src2, 3); ggml_metal_encoder_set_buffer (enc, bid_src3, 4); ggml_metal_encoder_set_buffer (enc, bid_src4, 5); const size_t smem = FATTN_SMEM(nsg); //printf("smem: %zu, max: %zu, nsg = %d, nsgmax = %d\n", smem, props_dev->max_theadgroup_memory_size, (int) nsg, (int) nsgmax); GGML_ASSERT(smem <= props_dev->max_theadgroup_memory_size); if (nwg == 1) { assert(ggml_metal_op_flash_attn_ext_extra_tmp(op) == 0); // using 1 workgroup -> write the result directly into dst ggml_metal_encoder_set_buffer(enc, bid_pad, 6); ggml_metal_encoder_set_buffer(enc, bid_dst, 7); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03*nwg, 32, nsg, 1); } else { // sanity checks assert(ggml_metal_op_flash_attn_ext_extra_tmp(op) != 0); GGML_ASSERT(ne01*ne02*ne03 == ne1*ne2*ne3); GGML_ASSERT((uint64_t)ne1*ne2*ne3 <= (1u << 31)); // write the results from each workgroup into a temp buffer ggml_metal_encoder_set_buffer(enc, bid_pad, 6); ggml_metal_encoder_set_buffer(enc, bid_tmp, 7); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03*nwg, 32, nsg, 1); // sync the 2 kernels ggml_metal_op_concurrency_reset(ctx); // reduce the results from the workgroups { const int32_t nrows = ne1*ne2*ne3; ggml_metal_kargs_flash_attn_ext_vec_reduce args0 = { nrows, }; auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(lib, op, ne20, nwg); ggml_metal_encoder_set_pipeline(enc, pipeline0); ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0); ggml_metal_encoder_set_buffer (enc, bid_tmp, 1); ggml_metal_encoder_set_buffer (enc, bid_dst, 2); 
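// note: the reduce kernel combines the nwg partial results per output row (ne20 accumulated
//       values plus the running S and M statistics, see ggml_metal_op_flash_attn_ext_extra_tmp)
//       into the final dst; one threadgroup of 32*nwg threads per row is dispatched below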
ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, 32*nwg, 1, 1); } } #undef FATTN_SMEM } return 1; } int ggml_metal_op_bin(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; const bool use_fusion = ctx->use_fusion; const int debug_fusion = ctx->debug_fusion; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); GGML_ASSERT(ggml_is_contiguous_rows(op->src[1])); bool bcast_row = false; ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]); ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); ggml_metal_kargs_bin args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne10 =*/ ne10, /*.ne11 =*/ ne11, /*.ne12 =*/ ne12, /*.ne13 =*/ ne13, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.offs =*/ 0, /*.o1 =*/ { bid_src1.offs }, }; ggml_op fops[8]; int n_fuse = 1; // c[0] = add(a, b[0]) // c[1] = add(c[0], b[1]) // c[2] = add(c[1], b[2]) // ... if (use_fusion) { fops[0] = GGML_OP_ADD; fops[1] = GGML_OP_ADD; fops[2] = GGML_OP_ADD; fops[3] = GGML_OP_ADD; fops[4] = GGML_OP_ADD; fops[5] = GGML_OP_ADD; fops[6] = GGML_OP_ADD; fops[7] = GGML_OP_ADD; // note: in metal, we sometimes encode the graph in parallel so we have to avoid fusing ops // across splits. idx_end indicates the last node in the current split for (n_fuse = 0; n_fuse <= 6; ++n_fuse) { if (!ctx->can_fuse(idx + n_fuse, fops + n_fuse, 2)) { break; } ggml_tensor * f0 = ctx->node(idx + n_fuse); ggml_tensor * f1 = ctx->node(idx + n_fuse + 1); if (f0 != f1->src[0]) { break; } // b[0] === b[1] === ... 
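// note: fusion is only safe when every fused ADD reads its second operand with the same layout
//       and from the same Metal buffer as src1, since the fused kernel addresses all of them
//       through the single o1[] offset array - this is what the checks below enforce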
if (!ggml_are_same_layout(f0->src[1], f1->src[1])) { break; } // only fuse ops if src1 is in the same Metal buffer ggml_metal_buffer_id bid_fuse = ggml_metal_get_buffer_id(f1->src[1]); if (bid_fuse.metal != bid_src1.metal) { break; } //ctx->fuse_cnt[ops[n_fuse + 1]->op]++; args.o1[n_fuse + 1] = bid_fuse.offs; } ++n_fuse; if (debug_fusion > 1 && n_fuse > 1) { GGML_LOG_DEBUG("%s: fuse: ADD x %d\n", __func__, n_fuse); } } // the offsets of src1 and all fused buffers are relative to the start of the src1 buffer bid_src1.offs = 0; struct ggml_metal_pipeline_with_params pipeline; if (ggml_nelements(op->src[1]) == ne10 && ggml_is_contiguous(op->src[1]) && ne00 % 4 == 0 && ne10 % 4 == 0) { GGML_ASSERT(ggml_is_contiguous(op->src[0])); // src1 is a row GGML_ASSERT(ne11 == 1); pipeline = ggml_metal_library_get_pipeline_bin(lib, op->op, n_fuse, true); bcast_row = true; } else { pipeline = ggml_metal_library_get_pipeline_bin(lib, op->op, n_fuse, false); } if (n_fuse > 1) { bid_dst = ggml_metal_get_buffer_id(ctx->node(idx + n_fuse - 1)); for (int i = 1; i < n_fuse; ++i) { if (!ggml_metal_op_concurrency_check(ctx, ctx->node(idx + i))) { ggml_metal_op_concurrency_reset(ctx); break; } } } ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_src1, 2); ggml_metal_encoder_set_buffer (enc, bid_dst, 3); if (bcast_row) { const int64_t n = ggml_nelements(op)/4; ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); } else { int nth = 32; while (16*nth < ne0 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); } return n_fuse; } int ggml_metal_op_l2_norm(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float eps; memcpy(&eps, op->op_params, sizeof(float)); int nth = 32; // SIMD width ggml_metal_kargs_l2_norm args = { /*.ne00 =*/ ne00, /*.ne00_4 =*/ ne00/4, /*.nb01 =*/ nb01, /*.eps =*/ eps, }; auto pipeline = ggml_metal_library_get_pipeline_l2_norm(lib, op); while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); nth = std::min(nth, ne00/4); const size_t smem = pipeline.smem; const int64_t nrows = ggml_nrows(op->src[0]); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_group_norm(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int32_t ngrp = ((const int32_t *) op->op_params)[0]; float eps; memcpy(&eps, op->op_params + 1, 
sizeof(float)); ggml_metal_kargs_group_norm args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.ngrp =*/ ngrp, /*.eps =*/ eps, }; auto pipeline = ggml_metal_library_get_pipeline_group_norm(lib, op); int nth = 32; // SIMD width //while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { // nth *= 2; //} //nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); //nth = std::min(nth, ne00/4); const size_t smem = pipeline.smem; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, ngrp, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_norm(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; const bool use_fusion = ctx->use_fusion; const int debug_fusion = ctx->debug_fusion; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float eps; memcpy(&eps, op->op_params, sizeof(float)); ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); ggml_metal_kargs_norm args = { /*.ne00 =*/ ne00, /*.ne00_t =*/ ne00 % 4 == 0 ? ne00/4 : ne00, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.eps =*/ eps, /*.nef1 =*/ { ne01 }, /*.nef2 =*/ { ne02 }, /*.nef3 =*/ { ne03 }, /*.nbf1 =*/ { nb01 }, /*.nbf2 =*/ { nb02 }, /*.nbf3 =*/ { nb03 }, }; ggml_op fops[8]; int n_fuse = 1; ggml_metal_buffer_id bid_fuse[2] = { bid_src0, bid_src0 }; // d[0] = norm(a) // d[1] = mul(d[0], b) // d[2] = add(d[1], c) if (use_fusion) { fops[0] = op->op; fops[1] = GGML_OP_MUL; fops[2] = GGML_OP_ADD; for (n_fuse = 0; n_fuse <= 1; ++n_fuse) { if (!ctx->can_fuse(idx + n_fuse, fops + n_fuse, 2)) { break; } ggml_tensor * f0 = ctx->node(idx + n_fuse); ggml_tensor * f1 = ctx->node(idx + n_fuse + 1); if (f0 != f1->src[0]) { break; } if (f1->src[1]->ne[0] != op->ne[0]) { break; } if (!ggml_is_contiguous_rows(f1->src[1])) { break; } if (f1->type != GGML_TYPE_F32) { break; } //ctx->fuse_cnt[f1->op]++; bid_fuse[n_fuse] = ggml_metal_get_buffer_id(f1->src[1]); args.nef1[n_fuse + 1] = f1->src[1]->ne[1]; args.nef2[n_fuse + 1] = f1->src[1]->ne[2]; args.nef3[n_fuse + 1] = f1->src[1]->ne[3]; args.nbf1[n_fuse + 1] = f1->src[1]->nb[1]; args.nbf2[n_fuse + 1] = f1->src[1]->nb[2]; args.nbf3[n_fuse + 1] = f1->src[1]->nb[3]; } ++n_fuse; if (debug_fusion > 1 && n_fuse > 1) { if (n_fuse == 2) { GGML_LOG_DEBUG("%s: fuse: %s + MUL\n", __func__, ggml_op_name(op->op)); } if (n_fuse == 3) { GGML_LOG_DEBUG("%s: fuse: %s + MUL + ADD\n", __func__, ggml_op_name(op->op)); } } } if (n_fuse > 1) { bid_dst = ggml_metal_get_buffer_id(ctx->node(idx + n_fuse - 1)); for (int i = 1; i < n_fuse; ++i) { if (!ggml_metal_op_concurrency_check(ctx, ctx->node(idx + i))) { ggml_metal_op_concurrency_reset(ctx); break; } } } auto pipeline = ggml_metal_library_get_pipeline_norm(lib, op, n_fuse); int nth = 32; // SIMD width while (nth < args.ne00_t && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } nth = std::min(nth, 
ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); nth = std::min(nth, args.ne00_t); const size_t smem = pipeline.smem; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_fuse[0], 2); ggml_metal_encoder_set_buffer (enc, bid_fuse[1], 3); ggml_metal_encoder_set_buffer (enc, bid_dst, 4); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); return n_fuse; } int ggml_metal_op_rope(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); // make sure we have one or more position id(ne10) per token(ne02) GGML_ASSERT(ne10 % ne02 == 0); GGML_ASSERT(ne10 >= ne02); const int nth = std::min(1024, ne00); const int n_past = ((const int32_t *) op->op_params)[0]; const int n_dims = ((const int32_t *) op->op_params)[1]; //const int mode = ((const int32_t *) op->op_params)[2]; // skip 3, n_ctx, used in GLM RoPE, unimplemented in metal const int n_ctx_orig = ((const int32_t *) op->op_params)[4]; float freq_base; float freq_scale; float ext_factor; float attn_factor; float beta_fast; float beta_slow; memcpy(&freq_base, (const int32_t *) op->op_params + 5, sizeof(float)); memcpy(&freq_scale, (const int32_t *) op->op_params + 6, sizeof(float)); memcpy(&ext_factor, (const int32_t *) op->op_params + 7, sizeof(float)); memcpy(&attn_factor, (const int32_t *) op->op_params + 8, sizeof(float)); memcpy(&beta_fast, (const int32_t *) op->op_params + 9, sizeof(float)); memcpy(&beta_slow, (const int32_t *) op->op_params + 10, sizeof(float)); // mrope const int sect_0 = ((const int32_t *) op->op_params)[11]; const int sect_1 = ((const int32_t *) op->op_params)[12]; const int sect_2 = ((const int32_t *) op->op_params)[13]; const int sect_3 = ((const int32_t *) op->op_params)[14]; ggml_metal_kargs_rope args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.n_past =*/ n_past, /*.n_dims =*/ n_dims, /*.n_ctx_orig =*/ n_ctx_orig, /*.freq_base =*/ freq_base, /*.freq_scale =*/ freq_scale, /*.ext_factor =*/ ext_factor, /*.attn_factor =*/ attn_factor, /*.beta_fast =*/ beta_fast, /*.beta_slow =*/ beta_slow, /* sect_0 =*/ sect_0, /* sect_1 =*/ sect_1, /* sect_2 =*/ sect_2, /* sect_3 =*/ sect_3, /* src2 =*/ op->src[2] != nullptr, }; auto pipeline = ggml_metal_library_get_pipeline_rope(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); if (op->src[2]) { ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), 3); } else { ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 3); } ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 4); 
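// note: buffer layout for the rope kernel: 0 = kargs, 1 = src0, 2 = src1 (positions),
//       3 = src2 (presumably the frequency factors; src0 is bound as a dummy when absent), 4 = dst;
//       one threadgroup per row (ne01 x ne02 x ne03) with nth = min(1024, ne00) threads is dispatched below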
ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); return 1; } int ggml_metal_op_im2col(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int32_t s0 = ((const int32_t *)(op->op_params))[0]; const int32_t s1 = ((const int32_t *)(op->op_params))[1]; const int32_t p0 = ((const int32_t *)(op->op_params))[2]; const int32_t p1 = ((const int32_t *)(op->op_params))[3]; const int32_t d0 = ((const int32_t *)(op->op_params))[4]; const int32_t d1 = ((const int32_t *)(op->op_params))[5]; const bool is_2D = ((const int32_t *)(op->op_params))[6] == 1; const int32_t N = op->src[1]->ne[is_2D ? 3 : 2]; const int32_t IC = op->src[1]->ne[is_2D ? 2 : 1]; const int32_t IH = is_2D ? op->src[1]->ne[1] : 1; const int32_t IW = op->src[1]->ne[0]; const int32_t KH = is_2D ? op->src[0]->ne[1] : 1; const int32_t KW = op->src[0]->ne[0]; const int32_t OH = is_2D ? op->ne[2] : 1; const int32_t OW = op->ne[1]; const int32_t CHW = IC * KH * KW; const uint64_t ofs0 = op->src[1]->nb[is_2D ? 3 : 2] / 4; const uint64_t ofs1 = op->src[1]->nb[is_2D ? 2 : 1] / 4; ggml_metal_kargs_im2col args = { /*.ofs0 =*/ ofs0, /*.ofs1 =*/ ofs1, /*.IW =*/ IW, /*.IH =*/ IH, /*.CHW =*/ CHW, /*.s0 =*/ s0, /*.s1 =*/ s1, /*.p0 =*/ p0, /*.p1 =*/ p1, /*.d0 =*/ d0, /*.d1 =*/ d1, /*.N =*/ N, /*.KH =*/ KH, /*.KW =*/ KW, /*.KHW =*/ KH * KW, }; auto pipeline = ggml_metal_library_get_pipeline_im2col(lib, op); GGML_ASSERT(KH*KW <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); const uint64_t ntptg0 = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)/(KH*KW), N); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, IC, OH, OW, ntptg0, KH, KW); return 1; } int ggml_metal_op_conv_2d(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); GGML_ASSERT(ggml_is_contiguous(op->src[0])); GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); GGML_ASSERT(op->type == GGML_TYPE_F32); GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); const int32_t s0 = ((const int32_t *) op->op_params)[0]; const int32_t s1 = ((const int32_t *) op->op_params)[1]; const int32_t p0 = ((const int32_t *) op->op_params)[2]; const int32_t p1 = ((const int32_t *) op->op_params)[3]; const int32_t d0 = ((const int32_t *) op->op_params)[4]; const int32_t d1 = ((const int32_t *) op->op_params)[5]; ggml_metal_kargs_conv_2d args = { /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.IW =*/ ne10, /*.IH =*/ ne11, /*.KW =*/ ne00, /*.KH =*/ ne01, /*.IC =*/ ne02, /*.OC =*/ ne03, 
/*.OW =*/ ne0, /*.OH =*/ ne1, /*.N =*/ ne3, /*.s0 =*/ s0, /*.s1 =*/ s1, /*.p0 =*/ p0, /*.p1 =*/ p1, /*.d0 =*/ d0, /*.d1 =*/ d1, }; auto pipeline = ggml_metal_library_get_pipeline_conv_2d(lib, op); int nth = ggml_metal_pipeline_max_theads_per_threadgroup(pipeline); nth = std::min(nth, 256); nth = std::max(nth, 1); const uint64_t n_out = ggml_nelements(op); uint64_t tg = (n_out + nth - 1)/nth; tg = std::max<uint64_t>(tg, 1); tg = std::min(tg, (uint64_t) std::numeric_limits<int32_t>::max()); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, tg, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_conv_transpose_1d(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int32_t s0 = ((const int32_t *)(op->op_params))[0]; const int32_t IC = op->src[1]->ne[1]; const int32_t IL = op->src[1]->ne[0]; const int32_t K = op->src[0]->ne[0]; const int32_t OL = op->ne[0]; const int32_t OC = op->ne[1]; ggml_metal_kargs_conv_transpose_1d args = { /*.IC =*/ IC, /*.IL =*/ IL, /*.K =*/ K, /*.s0 =*/ s0, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, }; auto pipeline = ggml_metal_library_get_pipeline_conv_transpose_1d(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_dispatch_threadgroups(enc, OL, OC, 1, 1, 1, 1); return 1; } int ggml_metal_op_conv_transpose_2d(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int32_t s0 = ((const int32_t *)(op->op_params))[0]; const int32_t IC = op->src[1]->ne[2]; const int32_t IH = op->src[1]->ne[1]; const int32_t IW = op->src[1]->ne[0]; const int32_t KH = op->src[0]->ne[1]; const int32_t KW = op->src[0]->ne[0]; const int32_t OW = op->ne[0]; const int32_t OH = op->ne[1]; const int32_t OC = op->ne[2]; ggml_metal_kargs_conv_transpose_2d args = { /*.IC =*/ IC, /*.IH =*/ IH, /*.IW =*/ IW, /*.KH =*/ KH, /*.KW =*/ KW, /*.OC =*/ OC, /*.s0 =*/ s0, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, }; auto pipeline = ggml_metal_library_get_pipeline_conv_transpose_2d(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
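// note: the dispatch below launches one threadgroup per output element (OW x OH x OC) with
//       KW x KH threads each; the threadgroup memory is sized for KW*KH floats, presumably
//       holding one partial product per thread of the kernel window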
ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); // Metal requires buffer size to be multiple of 16 bytes const size_t smem = GGML_PAD(KW * KH * sizeof(float), 16); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, OW, OH, OC, KW, KH, 1); return 1; } int ggml_metal_op_upscale(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const float sf0 = (float)ne0/op->src[0]->ne[0]; const float sf1 = (float)ne1/op->src[0]->ne[1]; const float sf2 = (float)ne2/op->src[0]->ne[2]; const float sf3 = (float)ne3/op->src[0]->ne[3]; ggml_metal_kargs_upscale args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.sf0 =*/ sf0, /*.sf1 =*/ sf1, /*.sf2 =*/ sf2, /*.sf3 =*/ sf3 }; auto pipeline = ggml_metal_library_get_pipeline_upscale(lib, op); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1); return 1; } int ggml_metal_op_pad(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); ggml_metal_kargs_pad args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3 }; auto pipeline = ggml_metal_library_get_pipeline_pad(lib, op); const int nth = std::min(1024, ne0); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1); return 1; } int ggml_metal_op_pad_reflect_1d(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); ggml_metal_kargs_pad_reflect_1d args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.p0 =*/ ((const int32_t 
*)(op->op_params))[0], /*.p1 =*/ ((const int32_t *)(op->op_params))[1] }; auto pipeline = ggml_metal_library_get_pipeline_pad_reflect_1d(lib, op); const int nth = std::min(1024, ne0); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1); return 1; } int ggml_metal_op_arange(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float start; float step; memcpy(&start, ((const int32_t *) op->op_params) + 0, sizeof(float)); memcpy(&step, ((const int32_t *) op->op_params) + 2, sizeof(float)); ggml_metal_kargs_arange args = { /*.ne0 =*/ ne0, /*.start =*/ start, /*.step =*/ step }; const int nth = std::min(1024, ne0); auto pipeline = ggml_metal_library_get_pipeline_arange(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 1); ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); const int dim = op->op_params[0]; const int max_period = op->op_params[1]; ggml_metal_kargs_timestep_embedding args = { /*.nb1 =*/ nb1, /*.dim =*/ dim, /*.max_period =*/ max_period, }; auto pipeline = ggml_metal_library_get_pipeline_timestep_embedding(lib, op); const int nth = std::max(1, std::min(1024, dim/2)); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, ne00, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_argmax(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); ggml_metal_kargs_argmax args = { /*.ne00 = */ ne00, /*.nb01 = */ nb01, }; auto pipeline = ggml_metal_library_get_pipeline_argmax(lib, op); const int64_t nrows = ggml_nrows(op->src[0]); int nth = 32; // SIMD width while (nth < ne00 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } const size_t smem = pipeline.smem; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_argsort(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = 
ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_argsort(lib, op); // bitonic sort requires the number of elements to be power of 2 int nth = 1; while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } const int npr = (ne00 + nth - 1)/nth; // Metal kernels require the buffer size to be multiple of 16 bytes // https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength const size_t smem = GGML_PAD(nth*sizeof(int32_t), 16); ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); ggml_metal_buffer_id bid_tmp = bid_dst; bid_tmp.offs += ggml_nbytes(op); if ((int) ceil(std::log(npr) / std::log(2)) % 2 == 1) { std::swap(bid_dst, bid_tmp); } ggml_metal_kargs_argsort args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.top_k =*/ nth, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_dst, 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, npr*ne01, ne02, ne03, nth, 1, 1); auto pipeline_merge = ggml_metal_library_get_pipeline_argsort_merge(lib, op); int len = nth; while (len < ne00) { ggml_metal_op_concurrency_reset(ctx); ggml_metal_kargs_argsort_merge args_merge = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.top_k =*/ ne00, /*.len =*/ len, }; // merges per row const int nm = (ne00 + 2*len - 1) / (2*len); const int nth = std::min(512, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_merge)); ggml_metal_encoder_set_pipeline(enc, pipeline_merge); ggml_metal_encoder_set_bytes (enc, &args_merge, sizeof(args_merge), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_dst, 2); ggml_metal_encoder_set_buffer (enc, bid_tmp, 3); ggml_metal_encoder_dispatch_threadgroups(enc, nm*ne01, ne02, ne03, nth, 1, 1); std::swap(bid_dst, bid_tmp); len <<= 1; } return 1; } int ggml_metal_op_top_k(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_top_k(lib, op); // bitonic sort requires the number of elements to be power of 2 int nth = 1; while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } // blocks per row const int npr = (ne00 + nth - 1)/nth; const size_t smem = GGML_PAD(nth*sizeof(int32_t), 16); ggml_metal_buffer_id bid_src0 = 
ggml_metal_get_buffer_id(op->src[0]); ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); ggml_metal_buffer_id bid_tmp = bid_dst; bid_tmp.offs += sizeof(int32_t)*ggml_nelements(op->src[0]); if ((int) ceil(std::log(npr) / std::log(2)) % 2 == 1) { std::swap(bid_dst, bid_tmp); } const int top_k = ne0; ggml_metal_kargs_argsort args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.top_k =*/ std::min(nth, top_k), // for each block, keep just the top_k indices }; if (npr > 1) { args.ne0 = (npr - 1)*args.top_k + std::min(ne00 - (npr - 1)*nth, args.top_k); } ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_dst, 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, npr*ne01, ne02, ne03, nth, 1, 1); auto pipeline_merge = ggml_metal_library_get_pipeline_top_k_merge(lib, op); int len = args.top_k; while (len < args.ne0) { ggml_metal_op_concurrency_reset(ctx); // merges per row const int nm = (args.ne0 + 2*len - 1) / (2*len); const int nth = std::min(512, std::min(len, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_merge))); ggml_metal_kargs_argsort_merge args_merge = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ args.ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.top_k =*/ nm == 1 ? top_k : args.ne0, // the final merge outputs top_k elements /*.len =*/ len, }; ggml_metal_encoder_set_pipeline(enc, pipeline_merge); ggml_metal_encoder_set_bytes (enc, &args_merge, sizeof(args_merge), 0); ggml_metal_encoder_set_buffer (enc, bid_src0, 1); ggml_metal_encoder_set_buffer (enc, bid_dst, 2); ggml_metal_encoder_set_buffer (enc, bid_tmp, 3); ggml_metal_encoder_dispatch_threadgroups(enc, nm*ne01, ne02, ne03, nth, 1, 1); std::swap(bid_dst, bid_tmp); len <<= 1; } return 1; } int ggml_metal_op_leaky_relu(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); float slope; memcpy(&slope, op->op_params, sizeof(float)); ggml_metal_kargs_leaky_relu args = { /*.slope =*/ slope }; auto pipeline = ggml_metal_library_get_pipeline_unary(lib, op); int64_t n = ggml_nelements(op); if (n % 4 == 0) { n /= 4; } ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); return 1; } int ggml_metal_op_tri(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); ggml_metal_kargs_tri args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, 
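// dst extents/strides follow; the tri kernel is dispatched with one threadgroup per src0 row (ne01 x ne02 x ne03)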
/*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.ne0 =*/ ne0, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.ne3 =*/ ne3, /*.nb0 =*/ nb0, /*.nb1 =*/ nb1, /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, }; auto pipeline = ggml_metal_library_get_pipeline_tri(lib, op); int nth = 32; // SIMD width while (nth < ne00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { nth *= 2; } nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); nth = std::min(nth, ne00); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); return 1; } int ggml_metal_op_opt_step_adamw(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_opt_step_adamw(lib, op); const int64_t np = ggml_nelements(op->src[0]); ggml_metal_kargs_opt_step_adamw args = { /*.np =*/ np, }; int ida = 0; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), ida++); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0); const int64_t n = (np + nth - 1) / nth; ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_opt_step_sgd(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); auto pipeline = ggml_metal_library_get_pipeline_opt_step_sgd(lib, op); const int64_t np = ggml_nelements(op->src[0]); ggml_metal_kargs_opt_step_sgd args = { /*.np =*/ np, }; int ida = 0; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++); ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++); const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0); const int64_t n = (np + nth - 1) / nth; ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, nth, 1, 1); return 1; } int ggml_metal_op_count_equal(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; GGML_TENSOR_LOCALS(int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS(uint64_t, 
nb1, op->src[1], nb); { ggml_metal_kargs_memset args = { /*.val =*/ 0 }; auto pipeline = ggml_metal_library_get_pipeline_memset(lib, op); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 1); ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, 1, 1, 1); } ggml_metal_op_concurrency_reset(ctx); { ggml_metal_kargs_count_equal args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, /*.nb00 =*/ nb00, /*.nb01 =*/ nb01, /*.nb02 =*/ nb02, /*.nb03 =*/ nb03, /*.nb10 =*/ nb10, /*.nb11 =*/ nb11, /*.nb12 =*/ nb12, /*.nb13 =*/ nb13, }; auto pipeline = ggml_metal_library_get_pipeline_count_equal(lib, op); const size_t smem = pipeline.smem; const int nth = 32*pipeline.nsg; GGML_ASSERT(nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2); ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1); } return 1; } ggml-org-ggml-3678254/src/ggml-metal/ggml-metal-ops.h000066400000000000000000000103151512524704700221770ustar00rootroot00000000000000#pragma once #include "ggml-metal-device.h" #ifdef __cplusplus extern "C" { #endif typedef struct ggml_metal_op * ggml_metal_op_t; ggml_metal_op_t ggml_metal_op_init( ggml_metal_device_t dev, ggml_metal_cmd_buf_t cmd_buf, struct ggml_cgraph * gf, int idx_start, int idx_end, bool use_fusion, bool use_concurrency, bool use_capture, int debug_graph, int debug_fusion); void ggml_metal_op_free(ggml_metal_op_t ctx); int ggml_metal_op_n_nodes(ggml_metal_op_t ctx); int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx); // // available ops: // // tokens per expert size_t ggml_metal_op_mul_mat_id_extra_tpe(const struct ggml_tensor * op); // id map [n_tokens, n_expert] size_t ggml_metal_op_mul_mat_id_extra_ids(const struct ggml_tensor * op); // return true if we should use the FA vector kernel for this op bool ggml_metal_op_flash_attn_ext_use_vec(const struct ggml_tensor * op); size_t ggml_metal_op_flash_attn_ext_extra_pad(const struct ggml_tensor * op); size_t ggml_metal_op_flash_attn_ext_extra_blk(const struct ggml_tensor * op); size_t ggml_metal_op_flash_attn_ext_extra_tmp(const struct ggml_tensor * op); int ggml_metal_op_concat (ggml_metal_op_t ctx, int idx); int ggml_metal_op_repeat (ggml_metal_op_t ctx, int idx); int ggml_metal_op_acc (ggml_metal_op_t ctx, int idx); int ggml_metal_op_scale (ggml_metal_op_t ctx, int idx); int ggml_metal_op_fill (ggml_metal_op_t ctx, int idx); int ggml_metal_op_clamp (ggml_metal_op_t ctx, int idx); int ggml_metal_op_unary (ggml_metal_op_t ctx, int idx); int ggml_metal_op_glu (ggml_metal_op_t ctx, int idx); int ggml_metal_op_sum (ggml_metal_op_t ctx, int idx); int ggml_metal_op_sum_rows (ggml_metal_op_t ctx, int idx); int ggml_metal_op_cumsum (ggml_metal_op_t ctx, int idx); int ggml_metal_op_get_rows (ggml_metal_op_t ctx, int idx); int ggml_metal_op_set_rows (ggml_metal_op_t ctx, int idx); int ggml_metal_op_soft_max (ggml_metal_op_t ctx, int idx); int ggml_metal_op_ssm_conv (ggml_metal_op_t ctx, int idx); int ggml_metal_op_ssm_scan (ggml_metal_op_t ctx, int idx); int 
ggml_metal_op_rwkv (ggml_metal_op_t ctx, int idx); int ggml_metal_op_cpy (ggml_metal_op_t ctx, int idx); int ggml_metal_op_pool_2d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_mul_mat (ggml_metal_op_t ctx, int idx); int ggml_metal_op_mul_mat_id (ggml_metal_op_t ctx, int idx); int ggml_metal_op_add_id (ggml_metal_op_t ctx, int idx); int ggml_metal_op_flash_attn_ext (ggml_metal_op_t ctx, int idx); int ggml_metal_op_bin (ggml_metal_op_t ctx, int idx); int ggml_metal_op_l2_norm (ggml_metal_op_t ctx, int idx); int ggml_metal_op_group_norm (ggml_metal_op_t ctx, int idx); int ggml_metal_op_norm (ggml_metal_op_t ctx, int idx); int ggml_metal_op_rope (ggml_metal_op_t ctx, int idx); int ggml_metal_op_im2col (ggml_metal_op_t ctx, int idx); int ggml_metal_op_conv_2d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_conv_transpose_1d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_conv_transpose_2d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_upscale (ggml_metal_op_t ctx, int idx); int ggml_metal_op_pad (ggml_metal_op_t ctx, int idx); int ggml_metal_op_pad_reflect_1d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_arange (ggml_metal_op_t ctx, int idx); int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx); int ggml_metal_op_argmax (ggml_metal_op_t ctx, int idx); int ggml_metal_op_argsort (ggml_metal_op_t ctx, int idx); int ggml_metal_op_top_k (ggml_metal_op_t ctx, int idx); int ggml_metal_op_leaky_relu (ggml_metal_op_t ctx, int idx); int ggml_metal_op_tri (ggml_metal_op_t ctx, int idx); int ggml_metal_op_opt_step_adamw (ggml_metal_op_t ctx, int idx); int ggml_metal_op_opt_step_sgd (ggml_metal_op_t ctx, int idx); int ggml_metal_op_count_equal (ggml_metal_op_t ctx, int idx); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-metal/ggml-metal.cpp000066400000000000000000000630011512524704700217330ustar00rootroot00000000000000#include "ggml-metal.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-metal-device.h" #include "ggml-metal-context.h" #include "ggml-metal-ops.h" // globals // initialized in ggml_backend_metal_reg static ggml_backend_reg g_ggml_metal_reg; static ggml_backend_device g_ggml_metal_device; //////////////////////////////////////////////////////////////////////////////// // backend interface //////////////////////////////////////////////////////////////////////////////// // shared buffer static void ggml_backend_metal_buffer_shared_free_buffer(ggml_backend_buffer_t buffer) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_free(ctx); } static void * ggml_backend_metal_buffer_shared_get_base(ggml_backend_buffer_t buffer) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); return ggml_metal_buffer_get_base(ctx); } static void ggml_backend_metal_buffer_shared_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size); } static void ggml_backend_metal_buffer_shared_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size); } static void 
ggml_backend_metal_buffer_shared_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size); } static bool ggml_backend_metal_buffer_shared_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); GGML_UNUSED(buffer); GGML_UNUSED(src); GGML_UNUSED(dst); return false; } static void ggml_backend_metal_buffer_shared_clear(ggml_backend_buffer_t buffer, uint8_t value) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_clear(ctx, value); } static ggml_backend_buffer_i ggml_backend_metal_buffer_shared_i = { /* .free_buffer = */ ggml_backend_metal_buffer_shared_free_buffer, /* .get_base = */ ggml_backend_metal_buffer_shared_get_base, /* .init_tensor = */ NULL, /* .memset_tensor = */ ggml_backend_metal_buffer_shared_memset_tensor, /* .set_tensor = */ ggml_backend_metal_buffer_shared_set_tensor, /* .get_tensor = */ ggml_backend_metal_buffer_shared_get_tensor, /* .cpy_tensor = */ ggml_backend_metal_buffer_shared_cpy_tensor, /* .clear = */ ggml_backend_metal_buffer_shared_clear, /* .reset = */ NULL, }; // private buffer static void ggml_backend_metal_buffer_private_free_buffer(ggml_backend_buffer_t buffer) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_free(ctx); } static void * ggml_backend_metal_buffer_private_get_base(ggml_backend_buffer_t buffer) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); return ggml_metal_buffer_get_base(ctx); } static void ggml_backend_metal_buffer_private_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size); } static void ggml_backend_metal_buffer_private_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size); } static void ggml_backend_metal_buffer_private_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size); } static bool ggml_backend_metal_buffer_private_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); GGML_UNUSED(buffer); GGML_UNUSED(src); GGML_UNUSED(dst); return false; } static void ggml_backend_metal_buffer_private_clear(ggml_backend_buffer_t buffer, uint8_t value) { ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context; GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx)); ggml_metal_buffer_clear(ctx, value); } static ggml_backend_buffer_i 
ggml_backend_metal_buffer_private_i = { /* .free_buffer = */ ggml_backend_metal_buffer_private_free_buffer, /* .get_base = */ ggml_backend_metal_buffer_private_get_base, /* .init_tensor = */ NULL, /* .memset_tensor = */ ggml_backend_metal_buffer_private_memset_tensor, /* .set_tensor = */ ggml_backend_metal_buffer_private_set_tensor, /* .get_tensor = */ ggml_backend_metal_buffer_private_get_tensor, /* .cpy_tensor = */ ggml_backend_metal_buffer_private_cpy_tensor, /* .clear = */ ggml_backend_metal_buffer_private_clear, /* .reset = */ NULL, }; // // buffer types // // common method for allocating shread or private Metal buffers static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size, bool shared) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context; ggml_metal_buffer_t res = ggml_metal_buffer_init(ctx_dev, size, shared); ggml_backend_buffer_i buf_i = ggml_metal_buffer_is_shared(res) ? ggml_backend_metal_buffer_shared_i : ggml_backend_metal_buffer_private_i; return ggml_backend_buffer_init(buft, buf_i, res, size); } static size_t ggml_backend_metal_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { size_t res = ggml_nbytes(tensor); // some operations require additional memory for fleeting data: switch (tensor->op) { case GGML_OP_MUL_MAT_ID: { res += ggml_metal_op_mul_mat_id_extra_tpe(tensor); res += ggml_metal_op_mul_mat_id_extra_ids(tensor); } break; case GGML_OP_FLASH_ATTN_EXT: { res += ggml_metal_op_flash_attn_ext_extra_pad(tensor); res += ggml_metal_op_flash_attn_ext_extra_blk(tensor); res += ggml_metal_op_flash_attn_ext_extra_tmp(tensor); } break; case GGML_OP_CUMSUM: case GGML_OP_ARGSORT: { res *= 2; } break; case GGML_OP_TOP_K: { res = 2*sizeof(int32_t)*ggml_nelements(tensor->src[0]); } break; default: break; } return res; GGML_UNUSED(buft); } // default (shared) buffer type static const char * ggml_backend_metal_buffer_type_shared_get_name(ggml_backend_buffer_type_t buft) { return "Metal"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_metal_buffer_type_shared_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true); } static size_t ggml_backend_metal_buffer_type_shared_get_alignment(ggml_backend_buffer_type_t buft) { return 32; GGML_UNUSED(buft); } static size_t ggml_backend_metal_buffer_type_shared_get_max_size(ggml_backend_buffer_type_t buft) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context; return ggml_metal_device_get_props(ctx_dev)->max_buffer_size; } static size_t ggml_backend_metal_buffer_type_shared_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor); } static bool ggml_backend_metal_buffer_type_shared_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_shared(void) { static ggml_backend_buffer_type ggml_backend_buffer_type_metal = { /* .iface = */ { /* .get_name = */ ggml_backend_metal_buffer_type_shared_get_name, /* .alloc_buffer = */ ggml_backend_metal_buffer_type_shared_alloc_buffer, /* .get_alignment = */ ggml_backend_metal_buffer_type_shared_get_alignment, /* .get_max_size = */ ggml_backend_metal_buffer_type_shared_get_max_size, /* .get_alloc_size = */ ggml_backend_metal_buffer_type_shared_get_alloc_size, /* .is_host = */ 
ggml_backend_metal_buffer_type_shared_is_host, }, /* .device = */ &g_ggml_metal_device, /* .context = */ NULL, }; return &ggml_backend_buffer_type_metal; } // default (private) buffer type static const char * ggml_backend_metal_buffer_type_private_get_name(ggml_backend_buffer_type_t buft) { return "Metal_Private"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_metal_buffer_type_private_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, false); } static size_t ggml_backend_metal_buffer_type_private_get_alignment(ggml_backend_buffer_type_t buft) { return 32; GGML_UNUSED(buft); } static size_t ggml_backend_metal_buffer_type_private_get_max_size(ggml_backend_buffer_type_t buft) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context; return ggml_metal_device_get_props(ctx_dev)->max_buffer_size; } static size_t ggml_backend_metal_buffer_type_private_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor); } static bool ggml_backend_metal_buffer_type_private_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_private(void) { static ggml_backend_buffer_type ggml_backend_buffer_type_metal = { /* .iface = */ { /* .get_name = */ ggml_backend_metal_buffer_type_private_get_name, /* .alloc_buffer = */ ggml_backend_metal_buffer_type_private_alloc_buffer, /* .get_alignment = */ ggml_backend_metal_buffer_type_private_get_alignment, /* .get_max_size = */ ggml_backend_metal_buffer_type_private_get_max_size, /* .get_alloc_size = */ ggml_backend_metal_buffer_type_private_get_alloc_size, /* .is_host = */ ggml_backend_metal_buffer_type_private_is_host, }, /* .device = */ &g_ggml_metal_device, /* .context = */ NULL, }; return &ggml_backend_buffer_type_metal; } // mapped buffer type static const char * ggml_backend_metal_buffer_type_mapped_get_name(ggml_backend_buffer_type_t buft) { return "Metal_Mapped"; GGML_UNUSED(buft); } static ggml_backend_buffer_t ggml_backend_metal_buffer_type_mapped_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { // for mapped buffers, prefer shared memory return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true); } static size_t ggml_backend_metal_buffer_type_mapped_get_alignment(ggml_backend_buffer_type_t buft) { return 32; GGML_UNUSED(buft); } static size_t ggml_backend_metal_buffer_type_mapped_get_max_size(ggml_backend_buffer_type_t buft) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context; return ggml_metal_device_get_props(ctx_dev)->max_buffer_size; } static size_t ggml_backend_metal_buffer_type_mapped_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor); } static bool ggml_backend_metal_buffer_type_mapped_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_mapped(void) { // note: not obvious, but this buffer type still needs to implement .alloc_buffer: // https://github.com/ggml-org/llama.cpp/pull/15832#discussion_r2333177099 static ggml_backend_buffer_type ggml_backend_buffer_type_mapped_metal = { /* .iface = */ { /* .get_name = */ ggml_backend_metal_buffer_type_mapped_get_name, /* .alloc_buffer = */ ggml_backend_metal_buffer_type_mapped_alloc_buffer, /* .get_alignment = 
*/ ggml_backend_metal_buffer_type_mapped_get_alignment, /* .get_max_size = */ ggml_backend_metal_buffer_type_mapped_get_max_size, /* .get_alloc_size = */ ggml_backend_metal_buffer_type_mapped_get_alloc_size, /* .is_host = */ ggml_backend_metal_buffer_type_mapped_is_host, }, /* .device = */ &g_ggml_metal_device, /* .context = */ NULL, }; return &ggml_backend_buffer_type_mapped_metal; } // backend static const char * ggml_backend_metal_name(ggml_backend_t backend) { return "Metal"; GGML_UNUSED(backend); } static void ggml_backend_metal_free(ggml_backend_t backend) { ggml_metal_t ctx = (ggml_metal_t)backend->context; // wait for any ongoing async operations to finish ggml_metal_synchronize(ctx); ggml_metal_free(ctx); free(backend); } static void ggml_backend_metal_synchronize(ggml_backend_t backend) { ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_synchronize(ctx); } static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_set_tensor_async(ctx, tensor, data, offset, size); } static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_get_tensor_async(ctx, tensor, data, offset, size); } static bool ggml_backend_metal_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) { return false; GGML_UNUSED(backend_src); GGML_UNUSED(backend_dst); GGML_UNUSED(src); GGML_UNUSED(dst); } static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_metal_t ctx = (ggml_metal_t)backend->context; return ggml_metal_graph_compute(ctx, cgraph); } static void ggml_backend_metal_graph_optimize(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_graph_optimize(ctx, cgraph); } static void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) { GGML_ASSERT(ggml_backend_is_metal(backend)); ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_set_n_cb(ctx, n_cb); } static ggml_backend_i ggml_backend_metal_i = { /* .get_name = */ ggml_backend_metal_name, /* .free = */ ggml_backend_metal_free, /* .set_tensor_async = */ ggml_backend_metal_set_tensor_async, /* .get_tensor_async = */ ggml_backend_metal_get_tensor_async, /* .cpy_tensor_async = */ ggml_backend_metal_cpy_tensor_async, // only needed for multi-GPU setups /* .synchronize = */ ggml_backend_metal_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_metal_graph_compute, // the events API is needed only for multi-GPU setups, so likely no need to implement it for Metal // in any case, these docs seem relevant if we ever decide to implement it: // https://developer.apple.com/documentation/metal/mtlcommandbuffer#Synchronizing-Passes-with-Events /* .event_record = */ NULL, /* .event_wait = */ NULL, /* .graph_optimize = */ ggml_backend_metal_graph_optimize, }; static ggml_guid_t ggml_backend_metal_guid(void) { static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 }; return &guid; } ggml_backend_t ggml_backend_metal_init(void) { ggml_backend_dev_t dev = 
ggml_backend_reg_dev_get(ggml_backend_metal_reg(), 0); ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; ggml_metal_t ctx = ggml_metal_init(ctx_dev); if (ctx == NULL) { GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__); return NULL; } ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend)); *backend = { /* .guid = */ ggml_backend_metal_guid(), /* .interface = */ ggml_backend_metal_i, /* .device = */ dev, /* .context = */ ctx, }; ggml_backend_metal_set_n_cb(backend, 1); return backend; } bool ggml_backend_is_metal(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid()); } void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data) { GGML_ASSERT(ggml_backend_is_metal(backend)); ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_set_abort_callback(ctx, abort_callback, user_data); } bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) { GGML_ASSERT(ggml_backend_is_metal(backend)); ggml_metal_t ctx = (ggml_metal_t)backend->context; return ggml_metal_supports_family(ctx, family); } void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) { GGML_ASSERT(ggml_backend_is_metal(backend)); ggml_metal_t ctx = (ggml_metal_t)backend->context; ggml_metal_capture_next_compute(ctx); } // backend device static const char * ggml_backend_metal_device_get_name(ggml_backend_dev_t dev) { return "Metal"; GGML_UNUSED(dev); } static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; return ggml_metal_device_get_props(ctx_dev)->name; } static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; ggml_metal_device_get_memory(ctx_dev, free, total); } static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backend_dev_t dev) { return GGML_BACKEND_DEVICE_TYPE_GPU; GGML_UNUSED(dev); } static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_metal_device_get_name(dev); props->description = ggml_backend_metal_device_get_description(dev); props->type = ggml_backend_metal_device_get_type(dev); ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = { /* .async = */ true, /* .host_buffer = */ false, /* .buffer_from_host_ptr = */ true, /* .events = */ false, }; } static ggml_backend_t ggml_backend_metal_device_init(ggml_backend_dev_t dev, const char * params) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; ggml_metal_t ctx = ggml_metal_init(ctx_dev); if (ctx == NULL) { GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__); return NULL; } ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend)); *backend = { /* .guid = */ ggml_backend_metal_guid(), /* .interface = */ ggml_backend_metal_i, /* .device = */ dev, /* .context = */ ctx, }; ggml_backend_metal_set_n_cb(backend, 1); return backend; GGML_UNUSED(params); } static ggml_backend_buffer_type_t ggml_backend_metal_device_get_buffer_type(ggml_backend_dev_t dev) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx_dev); return props_dev->use_shared_buffers ? 
ggml_backend_metal_buffer_type_shared() : ggml_backend_metal_buffer_type_private(); } static ggml_backend_buffer_t ggml_backend_metal_device_buffer_mapped(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; ggml_metal_buffer_t res = ggml_metal_buffer_map(ctx_dev, ptr, size, max_tensor_size); return ggml_backend_buffer_init(ggml_backend_metal_buffer_type_mapped(), ggml_backend_metal_buffer_shared_i, res, size); } static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context; return ggml_metal_device_supports_op(ctx_dev, op); } static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { return buft->iface.get_name == ggml_backend_metal_buffer_type_shared_get_name || buft->iface.get_name == ggml_backend_metal_buffer_type_private_get_name || buft->iface.get_name == ggml_backend_metal_buffer_type_mapped_get_name; GGML_UNUSED(dev); } static int64_t get_op_batch_size(const ggml_tensor * op) { switch (op->op) { case GGML_OP_MUL_MAT: return op->ne[1]; case GGML_OP_MUL_MAT_ID: return op->ne[2]; default: return ggml_nrows(op); } } static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) { const int min_batch_size = 32; return (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) && get_op_batch_size(op) >= min_batch_size; GGML_UNUSED(dev); GGML_UNUSED(op); } static ggml_backend_device_i ggml_backend_metal_device_i = { /* .get_name = */ ggml_backend_metal_device_get_name, /* .get_description = */ ggml_backend_metal_device_get_description, /* .get_memory = */ ggml_backend_metal_device_get_memory, /* .get_type = */ ggml_backend_metal_device_get_type, /* .get_props = */ ggml_backend_metal_device_get_props, /* .init_backend = */ ggml_backend_metal_device_init, /* .get_buffer_type = */ ggml_backend_metal_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, /* .buffer_from_host_ptr = */ ggml_backend_metal_device_buffer_mapped, /* .supports_op = */ ggml_backend_metal_device_supports_op, /* .supports_buft = */ ggml_backend_metal_device_supports_buft, /* .offload_op = */ ggml_backend_metal_device_offload_op, /* .event_new = */ NULL, /* .event_free = */ NULL, /* .event_synchronize = */ NULL, }; // backend registry static const char * ggml_backend_metal_reg_get_name(ggml_backend_reg_t reg) { return "Metal"; GGML_UNUSED(reg); } static size_t ggml_backend_metal_reg_device_count(ggml_backend_reg_t reg) { return 1; GGML_UNUSED(reg); } static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(index == 0); return &g_ggml_metal_device; GGML_UNUSED(reg); GGML_UNUSED(index); } static ggml_backend_feature g_ggml_backend_metal_features[] = { #if defined(GGML_METAL_EMBED_LIBRARY) { "EMBED_LIBRARY", "1" }, #endif { NULL, NULL }, }; static ggml_backend_feature * ggml_backend_metal_get_features(ggml_backend_reg_t reg) { return g_ggml_backend_metal_features; GGML_UNUSED(reg); } static void * ggml_backend_metal_get_proc_address(ggml_backend_reg_t reg, const char * name) { if (strcmp(name, "ggml_backend_get_features") == 0) { return (void *)ggml_backend_metal_get_features; } return NULL; GGML_UNUSED(reg); } static ggml_backend_reg_i ggml_backend_metal_reg_i = { /* .get_name = */ ggml_backend_metal_reg_get_name, /* .device_count = */ ggml_backend_metal_reg_device_count, /* .device_get = */ 
ggml_backend_metal_reg_device_get, /* .get_proc_address = */ ggml_backend_metal_get_proc_address, }; ggml_backend_reg_t ggml_backend_metal_reg(void) { { g_ggml_metal_reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_metal_reg_i, /* .context = */ NULL, }; g_ggml_metal_device = { /* .iface = */ ggml_backend_metal_device_i, /* .reg = */ &g_ggml_metal_reg, /* .context = */ ggml_metal_device_get(), }; } return &g_ggml_metal_reg; } GGML_BACKEND_DL_IMPL(ggml_backend_metal_reg) ggml-org-ggml-3678254/src/ggml-metal/ggml-metal.metal000066400000000000000000014561521512524704700222710ustar00rootroot00000000000000#define GGML_COMMON_DECL_METAL #define GGML_COMMON_IMPL_METAL #if defined(GGML_METAL_EMBED_LIBRARY) __embed_ggml-common.h__ #else #include "ggml-common.h" #endif #include "ggml-metal-impl.h" #include #ifdef GGML_METAL_HAS_TENSOR #include #include #endif using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) #define MIN(x, y) ((x) < (y) ? (x) : (y)) #define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; } #define PAD2(x, n) (((x) + (n) - 1) & ~((n) - 1)) #define FOR_UNROLL(x) _Pragma("clang loop unroll(full)") for (x) #define N_SIMDWIDTH 32 // assuming SIMD group size is 32 // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf // // cmd: // .../usr/bin/metal -dM -E -c ggml/src/ggml-metal/ggml-metal.metal // .../usr/bin/metal -dM -E -c -target air64-apple-ios14.0 ggml/src/ggml-metal/ggml-metal.metal // #if __METAL_VERSION__ < 310 && defined(GGML_METAL_HAS_BF16) #undef GGML_METAL_HAS_BF16 #endif #if defined(GGML_METAL_HAS_BF16) typedef matrix bfloat4x4; typedef matrix bfloat2x4; #endif constexpr constant static float kvalues_iq4nl_f[16] = { -127.f, -104.f, -83.f, -65.f, -49.f, -35.f, -22.f, -10.f, 1.f, 13.f, 25.f, 38.f, 53.f, 69.f, 89.f, 113.f }; constexpr constant static float kvalues_mxfp4_f[16] = { 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f }; static inline int best_index_int8(int n, constant float * val, float x) { if (x <= val[0]) return 0; if (x >= val[n-1]) return n-1; int ml = 0, mu = n-1; while (mu-ml > 1) { int mav = (ml+mu)/2; if (x < val[mav]) mu = mav; else ml = mav; } return x - val[mu-1] < val[mu] - x ? mu-1 : mu; } static inline float e8m0_to_fp32(uint8_t x) { uint32_t bits; if (x == 0) { bits = 0x00400000; } else { bits = (uint32_t) x << 23; } return as_type(bits); } static inline float dot(float x, float y) { return x*y; } // NOTE: this is not dequantizing - we are simply fitting the template template void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { reg = (type4x4)(*src); } template void dequantize_f32_t4(device const float4 * src, short il, thread type4 & reg) { reg = (type4)(*src); } template void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) { reg = (type4x4)(*src); } template void dequantize_f16_t4(device const half4 * src, short il, thread type4 & reg) { reg = (type4)(*(src)); } #if defined(GGML_METAL_HAS_BF16) template void dequantize_bf16(device const bfloat4x4 * src, short il, thread type4x4 & reg) { reg = (type4x4)(*src); } template void dequantize_bf16_t4(device const bfloat4 * src, short il, thread type4 & reg) { reg = (type4)(*(src)); } #endif template void dequantize_q4_0(device const block_q4_0 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 1); const float d1 = il ? 
(xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float md = -8.h * xb->d; const ushort mask0 = il ? 0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; float4x4 reg_f; for (int i = 0; i < 8; i++) { reg_f[i/2][2*(i%2) + 0] = d1 * (qs[i] & mask0) + md; reg_f[i/2][2*(i%2) + 1] = d2 * (qs[i] & mask1) + md; } reg = (type4x4) reg_f; } template void dequantize_q4_0_t4(device const block_q4_0 * xb, short il, thread type4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 1); const float d1 = (il/4) ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float md = -8.h * xb->d; const ushort mask0 = (il/4) ? 0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; for (int i = 0; i < 2; i++) { reg[2*i + 0] = d1 * (qs[2*(il%4) + i] & mask0) + md; reg[2*i + 1] = d2 * (qs[2*(il%4) + i] & mask1) + md; } } void quantize_q4_0(device const float * src, device block_q4_0 & dst) { #pragma METAL fp math_mode(safe) float amax = 0.0f; // absolute max float max = 0.0f; for (int j = 0; j < QK4_0; j++) { const float v = src[j]; if (amax < fabs(v)) { amax = fabs(v); max = v; } } const float d = max / -8; const float id = d ? 1.0f/d : 0.0f; dst.d = d; for (int j = 0; j < QK4_0/2; ++j) { const float x0 = src[0 + j]*id; const float x1 = src[QK4_0/2 + j]*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); dst.qs[j] = xi0; dst.qs[j] |= xi1 << 4; } } void quantize_q4_1(device const float * src, device block_q4_1 & dst) { #pragma METAL fp math_mode(safe) float min = FLT_MAX; float max = -FLT_MAX; for (int j = 0; j < QK4_1; j++) { const float v = src[j]; if (min > v) min = v; if (max < v) max = v; } const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 1.0f/d : 0.0f; dst.d = d; dst.m = min; for (int j = 0; j < QK4_1/2; ++j) { const float x0 = (src[0 + j] - min)*id; const float x1 = (src[QK4_1/2 + j] - min)*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); dst.qs[j] = xi0; dst.qs[j] |= xi1 << 4; } } void quantize_q5_0(device const float * src, device block_q5_0 & dst) { #pragma METAL fp math_mode(safe) float amax = 0.0f; // absolute max float max = 0.0f; for (int j = 0; j < QK5_0; j++) { const float v = src[j]; if (amax < fabs(v)) { amax = fabs(v); max = v; } } const float d = max / -16; const float id = d ? 1.0f/d : 0.0f; dst.d = d; uint32_t qh = 0; for (int j = 0; j < QK5_0/2; ++j) { const float x0 = src[0 + j]*id; const float x1 = src[QK5_0/2 + j]*id; const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); } thread const uint8_t * qh8 = (thread const uint8_t *)&qh; for (int j = 0; j < 4; ++j) { dst.qh[j] = qh8[j]; } } void quantize_q5_1(device const float * src, device block_q5_1 & dst) { #pragma METAL fp math_mode(safe) float max = src[0]; float min = src[0]; for (int j = 1; j < QK5_1; j++) { const float v = src[j]; min = v < min ? v : min; max = v > max ? v : max; } const float d = (max - min) / 31; const float id = d ? 
1.0f/d : 0.0f; dst.d = d; dst.m = min; uint32_t qh = 0; for (int j = 0; j < QK5_1/2; ++j) { const float x0 = (src[0 + j] - min)*id; const float x1 = (src[QK5_1/2 + j] - min)*id; const uint8_t xi0 = (uint8_t)(x0 + 0.5f); const uint8_t xi1 = (uint8_t)(x1 + 0.5f); dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); } thread const uint8_t * qh8 = (thread const uint8_t *)&qh; for (int j = 0; j < 4; ++j) { dst.qh[j] = qh8[j]; } } void quantize_q8_0(device const float * src, device block_q8_0 & dst) { #pragma METAL fp math_mode(safe) float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { const float v = src[j]; amax = MAX(amax, fabs(v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; dst.d = d; for (int j = 0; j < QK8_0; ++j) { const float x0 = src[j]*id; dst.qs[j] = round(x0); } } void quantize_iq4_nl(device const float * src, device block_iq4_nl & dst) { #pragma METAL fp math_mode(safe) float amax = 0.0f; // absolute max float max = 0.0f; for (int j = 0; j < QK4_NL; j++) { const float v = src[j]; if (amax < fabs(v)) { amax = fabs(v); max = v; } } const float d = max / kvalues_iq4nl_f[0]; const float id = d ? 1.0f/d : 0.0f; float sumqx = 0, sumq2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { const float x0 = src[0 + j]*id; const float x1 = src[QK4_NL/2 + j]*id; const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); dst.qs[j] = xi0 | (xi1 << 4); const float v0 = kvalues_iq4nl_f[xi0]; const float v1 = kvalues_iq4nl_f[xi1]; const float w0 = src[0 + j]*src[0 + j]; const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; sumq2 += w0*v0*v0 + w1*v1*v1; } dst.d = sumq2 > 0 ? sumqx/sumq2 : d; } template void dequantize_q4_1(device const block_q4_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float m = xb->m; const ushort mask0 = il ? 0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; float4x4 reg_f; for (int i = 0; i < 8; i++) { reg_f[i/2][2*(i%2) + 0] = ((qs[i] & mask0) * d1) + m; reg_f[i/2][2*(i%2) + 1] = ((qs[i] & mask1) * d2) + m; } reg = (type4x4) reg_f; } template void dequantize_q4_1_t4(device const block_q4_1 * xb, short il, thread type4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); const float d1 = (il/4) ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float m = xb->m; const ushort mask0 = (il/4) ? 0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; for (int i = 0; i < 2; i++) { reg[2*i + 0] = d1 * (qs[2*(il%4) + i] & mask0) + m; reg[2*i + 1] = d2 * (qs[2*(il%4) + i] & mask1) + m; } } template void dequantize_q5_0(device const block_q5_0 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 3); const float d = xb->d; const float md = -16.h * xb->d; const ushort mask = il ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = il ? 4 : 0; const int gh_mv = il ? 12 : 0; const int gh_bk = il ? 
0 : 4; float4x4 reg_f; for (int i = 0; i < 8; i++) { // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg_f[i/2][2*(i%2) + 0] = d * x0 + md; reg_f[i/2][2*(i%2) + 1] = d * x1 + md; } reg = (type4x4) reg_f; } template void dequantize_q5_0_t4(device const block_q5_0 * xb, short il, thread type4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 3); const float d = xb->d; const float md = -16.h * xb->d; const ushort mask = (il/4) ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = (il/4) ? 4 : 0; const int gh_mv = (il/4) ? 12 : 0; const int gh_bk = (il/4) ? 0 : 4; for (int ii = 0; ii < 2; ii++) { int i = 2*(il%4) + ii; // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg[2*ii + 0] = d * x0 + md; reg[2*ii + 1] = d * x1 + md; } } template void dequantize_q5_1(device const block_q5_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 4); const float d = xb->d; const float m = xb->m; const ushort mask = il ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = il ? 4 : 0; const int gh_mv = il ? 12 : 0; const int gh_bk = il ? 0 : 4; float4x4 reg_f; for (int i = 0; i < 8; i++) { // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg_f[i/2][2*(i%2) + 0] = d * x0 + m; reg_f[i/2][2*(i%2) + 1] = d * x1 + m; } reg = (type4x4) reg_f; } template void dequantize_q5_1_t4(device const block_q5_1 * xb, short il, thread type4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 4); const float d = xb->d; const float m = xb->m; const ushort mask = (il/4) ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = (il/4) ? 4 : 0; const int gh_mv = (il/4) ? 12 : 0; const int gh_bk = (il/4) ? 
0 : 4; for (int ii = 0; ii < 2; ii++) { int i = 2*(il%4) + ii; // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg[2*ii + 0] = d * x0 + m; reg[2*ii + 1] = d * x1 + m; } } template void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) { device const int8_t * qs = ((device const int8_t *)xb->qs); const float d = xb->d; float4x4 reg_f; for (int i = 0; i < 16; i++) { reg_f[i/4][i%4] = (qs[i + 16*il] * d); } reg = (type4x4) reg_f; } template void dequantize_q8_0_t4(device const block_q8_0 *xb, short il, thread type4 & reg) { device const int8_t * qs = ((device const int8_t *)xb->qs); const float d = xb->d; for (int i = 0; i < 4; i++) { reg[i] = (qs[4*(il%4) + i + 16*(il/4)] * d); } } template void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) { device const uint8_t * q2 = (device const uint8_t *)xb->qs; const float d = e8m0_to_fp32(xb->e); const uint8_t shr = il >= 1 ? 4 : 0; for (int i = 0; i < 4; ++i) { reg[i][0] = d * kvalues_mxfp4_f[(q2[4*i + 0] >> shr) & 0x0F]; reg[i][1] = d * kvalues_mxfp4_f[(q2[4*i + 1] >> shr) & 0x0F]; reg[i][2] = d * kvalues_mxfp4_f[(q2[4*i + 2] >> shr) & 0x0F]; reg[i][3] = d * kvalues_mxfp4_f[(q2[4*i + 3] >> shr) & 0x0F]; } } template void dequantize_mxfp4_t4(device const block_mxfp4 * xb, short il, thread type4 & reg) { device const uint8_t * q2 = (device const uint8_t *)xb->qs; const float d = e8m0_to_fp32(xb->e); const short il4 = il%4; const uint8_t shr = il >= 4 ? 4 : 0; reg[0] = d * kvalues_mxfp4_f[(q2[4*il4 + 0] >> shr) & 0x0F]; reg[1] = d * kvalues_mxfp4_f[(q2[4*il4 + 1] >> shr) & 0x0F]; reg[2] = d * kvalues_mxfp4_f[(q2[4*il4 + 2] >> shr) & 0x0F]; reg[3] = d * kvalues_mxfp4_f[(q2[4*il4 + 3] >> shr) & 0x0F]; } template void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; const float min = xb->dmin; device const uint8_t * q = (device const uint8_t *)xb->qs; float dl, ml; uint8_t sc = xb->scales[il]; q = q + 32*(il/8) + 16*(il&1); il = (il/2)%4; half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4); for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } } template void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) { const half d_all = xb->d; device const uint8_t * q = (device const uint8_t *)xb->qs; device const uint8_t * h = (device const uint8_t *)xb->hmask; device const int8_t * scales = (device const int8_t *)xb->scales; q = q + 32 * (il/8) + 16 * (il&1); h = h + 16 * (il&1); uint8_t m = 1 << (il/2); uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \ ((il/4)>0 ? 12 : 3); uint16_t kmask2 = il/8 ? 0xF0 : 0x0F; uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4]; int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2) : (scale_2&kmask2) | ((scale_1&kmask1) << 4); float dl = il<8 ? d_all * (dl_int - 32.f) : d_all * (dl_int / 16.f - 32.f); const float ml = 4.f * dl; il = (il/2) & 3; const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); const uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 
12 : 3); dl *= coef; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - (h[i] & m ? 0 : ml); } } static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q) { return j < 4 ? uchar2{uchar(q[j+0+k] & 63), uchar(q[j+4+k] & 63)} : uchar2{uchar((q[j+4+k] & 0xF) | ((q[j-4+k] & 0xc0) >> 2)), uchar((q[j+4+k] >> 4) | ((q[j-0+k] & 0xc0) >> 2))}; } template void dequantize_q4_K(device const block_q4_K * xb, short il, thread type4x4 & reg) { device const uchar * q = xb->qs; short is = (il/4) * 2; q = q + (il/4) * 32 + 16 * (il&1); il = il & 3; const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); const float d = il < 2 ? xb->d : xb->d / 16.h; const float min = xb->dmin; const float dl = d * sc[0]; const float ml = min * sc[1]; const ushort mask = il < 2 ? 0x0F : 0xF0; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } } template void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) { device const uint8_t * q = xb->qs; device const uint8_t * qh = xb->qh; short is = (il/4) * 2; q = q + 32 * (il/4) + 16 * (il&1); qh = qh + 16 * (il&1); uint8_t ul = 1 << (il/2); il = il & 3; const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); const float d = il < 2 ? xb->d : xb->d / 16.f; const float min = xb->dmin; const float dl = d * sc[0]; const float ml = min * sc[1]; const ushort mask = il<2 ? 0x0F : 0xF0; const float qh_val = il<2 ? 16.f : 256.f; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? qh_val : 0)) - ml; } } template void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) { const half d_all = xb->d; device const uint16_t * ql = (device const uint16_t *)xb->ql; device const uint16_t * qh = (device const uint16_t *)xb->qh; device const int8_t * scales = (device const int8_t *)xb->scales; ql = ql + 32*(il/8) + 16*((il/2)&1) + 8*(il&1); qh = qh + 16*(il/8) + 8*(il&1); float sc = scales[(il%2) + 2 * ((il/2))]; il = (il/2) & 3; const uint32_t kmask1 = il>1 ? (il>2 ? 0xC0C0C0C0 : 0x30303030) : (il>0 ? 0x0C0C0C0C : 0x03030303); const uint32_t kmask2 = il>1 ? 0xF0F0F0F0 : 0x0F0F0F0F; const float ml = d_all * sc * 32.f; const float dl0 = d_all * sc; const float dl1 = dl0 / 256.f; const float dl2 = dl0 / (256.f * 256.f); const float dl3 = dl0 / (256.f * 256.f * 256.f); const uint8_t shr_h = il>2 ? 2 : 0; const uint8_t shl_h = il>1 ? 0 : (il>0 ? 2 : 4); const uint8_t shr_l = il>1 ? 4 : 0; for (int i = 0; i < 4; ++i) { const uint32_t low = (ql[2*i] | (uint32_t)(ql[2*i+1] << 16)) & kmask2; const uint32_t high = (qh[2*i] | (uint32_t)(qh[2*i+1] << 16)) & kmask1; const uint32_t q = ((high << shl_h) >> shr_h) | (low >> shr_l); reg[i][0] = dl0 * ((half)(q & 0xFF)) - ml; reg[i][1] = dl1 * ((float)(q & 0xFF00)) - ml; reg[i][2] = dl2 * ((float)(q & 0xFF0000)) - ml; reg[i][3] = dl3 * ((float)(q & 0xFF000000)) - ml; } } template void dequantize_iq2_xxs(device const block_iq2_xxs * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const float d = xb->d; const int ib32 = il/2; il = il%2; // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 // each block of 32 needs 2 uint32_t's for the quants & scale, so 4 uint16_t's. 
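    // of the two uint32_t's assembled below, the first (aux32_g) carries four 8-bit indices into iq2xxs_grid,
    // while the second (aux32_s) packs 4x7-bit sign indices in its low 28 bits and a 4-bit scale in its top
    // 4 bits; that is where the dl = d * (0.5f + (aux32_s >> 28)) * 0.25f factor computed below comes from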
device const uint16_t * q2 = xb->qs + 4*ib32; const uint32_t aux32_g = q2[0] | (q2[1] << 16); const uint32_t aux32_s = q2[2] | (q2[3] << 16); thread const uint8_t * aux8 = (thread const uint8_t *)&aux32_g; const float dl = d * (0.5f + (aux32_s >> 28)) * 0.25f; constant uint8_t * grid = (constant uint8_t *)(iq2xxs_grid + aux8[2*il+0]); uint8_t signs = ksigns_iq2xs[(aux32_s >> 14*il) & 127]; for (int i = 0; i < 8; ++i) { reg[i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); } grid = (constant uint8_t *)(iq2xxs_grid + aux8[2*il+1]); signs = ksigns_iq2xs[(aux32_s >> (14*il+7)) & 127]; for (int i = 0; i < 8; ++i) { reg[2+i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); } } template void dequantize_iq2_xs(device const block_iq2_xs * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const float d = xb->d; const int ib32 = il/2; il = il%2; // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 device const uint16_t * q2 = xb->qs + 4*ib32; const float dl = d * (0.5f + ((xb->scales[ib32] >> 4*il) & 0xf)) * 0.25f; constant uint8_t * grid = (constant uint8_t *)(iq2xs_grid + (q2[2*il+0] & 511)); uint8_t signs = ksigns_iq2xs[q2[2*il+0] >> 9]; for (int i = 0; i < 8; ++i) { reg[i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); } grid = (constant uint8_t *)(iq2xs_grid + (q2[2*il+1] & 511)); signs = ksigns_iq2xs[q2[2*il+1] >> 9]; for (int i = 0; i < 8; ++i) { reg[2+i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f); } } template void dequantize_iq3_xxs(device const block_iq3_xxs * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const float d = xb->d; const int ib32 = il/2; il = il%2; // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 device const uint8_t * q3 = xb->qs + 8*ib32; device const uint16_t * gas = (device const uint16_t *)(xb->qs + QK_K/4) + 2*ib32; const uint32_t aux32 = gas[0] | (gas[1] << 16); const float dl = d * (0.5f + (aux32 >> 28)) * 0.5f; constant uint8_t * grid1 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+0]); constant uint8_t * grid2 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+1]); uint8_t signs = ksigns_iq2xs[(aux32 >> 14*il) & 127]; for (int i = 0; i < 4; ++i) { reg[0][i] = dl * grid1[i] * (signs & kmask_iq2xs[i+0] ? -1.f : 1.f); reg[1][i] = dl * grid2[i] * (signs & kmask_iq2xs[i+4] ? -1.f : 1.f); } grid1 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+2]); grid2 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+3]); signs = ksigns_iq2xs[(aux32 >> (14*il+7)) & 127]; for (int i = 0; i < 4; ++i) { reg[2][i] = dl * grid1[i] * (signs & kmask_iq2xs[i+0] ? -1.f : 1.f); reg[3][i] = dl * grid2[i] * (signs & kmask_iq2xs[i+4] ? -1.f : 1.f); } } template void dequantize_iq3_s(device const block_iq3_s * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const float d = xb->d; const int ib32 = il/2; il = il%2; // il = 0 or 1. 
il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 device const uint8_t * qs = xb->qs + 8*ib32; device const uint8_t * signs = xb->signs + 4*ib32 + 2*il; const uint8_t qh = xb->qh[ib32] >> 4*il; const float dl = d * (1 + 2*((xb->scales[ib32/2] >> 4*(ib32%2)) & 0xf)); constant uint8_t * grid1 = (constant uint8_t *)(iq3s_grid + (qs[4*il+0] | ((qh << 8) & 256))); constant uint8_t * grid2 = (constant uint8_t *)(iq3s_grid + (qs[4*il+1] | ((qh << 7) & 256))); for (int i = 0; i < 4; ++i) { reg[0][i] = dl * grid1[i] * select(1, -1, signs[0] & kmask_iq2xs[i+0]); reg[1][i] = dl * grid2[i] * select(1, -1, signs[0] & kmask_iq2xs[i+4]); } grid1 = (constant uint8_t *)(iq3s_grid + (qs[4*il+2] | ((qh << 6) & 256))); grid2 = (constant uint8_t *)(iq3s_grid + (qs[4*il+3] | ((qh << 5) & 256))); for (int i = 0; i < 4; ++i) { reg[2][i] = dl * grid1[i] * select(1, -1, signs[1] & kmask_iq2xs[i+0]); reg[3][i] = dl * grid2[i] * select(1, -1, signs[1] & kmask_iq2xs[i+4]); } } template void dequantize_iq2_s(device const block_iq2_s * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const float d = xb->d; const int ib32 = il/2; il = il%2; // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 device const uint8_t * qs = xb->qs + 4*ib32 + 2*il; device const uint8_t * signs = qs + QK_K/8; const uint8_t qh = xb->qh[ib32] >> 4*il; const float dl = d * (0.5f + ((xb->scales[ib32] >> 4*il) & 0xf)) * 0.25f; constant uint8_t * grid1 = (constant uint8_t *)(iq2s_grid + (qs[0] | ((qh << 8) & 0x300))); constant uint8_t * grid2 = (constant uint8_t *)(iq2s_grid + (qs[1] | ((qh << 6) & 0x300))); for (int i = 0; i < 8; ++i) { reg[i/4+0][i%4] = dl * grid1[i] * select(1, -1, signs[0] & kmask_iq2xs[i]); reg[i/4+2][i%4] = dl * grid2[i] * select(1, -1, signs[1] & kmask_iq2xs[i]); } } template void dequantize_iq1_s(device const block_iq1_s * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const int ib32 = il/2; il = il%2; const float d = xb->d; device const uint8_t * qs = xb->qs + 4*ib32 + 2*il; device const uint16_t * qh = xb->qh; const float dl = d * (2*((qh[ib32] >> 12) & 7) + 1); const float ml = dl * (qh[ib32] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA); const uint16_t h = qh[ib32] >> 6*il; constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((h << 8) & 0x700))); constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((h << 5) & 0x700))); for (int i = 0; i < 4; ++i) { reg[0][i] = dl * (grid1[i] & 0xf) + ml; reg[1][i] = dl * (grid1[i] >> 4) + ml; reg[2][i] = dl * (grid2[i] & 0xf) + ml; reg[3][i] = dl * (grid2[i] >> 4) + ml; } } template void dequantize_iq1_m(device const block_iq1_m * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const int ib32 = il/2; il = il%2; device const uint16_t * sc = (device const uint16_t *)xb->scales; iq1m_scale_t scale; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); const float d = scale.f16; device const uint8_t * qs = xb->qs + 4*ib32 + 2*il; device const uint8_t * qh = xb->qh + 2*ib32 + il; const float dl = d * (2*((sc[ib32/2] >> (6*(ib32%2)+3*il)) & 7) + 1); const float ml1 = dl * (qh[0] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA); const float ml2 = dl * (qh[0] & 0x80 ? 
-1 - IQ1M_DELTA : -1 + IQ1M_DELTA); constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700))); constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 4) & 0x700))); for (int i = 0; i < 4; ++i) { reg[0][i] = dl * (grid1[i] & 0xf) + ml1; reg[1][i] = dl * (grid1[i] >> 4) + ml1; reg[2][i] = dl * (grid2[i] & 0xf) + ml2; reg[3][i] = dl * (grid2[i] >> 4) + ml2; } } template void dequantize_iq4_nl(device const block_iq4_nl * xb, short il, thread type4x4 & reg) { device const uint16_t * q4 = (device const uint16_t *)xb->qs; const float d = xb->d; uint32_t aux32; thread const uint8_t * q8 = (thread const uint8_t *)&aux32; for (int i = 0; i < 4; ++i) { aux32 = ((q4[2*i] | (q4[2*i+1] << 16)) >> 4*il) & 0x0f0f0f0f; reg[i][0] = d * kvalues_iq4nl_f[q8[0]]; reg[i][1] = d * kvalues_iq4nl_f[q8[1]]; reg[i][2] = d * kvalues_iq4nl_f[q8[2]]; reg[i][3] = d * kvalues_iq4nl_f[q8[3]]; } } template void dequantize_iq4_nl_t4(device const block_iq4_nl * xb, short il, thread type4 & reg) { device const uint16_t * q4 = (device const uint16_t *)xb->qs; const float d = xb->d; uint32_t aux32; thread const uint8_t * q8 = (thread const uint8_t *)&aux32; aux32 = ((q4[2*(il%4)] | (q4[2*(il%4)+1] << 16)) >> 4*(il/4)) & 0x0f0f0f0f; reg[0] = d * kvalues_iq4nl_f[q8[0]]; reg[1] = d * kvalues_iq4nl_f[q8[1]]; reg[2] = d * kvalues_iq4nl_f[q8[2]]; reg[3] = d * kvalues_iq4nl_f[q8[3]]; } template void dequantize_iq4_xs(device const block_iq4_xs * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 const int ib32 = il/2; il = il%2; // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16 device const uint32_t * q4 = (device const uint32_t *)xb->qs + 4*ib32; const int ls = ((xb->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((xb->scales_h >> 2*ib32) & 3) << 4); const float d = (float)xb->d * (ls - 32); uint32_t aux32; thread const uint8_t * q8 = (thread const uint8_t *)&aux32; for (int i = 0; i < 4; ++i) { aux32 = (q4[i] >> 4*il) & 0x0f0f0f0f; reg[i][0] = d * kvalues_iq4nl_f[q8[0]]; reg[i][1] = d * kvalues_iq4nl_f[q8[1]]; reg[i][2] = d * kvalues_iq4nl_f[q8[2]]; reg[i][3] = d * kvalues_iq4nl_f[q8[3]]; } } enum ggml_sort_order { GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC, }; // general-purpose kernel for addition, subtraction, multiplication and division of two tensors // pros: works for non-contiguous tensors, supports broadcast across all dims // cons: not very efficient template kernel void kernel_add_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig.z; const int i02 = tgpig.y; const int i01 = tgpig.x; const int i13 = i03%args.ne13; const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs); device float * dst_ptr = (device float *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs); device const float * src1_ptr[F]; for (short j = 0; j < F; ++j) { src1_ptr[j] = (device const float *) (src1 + args.o1[j] + i13*args.nb13 + i12*args.nb12 + i11*args.nb11); } for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; float res = src0_ptr[i0]; #pragma unroll for (short j = 0; j < F; ++j) { res += src1_ptr[j][i10]; } 
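        // at this point res holds src0 plus all F fused src1 operands for this element;
        // the single store below writes the combined result, so one pass of this kernel
        // replaces F separate binary-add dispatches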
dst_ptr[i0] = res; } } typedef decltype(kernel_add_fuse_impl<2>) kernel_add_fuse_t; template [[host_name("kernel_add_fuse_1")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<1>; template [[host_name("kernel_add_fuse_2")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<2>; template [[host_name("kernel_add_fuse_3")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<3>; template [[host_name("kernel_add_fuse_4")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<4>; template [[host_name("kernel_add_fuse_5")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<5>; template [[host_name("kernel_add_fuse_6")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<6>; template [[host_name("kernel_add_fuse_7")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<7>; template [[host_name("kernel_add_fuse_8")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<8>; kernel void kernel_sub_fuse_1( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig.z; const int i02 = tgpig.y; const int i01 = tgpig.x; const int i13 = i03%args.ne13; const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) - *((device float *)(src1_ptr + i10*args.nb10)); } } kernel void kernel_mul_fuse_1( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig.z; const int i02 = tgpig.y; const int i01 = tgpig.x; const int i13 = i03%args.ne13; const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; if (args.ne10 == 1) { const float x = *((device float *)(src1_ptr)); for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) * x; } } else { for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) * *((device float *)(src1_ptr + i10*args.nb10)); } } } kernel void kernel_div_fuse_1( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig.z; const int i02 = tgpig.y; const int i01 = tgpig.x; const int i13 = i03%args.ne13; const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; device const char * src1_ptr = src1 + 
i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; if (args.ne10 == 1) { const float x = 1.0f / *((device float *)(src1_ptr)); for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) * x; } } else { for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) / *((device float *)(src1_ptr + i10*args.nb10)); } } } kernel void kernel_add_id( constant ggml_metal_kargs_add_id & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i1 = tgpig.x; const int i2 = tgpig.y; const int i11 = *((device const int32_t *) (src2 + i1*sizeof(int32_t) + i2*args.nb21)); const size_t nb1 = args.ne0 * sizeof(float); const size_t nb2 = args.ne1 * nb1; device float * dst_row = (device float *)((device char *)dst + i1*nb1 + i2*nb2); device const float * src0_row = (device const float *)((device char *)src0 + i1*args.nb01 + i2*args.nb02); device const float * src1_row = (device const float *)((device char *)src1 + i11*args.nb11); for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { dst_row[i0] = src0_row[i0] + src1_row[i0]; } } template kernel void kernel_repeat( constant ggml_metal_kargs_repeat & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i3 = tgpig.z; const int i2 = tgpig.y; const int i1 = tgpig.x; const int i03 = i3%args.ne03; const int i02 = i2%args.ne02; const int i01 = i1%args.ne01; device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01; device char * dst_ptr = dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i00 = i0%args.ne00; *((device T *)(dst_ptr + i0*args.nb0)) = *((device T *)(src0_ptr + i00*args.nb00)); } } typedef decltype(kernel_repeat) kernel_repeat_t; template [[host_name("kernel_repeat_f32")]] kernel kernel_repeat_t kernel_repeat; template [[host_name("kernel_repeat_f16")]] kernel kernel_repeat_t kernel_repeat; template [[host_name("kernel_repeat_i32")]] kernel kernel_repeat_t kernel_repeat; template [[host_name("kernel_repeat_i16")]] kernel kernel_repeat_t kernel_repeat; // assumption: src1 is a row // broadcast src1 into src0 template kernel void kernel_add_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint tpig[[thread_position_in_grid]]) { const uint nb = args.ne00/4; const uint i = tpig % nb; device const float4 * src0_row = (device const float4 *) (src0); device float4 * dst_row = (device float4 *) (dst); float4 res = src0_row[tpig]; #pragma unroll(F) for (short j = 0; j < F; ++j) { res += ((device const float4 *) (src1 + args.o1[j]))[i]; } dst_row[tpig] = res; } typedef decltype(kernel_add_row_c4_fuse_impl<1>) kernel_add_row_c4_fuse_t; template [[host_name("kernel_add_row_c4_fuse_1")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<1>; template [[host_name("kernel_add_row_c4_fuse_2")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<2>; template 
[[host_name("kernel_add_row_c4_fuse_3")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<3>; template [[host_name("kernel_add_row_c4_fuse_4")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<4>; template [[host_name("kernel_add_row_c4_fuse_5")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<5>; template [[host_name("kernel_add_row_c4_fuse_6")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<6>; template [[host_name("kernel_add_row_c4_fuse_7")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<7>; template [[host_name("kernel_add_row_c4_fuse_8")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<8>; template kernel void kernel_sub_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint tpig[[thread_position_in_grid]]) { const uint nb = args.ne00/4; const uint i = tpig % nb; device const float4 * src0_row = (device const float4 *) (src0); device float4 * dst_row = (device float4 *) (dst); device const float4 * src1_row[F]; for (short j = 0; j < F; ++j) { src1_row[j] = (device const float4 *) (src1 + args.o1[j]); } float4 res = src0_row[tpig]; #pragma unroll(F) for (short j = 0; j < F; ++j) { res -= src1_row[j][i]; } dst_row[tpig] = res; } typedef decltype(kernel_sub_row_c4_fuse_impl<1>) kernel_sub_row_c4_fuse_t; template [[host_name("kernel_sub_row_c4_fuse_1")]] kernel kernel_sub_row_c4_fuse_t kernel_sub_row_c4_fuse_impl<1>; template kernel void kernel_mul_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint tpig[[thread_position_in_grid]]) { const uint nb = args.ne00/4; const uint i = tpig % nb; device const float4 * src0_row = (device const float4 *) (src0); device float4 * dst_row = (device float4 *) (dst); device const float4 * src1_row[F]; for (short j = 0; j < F; ++j) { src1_row[j] = (device const float4 *) (src1 + args.o1[j]); } float4 res = src0_row[tpig]; #pragma unroll(F) for (short j = 0; j < F; ++j) { res *= src1_row[j][i]; } dst_row[tpig] = res; } typedef decltype(kernel_mul_row_c4_fuse_impl<1>) kernel_mul_row_c4_fuse_t; template [[host_name("kernel_mul_row_c4_fuse_1")]] kernel kernel_mul_row_c4_fuse_t kernel_mul_row_c4_fuse_impl<1>; template kernel void kernel_div_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, device char * dst, uint tpig[[thread_position_in_grid]]) { const uint nb = args.ne00/4; const uint i = tpig % nb; device const float4 * src0_row = (device const float4 *) (src0); device float4 * dst_row = (device float4 *) (dst); device const float4 * src1_row[F]; for (short j = 0; j < F; ++j) { src1_row[j] = (device const float4 *) (src1 + args.o1[j]); } float4 res = src0_row[tpig]; #pragma unroll(F) for (short j = 0; j < F; ++j) { res /= src1_row[j][i]; } dst_row[tpig] = res; } typedef decltype(kernel_div_row_c4_fuse_impl<1>) kernel_div_row_c4_fuse_t; template [[host_name("kernel_div_row_c4_fuse_1")]] kernel kernel_div_row_c4_fuse_t kernel_div_row_c4_fuse_impl<1>; kernel void kernel_scale_f32( constant ggml_metal_kargs_scale & args, device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * args.scale + args.bias; } kernel void kernel_scale_f32_4( constant ggml_metal_kargs_scale & args, device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * args.scale + args.bias; } kernel 
void kernel_fill_f32( constant ggml_metal_kargs_fill & args, device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = args.val; } kernel void kernel_fill_f32_4( constant ggml_metal_kargs_fill & args, device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = args.val; } kernel void kernel_clamp_f32( constant ggml_metal_kargs_clamp & args, device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = clamp(src0[tpig], args.min, args.max); } kernel void kernel_clamp_f32_4( constant ggml_metal_kargs_clamp & args, device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = clamp(src0[tpig], args.min, args.max); } kernel void kernel_relu_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = max(0.0f, src0[tpig]); } kernel void kernel_relu_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = max(0.0f, src0[tpig]); } kernel void kernel_sigmoid_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = 1.0f / (1.0f + exp(-src0[tpig])); } kernel void kernel_sigmoid_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = 1.0f / (1.0f + exp(-src0[tpig])); } kernel void kernel_tanh_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = precise::tanh(src0[tpig]); } kernel void kernel_tanh_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = precise::tanh(src0[tpig]); } constant float GELU_COEF_A = 0.044715f; constant float GELU_QUICK_COEF = -1.702f; constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; constant float SQRT_2_INV = 0.70710678118654752440084436210484f; kernel void kernel_gelu_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { device const float & x = src0[tpig]; dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } kernel void kernel_gelu_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; // BEWARE !!! // Simply using "tanh" instead of "precise::tanh" will sometimes results in NaNs! 
    // This was observed with Falcon 7B and 40B models
    //
    dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}

kernel void kernel_gelu_quick_f32(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    device const float & x = src0[tpig];

    dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x)));
}

kernel void kernel_gelu_quick_f32_4(
        device const float4 * src0,
        device       float4 * dst,
        uint tpig[[thread_position_in_grid]]) {
    device const float4 & x = src0[tpig];

    dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x)));
}

// based on Abramowitz and Stegun formula 7.1.26 or similar Hastings' approximation:
// erf(x) ~= sign(x) * (1 - (a1*t + a2*t^2 + a3*t^3 + a4*t^4 + a5*t^5)*exp(-x*x)), with t = 1/(1 + p*|x|)
// ref: https://www.johndcook.com/blog/python_erf/
constant float p_erf  =  0.3275911f;
constant float a1_erf =  0.254829592f;
constant float a2_erf = -0.284496736f;
constant float a3_erf =  1.421413741f;
constant float a4_erf = -1.453152027f;
constant float a5_erf =  1.061405429f;

template <typename T>
T erf_approx(T x) {
    T sign_x = sign(x);
    x = fabs(x);
    T t = 1.0f / (1.0f + p_erf * x);
    T y = 1.0f - (((((a5_erf * t + a4_erf) * t) + a3_erf) * t + a2_erf) * t + a1_erf) * t * exp(-x * x);
    return sign_x * y;
}

kernel void kernel_gelu_erf_f32(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    device const float & x = src0[tpig];

    dst[tpig] = 0.5f*x*(1.0f+erf_approx(x*SQRT_2_INV));
}

kernel void kernel_gelu_erf_f32_4(
        device const float4 * src0,
        device       float4 * dst,
        uint tpig[[thread_position_in_grid]]) {
    device const float4 & x = src0[tpig];

    dst[tpig] = 0.5f*x*(1.0f+erf_approx(x*SQRT_2_INV));
}

kernel void kernel_silu_f32(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    device const float & x = src0[tpig];
    dst[tpig] = x / (1.0f + exp(-x));
}

kernel void kernel_silu_f32_4(
        device const float4 * src0,
        device       float4 * dst,
        uint tpig[[thread_position_in_grid]]) {
    device const float4 & x = src0[tpig];
    dst[tpig] = x / (1.0f + exp(-x));
}

kernel void kernel_elu_f32(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    const float x = src0[tpig];
    dst[tpig] = (x > 0.0f) ? x : (exp(x) - 1.0f);
}

kernel void kernel_elu_f32_4(
        device const float4 * src0,
        device       float4 * dst,
        uint tpig[[thread_position_in_grid]]) {
    const float4 x = src0[tpig];
    dst[tpig][0] = (x[0] > 0.0f) ? x[0] : (exp(x[0]) - 1.0f);
    dst[tpig][1] = (x[1] > 0.0f) ? x[1] : (exp(x[1]) - 1.0f);
    dst[tpig][2] = (x[2] > 0.0f) ? x[2] : (exp(x[2]) - 1.0f);
    dst[tpig][3] = (x[3] > 0.0f) ?
x[3] : (exp(x[3]) - 1.0f); } kernel void kernel_sqr_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * src0[tpig]; } kernel void kernel_sqr_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * src0[tpig]; } kernel void kernel_sqrt_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = sqrt(src0[tpig]); } kernel void kernel_sqrt_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = sqrt(src0[tpig]); } kernel void kernel_sin_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = sin(src0[tpig]); } kernel void kernel_sin_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = sin(src0[tpig]); } kernel void kernel_cos_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = cos(src0[tpig]); } kernel void kernel_cos_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = cos(src0[tpig]); } kernel void kernel_log_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = log(src0[tpig]); } kernel void kernel_log_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = log(src0[tpig]); } kernel void kernel_neg_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = -src0[tpig]; } kernel void kernel_neg_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = -src0[tpig]; } kernel void kernel_abs_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = fabs(src0[tpig]); } kernel void kernel_abs_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = fabs(src0[tpig]); } kernel void kernel_sgn_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = sign(src0[tpig]); } kernel void kernel_sgn_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = sign(src0[tpig]); } kernel void kernel_step_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = step(0.0f, src0[tpig]); } kernel void kernel_step_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = step(0.0f, src0[tpig]); } kernel void kernel_hardswish_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { const float x = src0[tpig]; dst[tpig] = x * fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); } kernel void kernel_hardswish_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { const float4 x = src0[tpig]; dst[tpig] = x * fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); } kernel void kernel_hardsigmoid_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { const float x = src0[tpig]; dst[tpig] = fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); } kernel void kernel_hardsigmoid_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { const float4 x = src0[tpig]; dst[tpig] = fmin(1.0f, fmax(0.0f, (x + 
3.0f) / 6.0f)); } kernel void kernel_exp_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = exp(src0[tpig]); } kernel void kernel_exp_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = exp(src0[tpig]); } kernel void kernel_softplus_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { device const float & x = src0[tpig]; dst[tpig] = select(log(1.0f + exp(x)), x, x > 20.0f); } kernel void kernel_softplus_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; dst[tpig] = select(log(1.0f + exp(x)), x, x > 20.0f); } kernel void kernel_expm1_f32( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = exp(src0[tpig]) - 1.0f; } kernel void kernel_expm1_f32_4( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = exp(src0[tpig]) - 1.0f; } kernel void kernel_reglu_f32( constant ggml_metal_kargs_glu & args, device const char * src0, device const char * src1, device char * dst, uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; dst_row[i0] = x0*x1*(x0 > 0.0f); } } kernel void kernel_geglu_f32( constant ggml_metal_kargs_glu & args, device const char * src0, device const char * src1, device char * dst, uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float gelu = 0.5f*x0*(1.0f + precise::tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0))); dst_row[i0] = gelu*x1; } } kernel void kernel_swiglu_f32( constant ggml_metal_kargs_glu & args, device const char * src0, device const char * src1, device char * dst, uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float silu = x0 / (1.0f + exp(-x0)); dst_row[i0] = silu*x1; } } kernel void kernel_swiglu_oai_f32( constant ggml_metal_kargs_glu & args, device const char * src0, device const char * src1, device char * dst, uint tgpig[[threadgroup_position_in_grid]], uint 
tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { float x0 = src0_row[i0]; float x1 = src1_row[i0]; x0 = min(x0, args.limit); x1 = max(min(x1, args.limit), -args.limit); float out_glu = x0 / (1.0f + exp(-x0 * args.alpha)); out_glu = out_glu * (1.0f + x1); dst_row[i0] = out_glu; } } kernel void kernel_geglu_erf_f32( constant ggml_metal_kargs_glu & args, device const char * src0, device const char * src1, device char * dst, uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float gelu_erf = 0.5f*x0*(1.0f+erf_approx(x0*SQRT_2_INV)); dst_row[i0] = gelu_erf*x1; } } kernel void kernel_geglu_quick_f32( constant ggml_metal_kargs_glu & args, device const char * src0, device const char * src1, device char * dst, uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float gelu_quick = x0*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x0))); dst_row[i0] = gelu_quick*x1; } } kernel void kernel_op_sum_f32( constant ggml_metal_kargs_sum & args, device const float * src0, device float * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { if (args.np == 0) { return; } // TODO: become function constant const uint nsg = (ntg.x + 31) / 32; float sumf = 0; for (uint64_t i0 = tpitg.x; i0 < args.np; i0 += ntg.x) { sumf += src0[i0]; } sumf = simd_sum(sumf); if (tiisg == 0) { shmem_f32[sgitg] = sumf; } threadgroup_barrier(mem_flags::mem_threadgroup); float total = 0; if (sgitg == 0) { float v = 0; if (tpitg.x < nsg) { v = shmem_f32[tpitg.x]; } total = simd_sum(v); if (tpitg.x == 0) { dst[0] = total; } } } template kernel void kernel_sum_rows( constant ggml_metal_kargs_sum_rows & args, device const float * src0, device float * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { int64_t i3 = tgpig.z; int64_t i2 
= tgpig.y; int64_t i1 = tgpig.x; if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) { return; } if (sgitg == 0) { shmem_f32[tiisg] = 0.0f; } device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03); device float * dst_row = (device float *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3); float sumf = 0; for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) { sumf += src_row[i0]; } sumf = simd_sum(sumf); threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { shmem_f32[sgitg] = sumf; } threadgroup_barrier(mem_flags::mem_threadgroup); sumf = shmem_f32[tiisg]; sumf = simd_sum(sumf); if (tpitg.x == 0) { dst_row[0] = norm ? sumf / args.ne00 : sumf; } } typedef decltype(kernel_sum_rows) kernel_sum_rows_t; template [[host_name("kernel_sum_rows_f32")]] kernel kernel_sum_rows_t kernel_sum_rows; template [[host_name("kernel_mean_f32")]] kernel kernel_sum_rows_t kernel_sum_rows; template kernel void kernel_cumsum_blk( constant ggml_metal_kargs_cumsum_blk & args, device const char * src0, device char * tmp, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int ib = tgpig[0]/args.ne01; const int i00 = ib*ntg.x; const int i01 = tgpig[0]%args.ne01; const int i02 = tgpig[1]; const int i03 = tgpig[2]; device const float * src0_row = (device const float *) (src0 + args.nb01*i01 + args.nb02*i02 + args.nb03*i03); threadgroup float * shmem_f32 = (threadgroup float *) shmem; float v = 0.0f; if (i00 + tpitg.x < args.ne00) { v = src0_row[i00 + tpitg.x]; } float s = simd_prefix_inclusive_sum(v); if (tiisg == N_SIMDWIDTH - 1) { shmem_f32[sgitg] = s; } threadgroup_barrier(mem_flags::mem_threadgroup); if (sgitg == 0) { shmem_f32[tiisg] = simd_prefix_exclusive_sum(shmem_f32[tiisg]); } threadgroup_barrier(mem_flags::mem_threadgroup); s += shmem_f32[sgitg]; device float * dst_row = (device float *) dst + args.ne00*i01 + args.ne00*args.ne01*i02 + args.ne00*args.ne01*args.ne02*i03; if (i00 + tpitg.x < args.ne00) { dst_row[i00 + tpitg.x] = s; } if (args.outb && tpitg.x == ntg.x - 1) { device float * tmp_row = (device float *) tmp + args.net0*i01 + args.net0*args.net1*i02 + args.net0*args.net1*args.net2*i03; tmp_row[ib] = s; } } typedef decltype(kernel_cumsum_blk) kernel_cumsum_blk_t; template [[host_name("kernel_cumsum_blk_f32")]] kernel kernel_cumsum_blk_t kernel_cumsum_blk; template kernel void kernel_cumsum_add( constant ggml_metal_kargs_cumsum_add & args, device const char * tmp, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int ib = tgpig[0]/args.ne01; if (ib == 0) { return; } const int i00 = ib*ntg.x; const int i01 = tgpig[0]%args.ne01; const int i02 = tgpig[1]; const int i03 = tgpig[2]; device const float * tmp_row = (device const float *) (tmp + args.nbt1*i01 + args.nbt2*i02 + args.nbt3*i03); device float * dst_row = (device float *) dst + args.ne00*i01 + args.ne00*args.ne01*i02 + args.ne00*args.ne01*args.ne02*i03; if (i00 + tpitg.x < args.ne00) { dst_row[i00 + tpitg.x] += tmp_row[ib - 1]; } } typedef decltype(kernel_cumsum_add) kernel_cumsum_add_t; template 
[[host_name("kernel_cumsum_add_f32")]] kernel kernel_cumsum_add_t kernel_cumsum_add; template bool _ggml_vec_tri_cmp(const int i, const int r); template<> bool _ggml_vec_tri_cmp(const int i, const int r) { return i < r; } template<> bool _ggml_vec_tri_cmp(const int i, const int r) { return i <= r; } template<> bool _ggml_vec_tri_cmp(const int i, const int r) { return i > r; } template<> bool _ggml_vec_tri_cmp(const int i, const int r) { return i >= r; } template kernel void kernel_tri( constant ggml_metal_kargs_tri & args, device const char * src0, device const char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i3 = tgpig.z; const int i2 = tgpig.y; const int i1 = tgpig.x; if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) { return; } device const T * src_row = (device const T *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03); device T * dst_row = (device T *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3); // Each thread is a single element of the row if ne00 < max threads per // threadgroup, so this will loop once for each index that this thread is // responsible for for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) { // Use the comparison as a mask for branchless dst_row[i0] = static_cast(_ggml_vec_tri_cmp(i0, i1)) * src_row[i0]; } } typedef decltype(kernel_tri) kernel_tri_t; template [[host_name("kernel_tri_f32_0")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f32_1")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f32_2")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f32_3")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f16_0")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f16_1")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f16_2")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_f16_3")]] kernel kernel_tri_t kernel_tri; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_tri_bf16_0")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_bf16_1")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_bf16_2")]] kernel kernel_tri_t kernel_tri; template [[host_name("kernel_tri_bf16_3")]] kernel kernel_tri_t kernel_tri; #endif template kernel void kernel_soft_max( constant ggml_metal_kargs_soft_max & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, threadgroup float * buf [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint3 tptg[[threads_per_threadgroup]]) { const int32_t i03 = tgpig.z; const int32_t i02 = tgpig.y; const int32_t i01 = tgpig.x; const int32_t i13 = i03%args.ne13; const int32_t i12 = i02%args.ne12; const int32_t i11 = i01; device const float * psrc0 = (device const float *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr; device const float * psrc2 = src2 != src0 ? 
(device const float *) (src2) : nullptr; device float * pdst = (device float *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3); float slope = 1.0f; // ALiBi if (args.max_bias > 0.0f) { const int32_t h = i02; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; slope = pow(base, exp); } // parallel max float lmax = psrc2 ? psrc2[i02] : -INFINITY; for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { lmax = MAX(lmax, psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)); } // find the max value in the block float max_val = simd_max(lmax); if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = max_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = buf[tiisg]; max_val = simd_max(max_val); } // parallel sum float lsum = 0.0f; for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { const float exp_psrc0 = exp((psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val); lsum += exp_psrc0; pdst[i00] = exp_psrc0; } // This barrier fixes a failing test // ref: https://github.com/ggml-org/ggml/pull/621#discussion_r1425156335 threadgroup_barrier(mem_flags::mem_none); float sum = simd_sum(lsum); if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); sum = buf[tiisg]; sum = simd_sum(sum); } if (psrc2) { sum += exp(psrc2[i02] - max_val); } const float inv_sum = 1.0f/sum; for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { pdst[i00] *= inv_sum; } } template kernel void kernel_soft_max_4( constant ggml_metal_kargs_soft_max & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, threadgroup float * buf [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint3 tptg[[threads_per_threadgroup]]) { const int32_t i03 = tgpig.z; const int32_t i02 = tgpig.y; const int32_t i01 = tgpig.x; const int32_t i13 = i03%args.ne13; const int32_t i12 = i02%args.ne12; const int32_t i11 = i01; device const float4 * psrc4 = (device const float4 *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr; device const float * psrc2 = src2 != src0 ? (device const float * ) (src2) : nullptr; device float4 * pdst4 = (device float4 *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3); float slope = 1.0f; if (args.max_bias > 0.0f) { const int32_t h = i02; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; slope = pow(base, exp); } // parallel max float4 lmax4 = psrc2 ? psrc2[i02] : -INFINITY; for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { lmax4 = fmax(lmax4, psrc4[i00]*args.scale + (float4)((pmask ? 
slope*pmask[i00] : 0.0f))); } const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); float max_val = simd_max(lmax); if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = max_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = buf[tiisg]; max_val = simd_max(max_val); } // parallel sum float4 lsum4 = 0.0f; for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { const float4 exp_psrc4 = exp((psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))) - max_val); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3]; // This barrier fixes a failing test // ref: https://github.com/ggml-org/ggml/pull/621#discussion_r1425156335 threadgroup_barrier(mem_flags::mem_none); float sum = simd_sum(lsum); if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); sum = buf[tiisg]; sum = simd_sum(sum); } if (psrc2) { sum += exp(psrc2[i02] - max_val); } const float inv_sum = 1.0f/sum; for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { pdst4[i00] *= inv_sum; } } typedef decltype(kernel_soft_max) kernel_soft_max_t; typedef decltype(kernel_soft_max_4) kernel_soft_max_4_t; template [[host_name("kernel_soft_max_f16")]] kernel kernel_soft_max_t kernel_soft_max; template [[host_name("kernel_soft_max_f32")]] kernel kernel_soft_max_t kernel_soft_max; template [[host_name("kernel_soft_max_f16_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4; template [[host_name("kernel_soft_max_f32_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4; // ref: ggml.c:ggml_compute_forward_ssm_conv_f32 kernel void kernel_ssm_conv_f32_f32( constant ggml_metal_kargs_ssm_conv & args, device const void * src0, device const void * src1, device float * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t ir = tgpig.x; const int64_t i2 = tgpig.y; const int64_t i3 = tgpig.z; const int64_t nc = args.ne10; //const int64_t ncs = args.ne00; //const int64_t nr = args.ne01; //const int64_t n_t = args.ne1; //const int64_t n_s = args.ne2; device const float * s = (device const float *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02); device const float * c = (device const float *) ((device const char *) src1 + ir*args.nb11); device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2); float sumf = 0.0f; for (int64_t i0 = 0; i0 < nc; ++i0) { sumf += s[i0] * c[i0]; } x[0] = sumf; } kernel void kernel_ssm_conv_f32_f32_4( constant ggml_metal_kargs_ssm_conv & args, device const void * src0, device const void * src1, device float * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t ir = tgpig.x; const int64_t i2 = tgpig.y; const int64_t i3 = tgpig.z; const int64_t nc = args.ne10; //const int64_t ncs = args.ne00; //const int64_t nr = args.ne01; //const int64_t n_t = args.ne1; //const int64_t n_s = args.ne2; device const float4 * s = (device const float4 *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02); device const float4 * c = (device const float4 *) ((device const char *) src1 + ir*args.nb11); device float * x = (device float 
*) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2); float sumf = 0.0f; for (int64_t i0 = 0; i0 < nc/4; ++i0) { sumf += dot(s[i0], c[i0]); } x[0] = sumf; } constant short FC_ssm_conv_bs [[function_constant(FC_SSM_CONV + 0)]]; // Batched version: each threadgroup processes multiple tokens for better efficiency // Thread layout: each thread handles one token, threadgroup covers BATCH_SIZE tokens kernel void kernel_ssm_conv_f32_f32_batched( constant ggml_metal_kargs_ssm_conv & args, device const void * src0, device const void * src1, device float * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { // tgpig.x = row index (ir) // tgpig.y = batch of tokens (i2_base / BATCH_SIZE) // tgpig.z = sequence index (i3) // tpitg.x = thread within batch (0..BATCH_SIZE-1) const short BATCH_SIZE = FC_ssm_conv_bs; const int64_t ir = tgpig.x; const int64_t i2_base = tgpig.y * BATCH_SIZE; const int64_t i3 = tgpig.z; const int64_t i2_off = tpitg.x; const int64_t i2 = i2_base + i2_off; const int64_t nc = args.ne10; // conv kernel size (typically 4) const int64_t n_t = args.ne1; // number of tokens // Bounds check for partial batches at the end if (i2 >= n_t) { return; } // Load conv weights (shared across all tokens for this row) device const float * c = (device const float *) ((device const char *) src1 + ir*args.nb11); // Load source for this specific token device const float * s = (device const float *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02); // Output location for this token device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2); float sumf = 0.0f; for (int64_t i0 = 0; i0 < nc; ++i0) { sumf += s[i0] * c[i0]; } x[0] = sumf; } kernel void kernel_ssm_conv_f32_f32_batched_4( constant ggml_metal_kargs_ssm_conv & args, device const void * src0, device const void * src1, device float * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { // tgpig.x = row index (ir) // tgpig.y = batch of tokens (i2_base / BATCH_SIZE) // tgpig.z = sequence index (i3) // tpitg.x = thread within batch (0..BATCH_SIZE-1) const short BATCH_SIZE = FC_ssm_conv_bs; const int64_t ir = tgpig.x; const int64_t i2_base = tgpig.y * BATCH_SIZE; const int64_t i3 = tgpig.z; const int64_t i2_off = tpitg.x; const int64_t i2 = i2_base + i2_off; const int64_t nc = args.ne10; // conv kernel size (typically 4) const int64_t n_t = args.ne1; // number of tokens // Bounds check for partial batches at the end if (i2 >= n_t) { return; } // Load conv weights (shared across all tokens for this row) device const float4 * c = (device const float4 *) ((device const char *) src1 + ir*args.nb11); // Load source for this specific token device const float4 * s = (device const float4 *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02); // Output location for this token device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2); float sumf = 0.0f; for (int64_t i0 = 0; i0 < nc/4; ++i0) { sumf += dot(s[i0], c[i0]); } x[0] = sumf; } // ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-2 part // Optimized version: reduces redundant memory loads by having one thread load shared values kernel void kernel_ssm_scan_f32( constant ggml_metal_kargs_ssm_scan & args, device const void * src0, device const void * src1, device const void * src2, device const 
void * src3, device const void * src4, device const void * src5, device const void * src6, device float * dst, threadgroup float * shared [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgptg[[simdgroups_per_threadgroup]], uint3 tgpg[[threadgroups_per_grid]]) { constexpr short NW = N_SIMDWIDTH; // Shared memory layout: // [0..sgptg*NW-1]: partial sums for reduction (existing) // [sgptg*NW..sgptg*NW+sgptg-1]: pre-computed x_dt values for each token in batch // [sgptg*NW+sgptg..sgptg*NW+2*sgptg-1]: pre-computed dA values for each token in batch threadgroup float * shared_sums = shared; threadgroup float * shared_x_dt = shared + sgptg * NW; threadgroup float * shared_dA = shared + sgptg * NW + sgptg; shared_sums[tpitg.x] = 0.0f; const int32_t i0 = tpitg.x; const int32_t i1 = tgpig.x; const int32_t ir = tgpig.y; // current head const int32_t i3 = tgpig.z; // current seq const int32_t nc = args.d_state; const int32_t nr = args.d_inner; const int32_t nh = args.n_head; const int32_t ng = args.n_group; const int32_t n_t = args.n_seq_tokens; const int32_t s_off = args.s_off; device const int32_t * ids = (device const int32_t *) src6; device const float * s0_buff = (device const float *) ((device const char *) src0 + ir*args.nb02 + ids[i3]*args.nb03); device float * s_buff = (device float *) ((device char *) dst + ir*args.nb02 + i3*args.nb03 + s_off); const int32_t i = i0 + i1*nc; const int32_t g = ir / (nh / ng); // repeat_interleave float s0 = s0_buff[i]; float s = 0.0f; device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); // {ne30, nh} const float A0 = A[i0%args.ne30]; device const float * x = (device const float *)((device const char *) src1 + i1*args.nb10 + ir*args.nb11 + i3*args.nb13); // {dim, nh, nt, ns} device const float * dt = (device const float *)((device const char *) src2 + ir*args.nb20 + i3*args.nb22); // {nh, nt, ns} device const float * B = (device const float *)((device const char *) src4 + g*args.nb41 + i3*args.nb43); // {d_state, ng, nt, ns} device const float * C = (device const float *)((device const char *) src5 + g*args.nb51 + i3*args.nb53); // {d_state, ng, nt, ns} device float * y = dst + (i1 + ir*(nr) + i3*(n_t*nh*nr)); // {dim, nh, nt, ns} for (int i2 = 0; i2 < n_t; i2 += sgptg) { threadgroup_barrier(mem_flags::mem_threadgroup); // Pre-compute x_dt and dA for this batch of tokens // Only first sgptg threads do the loads and expensive math if (i0 < sgptg && i2 + i0 < n_t) { // ns12 and ns21 are element strides (nb12/nb10, nb21/nb20) device const float * x_t = x + i0 * args.ns12; device const float * dt_t = dt + i0 * args.ns21; const float dt0 = dt_t[0]; const float dtsp = dt0 <= 20.0f ? 
log(1.0f + exp(dt0)) : dt0; shared_x_dt[i0] = x_t[0] * dtsp; shared_dA[i0] = dtsp; // Store dtsp, compute exp(dtsp * A0) per-thread since A0 varies } threadgroup_barrier(mem_flags::mem_threadgroup); for (int t = 0; t < sgptg && i2 + t < n_t; t++) { const float x_dt = shared_x_dt[t]; const float dA = exp(shared_dA[t] * A0); s = (s0 * dA) + (B[i0] * x_dt); const float sumf = simd_sum(s * C[i0]); if (tiisg == 0) { shared_sums[t*NW + sgitg] = sumf; } // recurse s0 = s; B += args.ns42; C += args.ns52; } // Advance pointers for next batch x += sgptg * args.ns12; dt += sgptg * args.ns21; threadgroup_barrier(mem_flags::mem_threadgroup); const float sumf = simd_sum(shared_sums[sgitg*NW + tiisg]); if (tiisg == 0 && i2 + sgitg < n_t) { y[sgitg*nh*nr] = sumf; } y += sgptg*nh*nr; } s_buff[i] = s; } kernel void kernel_rwkv_wkv6_f32( device const float * k, device const float * v, device const float * r, device const float * tf, device const float * td, device const float * state_in, device float * dst, constant uint & B, constant uint & T, constant uint & C, constant uint & H, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const uint head_size = 64; // TODO: support head_size = 128 const uint batch_id = tgpig.x / H; const uint head_id = tgpig.x % H; const uint tid = tpitg.x; if (batch_id >= B || head_id >= H) { return; } const uint state_size = C * head_size; const uint n_seq_tokens = T / B; threadgroup float _k[head_size]; threadgroup float _r[head_size]; threadgroup float _tf[head_size]; threadgroup float _td[head_size]; float state[head_size]; for (uint i = 0; i < head_size; i++) { state[i] = state_in[batch_id * state_size + head_id * head_size * head_size + i * head_size + tid]; } threadgroup_barrier(mem_flags::mem_threadgroup); _tf[tid] = tf[head_id * head_size + tid]; threadgroup_barrier(mem_flags::mem_threadgroup); const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid; const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid; for (uint t = start_t; t < end_t; t += C) { threadgroup_barrier(mem_flags::mem_threadgroup); _k[tid] = k[t]; _r[tid] = r[t]; _td[tid] = td[t]; threadgroup_barrier(mem_flags::mem_threadgroup); const float v_val = v[t]; float y = 0.0; for (uint j = 0; j < head_size; j += 4) { float4 k_vec = float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); float4 r_vec = float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); float4 tf_vec = float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]); float4 td_vec = float4(_td[j], _td[j+1], _td[j+2], _td[j+3]); float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]); float4 kv = k_vec * v_val; float4 temp = tf_vec * kv + s_vec; y += dot(r_vec, temp); s_vec = s_vec * td_vec + kv; state[j] = s_vec[0]; state[j+1] = s_vec[1]; state[j+2] = s_vec[2]; state[j+3] = s_vec[3]; } dst[t] = y; } for (uint i = 0; i < head_size; i++) { dst[T * C + batch_id * state_size + head_id * head_size * head_size + i * head_size + tid] = state[i]; } } kernel void kernel_rwkv_wkv7_f32( device const float * r, device const float * w, device const float * k, device const float * v, device const float * a, device const float * b, device const float * state_in, device float * dst, constant uint & B, constant uint & T, constant uint & C, constant uint & H, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const uint head_size = 64; // TODO: support head_size = 128 const uint batch_id = 
tgpig.x / H; const uint head_id = tgpig.x % H; const uint tid = tpitg.x; if (batch_id >= B || head_id >= H) { return; } const uint state_size = C * head_size; const uint n_seq_tokens = T / B; threadgroup float _r[head_size]; threadgroup float _w[head_size]; threadgroup float _k[head_size]; threadgroup float _a[head_size]; threadgroup float _b[head_size]; float state[head_size]; for (uint i = 0; i < head_size; i++) { state[i] = state_in[batch_id * state_size + head_id * head_size * head_size + tid * head_size + i]; } const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid; const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid; for (uint t = start_t; t < end_t; t += C) { threadgroup_barrier(mem_flags::mem_threadgroup); _r[tid] = r[t]; _w[tid] = w[t]; _k[tid] = k[t]; _a[tid] = a[t]; _b[tid] = b[t]; threadgroup_barrier(mem_flags::mem_threadgroup); const float v_val = v[t]; float y = 0.0, sa = 0.0; float4 sa_vec(0.0); for (uint j = 0; j < head_size; j += 4) { float4 a_vec = float4(_a[j], _a[j+1], _a[j+2], _a[j+3]); float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]); sa_vec += a_vec * s_vec; } sa = sa_vec[0] + sa_vec[1] + sa_vec[2] + sa_vec[3]; for (uint j = 0; j < head_size; j += 4) { float4 r_vec = float4(_r[j], _r[j+1], _r[j+2], _r[j+3]); float4 w_vec = float4(_w[j], _w[j+1], _w[j+2], _w[j+3]); float4 k_vec = float4(_k[j], _k[j+1], _k[j+2], _k[j+3]); float4 b_vec = float4(_b[j], _b[j+1], _b[j+2], _b[j+3]); float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]); float4 kv = k_vec * v_val; s_vec = s_vec * w_vec + kv + sa * b_vec; y += dot(s_vec, r_vec); state[j] = s_vec[0]; state[j+1] = s_vec[1]; state[j+2] = s_vec[2]; state[j+3] = s_vec[3]; } dst[t] = y; } for (uint i = 0; i < head_size; i++) { dst[T * C + batch_id * state_size + head_id * head_size * head_size + tid * head_size + i] = state[i]; } } kernel void kernel_argmax_f32( constant ggml_metal_kargs_argmax & args, device const char * src0, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * x_row = (device const float *) ((device const char *) src0 + tgpig * args.nb01); float lmax = -INFINITY; int32_t larg = -1; for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { if (x_row[i00] > lmax) { lmax = x_row[i00]; larg = i00; } } // find the argmax value in the block float max_val = simd_max(lmax); int32_t arg_val = simd_max(select(-1, larg, lmax == max_val)); device int32_t * dst_i32 = (device int32_t *) dst; threadgroup float * shared_maxval = (threadgroup float *) shmem; threadgroup int32_t * shared_argmax = (threadgroup int32_t *) shmem + N_SIMDWIDTH; if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { shared_maxval[tiisg] = -INFINITY; shared_argmax[tiisg] = -1; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { shared_maxval[sgitg] = max_val; shared_argmax[sgitg] = arg_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = shared_maxval[tiisg]; arg_val = shared_argmax[tiisg]; float max_val_reduced = simd_max(max_val); int32_t arg_val_reduced = simd_max(select(-1, arg_val, max_val == max_val_reduced)); dst_i32[tgpig] = arg_val_reduced; return; } dst_i32[tgpig] = arg_val; } // F == 1 : norm (no fuse) // F == 2 : norm + mul // F == 3 : norm + mul + add template kernel void kernel_norm_fuse_impl( 
constant ggml_metal_kargs_norm & args, device const char * src0, device const char * src1_0, device const char * src1_1, device char * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { if (sgitg == 0) { shmem_f32[tiisg] = 0.0f; } const int i01 = tgpig.x; const int i02 = tgpig.y; const int i03 = tgpig.z; device const T * x = (device const T *) (src0 + i03*args.nbf3[0] + i02*args.nbf2[0] + i01*args.nbf1[0]); device const T * f0 = (device const T *) (src1_0 + (i03%args.nef3[1])*args.nbf3[1] + (i02%args.nef2[1])*args.nbf2[1] + (i01%args.nef1[1])*args.nbf1[1]); device const T * f1 = (device const T *) (src1_1 + (i03%args.nef3[2])*args.nbf3[2] + (i02%args.nef2[2])*args.nbf2[2] + (i01%args.nef1[2])*args.nbf1[2]); T sumft(0.0f); float sumf = 0.0f; for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) { sumft += x[i00]; } sumf = dot(sumft, T(1.0f)); sumf = simd_sum(sumf); threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { shmem_f32[sgitg] = sumf; } threadgroup_barrier(mem_flags::mem_threadgroup); sumf = shmem_f32[tiisg]; sumf = simd_sum(sumf); const float mean = sumf/args.ne00; device T * y = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1); sumf = 0.0f; for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) { y[i00] = x[i00] - mean; sumf += dot(y[i00], y[i00]); } sumf = simd_sum(sumf); threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { shmem_f32[sgitg] = sumf; } threadgroup_barrier(mem_flags::mem_threadgroup); sumf = shmem_f32[tiisg]; sumf = simd_sum(sumf); const float variance = sumf/args.ne00; const float scale = 1.0f/sqrt(variance + args.eps); for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) { if (F == 1) { y[i00] = (y[i00]*scale); } if (F == 2) { y[i00] = (y[i00]*scale)*f0[i00]; } if (F == 3) { y[i00] = (y[i00]*scale)*f0[i00] + f1[i00]; } } } typedef decltype(kernel_norm_fuse_impl) kernel_norm_fuse_t; template [[host_name("kernel_norm_f32")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl; template [[host_name("kernel_norm_mul_f32")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl; template [[host_name("kernel_norm_mul_add_f32")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl; template [[host_name("kernel_norm_f32_4")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl; template [[host_name("kernel_norm_mul_f32_4")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl; template [[host_name("kernel_norm_mul_add_f32_4")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl; // F == 1 : rms_norm (no fuse) // F == 2 : rms_norm + mul // F == 3 : rms_norm + mul + add template kernel void kernel_rms_norm_fuse_impl( constant ggml_metal_kargs_norm & args, device const char * src0, device const char * src1_0, device const char * src1_1, device char * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { if (sgitg == 0) { shmem_f32[tiisg] = 0.0f; } const int i01 = tgpig.x; const int i02 = tgpig.y; const int i03 = tgpig.z; device const T * x = (device const T *) (src0 + i03*args.nbf3[0] + i02*args.nbf2[0] + i01*args.nbf1[0]); device const T * f0 = (device const T *) (src1_0 + 
(i03%args.nef3[1])*args.nbf3[1] + (i02%args.nef2[1])*args.nbf2[1] + (i01%args.nef1[1])*args.nbf1[1]); device const T * f1 = (device const T *) (src1_1 + (i03%args.nef3[2])*args.nbf3[2] + (i02%args.nef2[2])*args.nbf2[2] + (i01%args.nef1[2])*args.nbf1[2]); float sumf = 0.0f; // parallel sum for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) { sumf += dot(x[i00], x[i00]); } sumf = simd_sum(sumf); threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { shmem_f32[sgitg] = sumf; } threadgroup_barrier(mem_flags::mem_threadgroup); sumf = shmem_f32[tiisg]; sumf = simd_sum(sumf); const float mean = sumf/args.ne00; const float scale = 1.0f/sqrt(mean + args.eps); device T * y = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1); for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) { if (F == 1) { y[i00] = (x[i00]*scale); } if (F == 2) { y[i00] = (x[i00]*scale)*f0[i00]; } if (F == 3) { y[i00] = (x[i00]*scale)*f0[i00] + f1[i00]; } } } typedef decltype(kernel_rms_norm_fuse_impl) kernel_rms_norm_fuse_t; template [[host_name("kernel_rms_norm_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl; template [[host_name("kernel_rms_norm_mul_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl; template [[host_name("kernel_rms_norm_mul_add_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl; template [[host_name("kernel_rms_norm_f32_4")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl; template [[host_name("kernel_rms_norm_mul_f32_4")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl; template [[host_name("kernel_rms_norm_mul_add_f32_4")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl; kernel void kernel_l2_norm_f32( constant ggml_metal_kargs_l2_norm & args, device const char * src0, device char * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], ushort tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort ntg[[threads_per_threadgroup]]) { if (sgitg == 0) { shmem_f32[tiisg] = 0.0f; } device const float4 * x = (device const float4 *) (src0 + tgpig*args.nb01); float sumf = 0.0f; // parallel sum for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { sumf += dot(x[i00], x[i00]); } sumf = simd_sum(sumf); threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { shmem_f32[sgitg] = sumf; } threadgroup_barrier(mem_flags::mem_threadgroup); sumf = shmem_f32[tiisg]; sumf = simd_sum(sumf); const float scale = 1.0f/sqrt(max(sumf, args.eps)); device float4 * y = (device float4 *) dst + tgpig*args.ne00_4; for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { y[i00] = x[i00] * scale; } } kernel void kernel_group_norm_f32( constant ggml_metal_kargs_group_norm & args, device const float * src0, device float * dst, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t ne = args.ne00*args.ne01*args.ne02; const int64_t gs = args.ne00*args.ne01*((args.ne02 + args.ngrp - 1) / args.ngrp); int start = tgpig * gs; int end = start + gs; start += tpitg; if (end >= ne) { end = ne; } float tmp = 0.0f; // partial sum for thread in warp for (int j = start; j < end; j += ntg) { tmp += src0[j]; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = simd_sum(tmp); if (ntg > N_SIMDWIDTH) { if 
(sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = tmp; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = buf[tiisg]; tmp = simd_sum(tmp); } const float mean = tmp / gs; tmp = 0.0f; for (int j = start; j < end; j += ntg) { float xi = src0[j] - mean; dst[j] = xi; tmp += xi * xi; } tmp = simd_sum(tmp); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = tmp; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = buf[tiisg]; tmp = simd_sum(tmp); } const float variance = tmp / gs; const float scale = 1.0f/sqrt(variance + args.eps); for (int j = start; j < end; j += ntg) { dst[j] *= scale; } } // function for calculate inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; device const uint16_t * qs = ((device const uint16_t *) qb_curr + 1 + il/2); for (int i = 0; i < 8; i += 2) { acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F); acc[1] += yl[i + 1] * (qs[i / 2] & 0x0F00); acc[2] += yl[i + 8] * (qs[i / 2] & 0x00F0); acc[3] += yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (sumy * -8.f + acc[0] + acc[1] + acc[2] + acc[3]); } // function for calculate inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float m = qb_curr->m; float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; device const uint16_t * qs = ((device const uint16_t *) qb_curr + 2 + il/2); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F); acc[1] += yl[i + 1] * (qs[i / 2] & 0x0F00); acc[2] += yl[i + 8] * (qs[i / 2] & 0x00F0); acc[3] += yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (acc[0] + acc[1] + acc[2] + acc[3]) + sumy * m; } // function for calculate inner product between half a q5_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q5 quants begin (0 or QK5_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2); const uint32_t qh = *((device const uint32_t *)qb_curr->qh); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)); acc[1] += yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); acc[2] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)); acc[3] += yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); } return d * (sumy * -16.f + acc[0] + acc[1] + acc[2] + acc[3]); } // function for calculate inner 
product between half a q5_1 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q5 quants begin (0 or QK5_1/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float m = qb_curr->m; float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2); const uint32_t qh = *((device const uint32_t *)qb_curr->qh); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)); acc[1] += yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); acc[2] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)); acc[3] += yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); } return d * (acc[0] + acc[1] + acc[2] + acc[3]) + sumy * m; } template static inline void helper_mv_reduce_and_write( device float * dst_f32, float sumf[NR0], const int r0, const int ne01, ushort tiisg, ushort sgitg, threadgroup char * shmem) { constexpr short NW = N_SIMDWIDTH; threadgroup float * shmem_f32[NR0]; for (short row = 0; row < NR0; ++row) { shmem_f32[row] = (threadgroup float *) shmem + NW*row; if (sgitg == 0) { shmem_f32[row][tiisg] = 0.0f; } sumf[row] = simd_sum(sumf[row]); } threadgroup_barrier(mem_flags::mem_threadgroup); for (short row = 0; row < NR0; ++row) { if (tiisg == 0) { shmem_f32[row][sgitg] = sumf[row]; } } threadgroup_barrier(mem_flags::mem_threadgroup); for (short row = 0; row < NR0 && r0 + row < ne01; ++row) { float tot = simd_sum(shmem_f32[row][tiisg]); if (tiisg == 0 && sgitg == 0) { dst_f32[r0 + row] = tot; } } } constant short FC_mul_mv_nsg [[function_constant(FC_MUL_MV + 0)]]; constant short FC_mul_mv_nxpsg [[function_constant(FC_MUL_MV + 1)]]; template void mul_vec_q_n_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; constexpr short NW = N_SIMDWIDTH; constexpr short NQ = 16; const int nb = args.ne00/QK4_0; const int r0 = (tgpig.x*NSG + sgitg)*NR0; //const int r0 = tgpig.x*NR0; const int r1 = tgpig.y; const int im = tgpig.z; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; //device const block_q_type * x = (device const block_q_type *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); // pointers to src0 rows device const block_q_type * ax[NR0]; FOR_UNROLL (int row = 0; row < NR0; ++row) { const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; ax[row] = (device const block_q_type *) ((device char *) src0 + offset0); } float sumf[NR0] = {0.f}; const short ix = (tiisg/(NW/NQ)); const short il = (tiisg%(NW/NQ))*8; //const int ib0 = sgitg*NQ + ix; const int ib0 = ix; float yl[16]; // src1 vector cache //device const float * yb = y + ix*QK4_0 + il; device const float * yb = y + ib0*QK4_0 + il; // each thread in a SIMD group deals with half a block. 
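// note: a sketch of why the yl scaling in the loop below works, taking q4_0 as the example and assuming the
//       usual ggml q4_0 layout (byte j of a block packs element j in its low nibble and element j+16 in its
//       high nibble). reading two consecutive bytes as a uint16 w = qs[i/2] and masking without shifting
//       leaves each nibble at its packed position, so the compensation goes into the y values instead:
//
//         (w & 0x000F) = v(il+i+0)         <-> yl[i+0] = yb[i+0]           (scale 1)
//         (w & 0x0F00) = v(il+i+1)  *  256 <-> yl[i+1] = yb[i+1]/256.f     (scale 1/256)
//         (w & 0x00F0) = v(il+i+16) *   16 <-> yl[i+8] = yb[i+16]/16.f     (scale 1/16)
//         (w & 0xF000) = v(il+i+17) * 4096 <-> yl[i+9] = yb[i+17]/4096.f   (scale 1/4096)
//
//       e.g. a nibble value of 5 stored at bits [11:8] is read back as 5*256 = 1280, and multiplying by y/256
//       recovers 5*y. the sumy*(-8.f) term in block_q_n_dot_y then restores the implicit -8 offset of q4_0.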
//for (int ib = ib0; ib < nb; ib += NSG*NQ) { for (int ib = ib0; ib < nb; ib += NQ) { float sumy[2] = { 0.f, 0.f }; FOR_UNROLL (short i = 0; i < 8; i += 2) { sumy[0] += yb[i + 0] + yb[i + 1]; yl[i + 0] = yb[i + 0]; yl[i + 1] = yb[i + 1]/256.f; sumy[1] += yb[i + 16] + yb[i + 17]; yl[i + 8] = yb[i + 16]/16.f; yl[i + 9] = yb[i + 17]/4096.f; } FOR_UNROLL (short row = 0; row < NR0; row++) { sumf[row] += block_q_n_dot_y(ax[row] + ib, sumy[0] + sumy[1], yl, il); } yb += QK4_0 * 16; //yb += NSG*NQ*QK4_0; } device float * dst_f32 = (device float *) dst + im*args.ne0*args.ne1 + r1*args.ne0; //helper_mv_reduce_and_write(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem); for (int row = 0; row < NR0; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0 && r0 + row < args.ne01) { dst_f32[r0 + row] = tot; } } } kernel void kernel_mul_mv_q4_0_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } kernel void kernel_mul_mv_q4_1_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } kernel void kernel_mul_mv_q5_0_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } kernel void kernel_mul_mv_q5_1_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_q8_0_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; constexpr short NW = N_SIMDWIDTH; constexpr short NQ = 8; const int nb = args.ne00/QK8_0; const int r0 = tgpig.x*NR0; const int r1 = tgpig.y; const int im = tgpig.z; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; //device const block_q8_0 * x = (device const block_q8_0 *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); // pointers to src0 rows device const block_q8_0 * ax[NR0]; FOR_UNROLL (short row = 0; row < NR0; ++row) { const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; ax[row] = (device const block_q8_0 *) ((device char *) src0 + offset0); } float sumf[NR0] = { 0.f }; const short ix = tiisg/(NW/NQ); 
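// note: a sketch of the per-simdgroup work layout, assuming NW == N_SIMDWIDTH == 32 and NQ == 8:
//       NW/NQ == 4, so il = tiisg%4 selects which group of NQ quants inside a QK8_0 == 32 block this thread
//       handles, while ix = tiisg/4 selects one of the 8 blocks the simdgroup has in flight; ib0 = sgitg*NQ + ix
//       staggers the simdgroups, the main loop advances by NSG*NQ blocks per iteration, and the per-thread
//       partial sums are combined at the end in helper_mv_reduce_and_write via simd_sum.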
const short il = tiisg%(NW/NQ); const int ib0 = sgitg*NQ + ix; float yl[NQ]; device const float * yb = y + ib0*QK8_0 + il*NQ; // each thread in a SIMD group deals with NQ quants at a time for (int ib = ib0; ib < nb; ib += NSG*NQ) { for (short i = 0; i < NQ; ++i) { yl[i] = yb[i]; } for (short row = 0; row < NR0; row++) { device const int8_t * qs = ax[row][ib].qs + il*NQ; float sumq = 0.f; FOR_UNROLL (short i = 0; i < NQ; ++i) { sumq += qs[i] * yl[i]; } sumf[row] += sumq*ax[row][ib].d; } yb += NSG*NQ*QK8_0; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; helper_mv_reduce_and_write(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem); } [[host_name("kernel_mul_mv_q8_0_f32")]] kernel void kernel_mul_mv_q8_0_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q8_0_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } // mat-vec kernel processing in chunks of float4 // chpb - chunks per quantization block template void kernel_mul_mv_ext_q4_f32_impl( constant ggml_metal_kargs_mul_mv_ext & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { const short NSG = FC_mul_mv_nsg; const short nxpsg = FC_mul_mv_nxpsg; const short chpt = 4; // chunks per thread //const short nxpsg = (32); const short nypsg = (32/nxpsg); const short tx = tiisg%nxpsg; const short ty = tiisg/nxpsg; const int i01 = tgpig.x*(nypsg*NSG) + nypsg*sgitg + ty; const int i11 = tgpig.y*r1ptg; const int i1m = tgpig.z; const int i12 = i1m%args.ne12; const int i13 = i1m/args.ne12; const uint64_t offset0 = i01*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = i11*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const q_t * xq = (i01 < args.ne01) ? (device const q_t *) (src0 + offset0) + tx/chpb : (device const q_t *) src0; device const float4 * y4[r1ptg]; for (int ir1 = 0; ir1 < r1ptg; ++ir1) { y4[ir1] = (i11 + ir1 < args.ne11) ? (device const float4 *) (src1 + offset1 + ir1*args.nb11) + tx : (device const float4 *) src1; } float sumf[r1ptg] = { [ 0 ... 
r1ptg - 1 ] = 0.0f }; short cch = tx%chpb; // current chunk index for (int ich = tx; 4*ich < args.ne00; ich += chpt*nxpsg) { float4 lx[chpt]; #pragma unroll(chpt) for (short ch = 0; ch < chpt; ++ch) { deq_t4(xq, cch, lx[ch]); cch += nxpsg; if (cch >= chpb) { xq += cch/chpb; cch %= chpb; } } #pragma unroll(chpt) for (short ch = 0; ch < chpt; ++ch) { #pragma unroll(r1ptg) for (short ir1 = 0; ir1 < r1ptg; ++ir1) { sumf[ir1] += dot(lx[ch], y4[ir1][ch*nxpsg]); } } #pragma unroll(r1ptg) for (short ir1 = 0; ir1 < r1ptg; ++ir1) { y4[ir1] += chpt*nxpsg; } } // reduce only the threads in each row for (short ir1 = 0; ir1 < r1ptg; ++ir1) { if (nxpsg >= 32) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 16); } if (nxpsg >= 16) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 8); } if (nxpsg >= 8) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 4); } if (nxpsg >= 4) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 2); } if (nxpsg >= 2) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 1); } //sumf[ir1] = simd_sum(sumf[ir1]); } if (tx == 0) { for (short ir1 = 0; ir1 < r1ptg && i11 + ir1 < args.ne11; ++ir1) { device float * dst_f32 = (device float *) dst + (uint64_t)i1m*args.ne0*args.ne1 + (uint64_t)(i11 + ir1)*args.ne0; if (i01 < args.ne01) { dst_f32[i01] = sumf[ir1]; } } } } // mat-vec kernel processing in chunks of float4x4 template void kernel_mul_mv_ext_q4x4_f32_impl( constant ggml_metal_kargs_mul_mv_ext & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { const short NSG = FC_mul_mv_nsg; const short nxpsg = FC_mul_mv_nxpsg; const short chpt = 1; //const short nxpsg = (32); const short nypsg = (32/nxpsg); const short tx = tiisg%nxpsg; const short ty = tiisg/nxpsg; const int i01 = tgpig.x*(nypsg*NSG) + nypsg*sgitg + ty; const int i11 = tgpig.y*r1ptg; const int i1m = tgpig.z; const int i12 = i1m%args.ne12; const int i13 = i1m/args.ne12; const uint64_t offset0 = i01*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = i11*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const q_t * xq = (i01 < args.ne01) ? (device const q_t *) (src0 + offset0) + tx/chpb : (device const q_t *) src0; device const float4x4 * y4x4[r1ptg]; for (int ir1 = 0; ir1 < r1ptg; ++ir1) { y4x4[ir1] = (i11 + ir1 < args.ne11) ? (device const float4x4 *) (src1 + offset1 + ir1*args.nb11) + tx : (device const float4x4 *) src1; } float sumf[r1ptg] = { [ 0 ... 
r1ptg - 1 ] = 0.0f }; short cch = tx%chpb; for (int ich = tx; 16*ich < args.ne00; ich += chpt*nxpsg) { float4x4 lx[chpt]; #pragma unroll(chpt) for (short ch = 0; ch < chpt; ++ch) { deq_t4x4(xq, cch, lx[ch]); cch += nxpsg; if (cch >= chpb) { xq += cch/chpb; cch %= chpb; } } #pragma unroll(chpt) for (short ch = 0; ch < chpt; ++ch) { #pragma unroll(r1ptg) for (short ir1 = 0; ir1 < r1ptg; ++ir1) { sumf[ir1] += dot(lx[ch][0], y4x4[ir1][ch*nxpsg][0]) + dot(lx[ch][1], y4x4[ir1][ch*nxpsg][1]) + dot(lx[ch][2], y4x4[ir1][ch*nxpsg][2]) + dot(lx[ch][3], y4x4[ir1][ch*nxpsg][3]); } } #pragma unroll(r1ptg) for (short ir1 = 0; ir1 < r1ptg; ++ir1) { y4x4[ir1] += chpt*nxpsg; } } for (short ir1 = 0; ir1 < r1ptg; ++ir1) { if (nxpsg >= 32) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 16); } if (nxpsg >= 16) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 8); } if (nxpsg >= 8) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 4); } if (nxpsg >= 4) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 2); } if (nxpsg >= 2) { sumf[ir1] += simd_shuffle_down(sumf[ir1], 1); } //sumf[ir1] = simd_sum(sumf[ir1]); } if (tx == 0) { for (short ir1 = 0; ir1 < r1ptg && i11 + ir1 < args.ne11; ++ir1) { device float * dst_f32 = (device float *) dst + (uint64_t)i1m*args.ne0*args.ne1 + (uint64_t)(i11 + ir1)*args.ne0; if (i01 < args.ne01) { dst_f32[i01] = sumf[ir1]; } } } } // dispatchers needed for compile-time nxpsg // epb - elements per quantization block template kernel void kernel_mul_mv_ext_q4_f32_disp( constant ggml_metal_kargs_mul_mv_ext & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_ext_q4_f32_impl(args, src0, src1, dst, tgpig, tiisg, sgitg); } template kernel void kernel_mul_mv_ext_q4x4_f32_disp( constant ggml_metal_kargs_mul_mv_ext & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_ext_q4x4_f32_impl(args, src0, src1, dst, tgpig, tiisg, sgitg); } typedef decltype(kernel_mul_mv_ext_q4_f32_disp <2, block_q8_0, 32, dequantize_q8_0_t4>) mul_mv_ext_q4_f32_t; typedef decltype(kernel_mul_mv_ext_q4x4_f32_disp<2, block_q4_K, 256, dequantize_q4_K>) mul_mv_ext_q4x4_f32_t; template [[host_name("kernel_mul_mv_ext_f32_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, float4, 4, dequantize_f32_t4>; template [[host_name("kernel_mul_mv_ext_f32_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, float4, 4, dequantize_f32_t4>; template [[host_name("kernel_mul_mv_ext_f32_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, float4, 4, dequantize_f32_t4>; template [[host_name("kernel_mul_mv_ext_f32_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, float4, 4, dequantize_f32_t4>; template [[host_name("kernel_mul_mv_ext_f16_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, half4, 4, dequantize_f16_t4>; template [[host_name("kernel_mul_mv_ext_f16_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, half4, 4, dequantize_f16_t4>; template [[host_name("kernel_mul_mv_ext_f16_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, half4, 4, dequantize_f16_t4>; template [[host_name("kernel_mul_mv_ext_f16_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t 
kernel_mul_mv_ext_q4_f32_disp<5, half4, 4, dequantize_f16_t4>; template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q4_0, 32, dequantize_q4_0_t4>; template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q4_0, 32, dequantize_q4_0_t4>; template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q4_0, 32, dequantize_q4_0_t4>; template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q4_0, 32, dequantize_q4_0_t4>; template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q4_1, 32, dequantize_q4_1_t4>; template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q4_1, 32, dequantize_q4_1_t4>; template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q4_1, 32, dequantize_q4_1_t4>; template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q4_1, 32, dequantize_q4_1_t4>; template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q5_0, 32, dequantize_q5_0_t4>; template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q5_0, 32, dequantize_q5_0_t4>; template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q5_0, 32, dequantize_q5_0_t4>; template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q5_0, 32, dequantize_q5_0_t4>; template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q5_1, 32, dequantize_q5_1_t4>; template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q5_1, 32, dequantize_q5_1_t4>; template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q5_1, 32, dequantize_q5_1_t4>; template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q5_1, 32, dequantize_q5_1_t4>; template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q8_0, 32, dequantize_q8_0_t4>; template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q8_0, 32, dequantize_q8_0_t4>; template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q8_0, 32, dequantize_q8_0_t4>; template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q8_0, 32, dequantize_q8_0_t4>; template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_mxfp4, 32, dequantize_mxfp4_t4>; template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_mxfp4, 32, dequantize_mxfp4_t4>; template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_4")]] kernel 
mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_mxfp4, 32, dequantize_mxfp4_t4>; template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_mxfp4, 32, dequantize_mxfp4_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q4_K, 256, dequantize_q4_K>; template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q4_K, 256, dequantize_q4_K>; template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q4_K, 256, dequantize_q4_K>; template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q4_K, 256, dequantize_q4_K>; template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q5_K, 256, dequantize_q5_K>; template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q5_K, 256, dequantize_q5_K>; template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q5_K, 256, dequantize_q5_K>; template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q5_K, 256, dequantize_q5_K>; template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q6_K, 256, dequantize_q6_K>; template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q6_K, 256, dequantize_q6_K>; template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q6_K, 256, dequantize_q6_K>; template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q6_K, 256, dequantize_q6_K>; template void kernel_mul_mv_t_t_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; constexpr short NW = N_SIMDWIDTH; constexpr short NB = 32; constexpr short NF = 8; const int nb = args.ne00/NB; const int r0 = tgpig.x*NR0; const int r1 = tgpig.y; const int im = tgpig.z; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; //device const T0 * x = (device const T0 *) (src0 + offset0); device const T1 * y = (device const T1 *) (src1 + offset1); // 
pointers to src0 rows device const T0 * ax [NR0]; FOR_UNROLL (short row = 0; row < NR0; ++row) { const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; ax[row] = (device const T0 *) ((device char *) src0 + offset0); } float sumf[NR0] = { 0.f }; const short ix = tiisg/(NW/NF); const short il = tiisg%(NW/NF); const int ib0 = sgitg*NF + ix; T1 yl[NF]; device const T1 * yb = y + (ib0*NB + il*NF); for (int ib = ib0; ib < nb; ib += NSG*NF) { for (short i = 0; i < NF; ++i) { yl[i] = yb[i]; } for (short row = 0; row < NR0; row++) { device const T0 * xb = ax[row] + (ib*NB + il*NF); float sumq = 0.f; FOR_UNROLL (short i = 0; i < NF; ++i) { sumq += xb[i] * yl[i]; } sumf[row] += sumq; } yb += NSG*NF*NW; } for (int i = nb*NB + sgitg*NW + tiisg; i < args.ne00; i += NW*NSG) { for (short row = 0; row < NR0; row++) { sumf[row] += ax[row][i] * y[i]; } } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; helper_mv_reduce_and_write(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem); } template void kernel_mul_mv_t_t_disp( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { switch (args.nr0) { //case 1: kernel_mul_mv_t_t_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; case 2: kernel_mul_mv_t_t_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; //case 3: kernel_mul_mv_t_t_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; //case 4: kernel_mul_mv_t_t_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; } } template kernel void kernel_mul_mv_t_t( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_t_t_disp(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } typedef decltype(kernel_mul_mv_t_t) mul_mv_t_t; template [[host_name("kernel_mul_mv_f32_f32")]] kernel mul_mv_t_t kernel_mul_mv_t_t; template [[host_name("kernel_mul_mv_f16_f32")]] kernel mul_mv_t_t kernel_mul_mv_t_t; template [[host_name("kernel_mul_mv_f16_f16")]] kernel mul_mv_t_t kernel_mul_mv_t_t; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_mul_mv_bf16_f32")]] kernel mul_mv_t_t kernel_mul_mv_t_t; template [[host_name("kernel_mul_mv_bf16_bf16")]] kernel mul_mv_t_t kernel_mul_mv_t_t; #endif template void kernel_mul_mv_t_t_4_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; constexpr short NW = N_SIMDWIDTH; constexpr short NB = 32; constexpr short NF = 16; constexpr short NF4 = NF/4; const int nb = args.ne00/NB; const int r0 = tgpig.x*NR0; const int r1 = tgpig.y; const int im = tgpig.z; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const T1 * y = (device const T1 *) (src1 + offset1); device const T14 * y4 = (device const T14 *) (src1 + offset1); // pointers to src0 rows device const T0 * ax [NR0]; device const T04 * ax4[NR0]; FOR_UNROLL (short row = 0; row < NR0; ++row) { const uint64_t offset0 = (r0 + 
row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; ax [row] = (device const T0 *) ((device char *) src0 + offset0); ax4[row] = (device const T04 *) ((device char *) src0 + offset0); } float sumf[NR0] = { 0.f }; const short ix = tiisg/(NW/NF); const short il = tiisg%(NW/NF); const int ib0 = sgitg*NF + ix; T14 yl4[NF4]; device const T14 * yb4 = y4 + (ib0*NB + il*NF)/4; for (int ib = ib0; ib < nb; ib += NSG*NF) { for (short i = 0; i < NF4; ++i) { yl4[i] = yb4[i]; } for (short row = 0; row < NR0; row++) { device const T04 * xb4 = ax4[row] + (ib*NB + il*NF)/4; float sumq = 0.f; FOR_UNROLL (short i = 0; i < NF4; ++i) { sumq += dot(float4(xb4[i]), float4(yl4[i])); } sumf[row] += sumq; } yb4 += NSG*NF*NW/4; } for (int i = nb*NB + sgitg*NW + tiisg; i < args.ne00; i += NW*NSG) { for (short row = 0; row < NR0; row++) { sumf[row] += ax[row][i] * y[i]; } } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; helper_mv_reduce_and_write(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem); } template void kernel_mul_mv_t_t_4_disp( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { switch (args.nr0) { //case 1: kernel_mul_mv_t_t_4_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; case 2: kernel_mul_mv_t_t_4_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; //case 3: kernel_mul_mv_t_t_4_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; //case 4: kernel_mul_mv_t_t_4_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break; }; } template kernel void kernel_mul_mv_t_t_4( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_t_t_4_disp(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } typedef decltype(kernel_mul_mv_t_t_4) mul_mv_t_t_4; template [[host_name("kernel_mul_mv_f32_f32_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4; template [[host_name("kernel_mul_mv_f16_f32_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4; template [[host_name("kernel_mul_mv_f16_f16_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_mul_mv_bf16_f32_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4; template [[host_name("kernel_mul_mv_bf16_bf16_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4; #endif template void kernel_mul_mv_t_t_short_impl( args_t args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig, ushort tiisg) { const int r0 = tgpig.x*32 + tiisg; const int r1 = tgpig.y; const int im = tgpig.z; if (r0 >= args.ne01) { return; } const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; device const T0 * x = (device const T0 *) (src0 + offset0); device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const T1 * y = (device const T1 *) (src1 + offset1); float res = 0.0f; for (int i = 0; i < args.ne00; ++i) { res += (float) x[i] * (float) y[i]; } dst_f32[(uint64_t)r1*args.ne0 + r0] = res; } template kernel void kernel_mul_mv_t_t_short( constant ggml_metal_kargs_mul_mv & args, device 
const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_t_t_short_impl( args, src0, src1, dst, tgpig, tiisg); } typedef decltype(kernel_mul_mv_t_t_short) mul_mv_t_t_short_t; template [[host_name("kernel_mul_mv_f32_f32_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short; template [[host_name("kernel_mul_mv_f16_f32_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short; template [[host_name("kernel_mul_mv_f16_f16_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_mul_mv_bf16_f32_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short; template [[host_name("kernel_mul_mv_bf16_bf16_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short; #endif constant bool FC_rope_is_imrope [[function_constant(FC_ROPE + 0)]]; static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / max(0.001f, high - low); return 1.0f - min(1.0f, max(0.0f, y)); } // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. static void rope_yarn( float theta_extrap, float freq_scale, float corr_dims[2], int i0, float ext_factor, float mscale, thread float * cos_theta, thread float * sin_theta) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); } *cos_theta = cos(theta) * mscale; *sin_theta = sin(theta) * mscale; } // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get // `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` static float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) { return n_dims * log(n_ctx_orig / (n_rot * 2 * M_PI_F)) / (2 * log(base)); } static void rope_yarn_corr_dims( int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2] ) { // start and end correction dims dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base))); dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base))); } template kernel void kernel_rope_norm( constant ggml_metal_kargs_rope & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, ushort tiitg[[thread_index_in_threadgroup]], ushort3 tptg [[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { const int i3 = tgpig[2]; const int i2 = tgpig[1]; const int i1 = tgpig[0]; float corr_dims[2]; rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims); device const int32_t * pos = (device const int32_t *) src1; const float theta_base = (float) pos[i2]; const float inv_ndims = -1.f/args.n_dims; float cos_theta; float sin_theta; for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) { if (i0 < args.n_dims) { const int ic = i0/2; const float theta = theta_base * pow(args.freq_base, inv_ndims*i0); const float freq_factor = args.src2 ? 
((device const float *) src2)[ic] : 1.0f; rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); const float x0 = src[0]; const float x1 = src[1]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[1] = x0*sin_theta + x1*cos_theta; } else { device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } template kernel void kernel_rope_neox( constant ggml_metal_kargs_rope & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, ushort tiitg[[thread_index_in_threadgroup]], ushort3 tptg [[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { const int i3 = tgpig[2]; const int i2 = tgpig[1]; const int i1 = tgpig[0]; float corr_dims[2]; rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims); device const int32_t * pos = (device const int32_t *) src1; const float theta_base = (float) pos[i2]; const float inv_ndims = -1.f/args.n_dims; float cos_theta; float sin_theta; for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) { if (i0 < args.n_dims) { const int ic = i0/2; const float theta = theta_base * pow(args.freq_base, inv_ndims*i0); const float freq_factor = args.src2 ? ((device const float *) src2)[ic] : 1.0f; rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + ic*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + ic*args.nb0); const float x0 = src[0]; const float x1 = src[args.n_dims/2]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[args.n_dims/2] = x0*sin_theta + x1*cos_theta; } else { device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } template kernel void kernel_rope_multi( constant ggml_metal_kargs_rope & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, ushort tiitg[[thread_index_in_threadgroup]], ushort3 tptg [[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { const int i3 = tgpig[2]; const int i2 = tgpig[1]; const int i1 = tgpig[0]; float corr_dims[2]; rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims); device const int32_t * pos = (device const int32_t *) src1; const float inv_ndims = -1.f/args.n_dims; float cos_theta; float sin_theta; for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) { if (i0 < args.n_dims) { const int ic = i0/2; // mrope theta calculations // note: the rest is the same as kernel_rope_neox const int sect_dims = args.sect_0 + args.sect_1 + args.sect_2 + args.sect_3; const int sec_w01 = args.sect_0 + args.sect_1; // end of section 1 const int sec_w012 = args.sect_0 + args.sect_1 + args.sect_2; // end of section 2 const int sector = ic % 
sect_dims; float theta_base; if (FC_rope_is_imrope) { if (sector % 3 == 1 && sector < 3 * args.sect_1) { // h theta_base = (float) pos[i2 + args.ne02 * 1]; } else if (sector % 3 == 2 && sector < 3 * args.sect_2) { // w theta_base = (float) pos[i2 + args.ne02 * 2]; } else if (sector % 3 == 0 && sector < 3 * args.sect_0) { // t theta_base = (float) pos[i2 + args.ne02 * 0]; } else { // e theta_base = (float) pos[i2 + args.ne02 * 3]; } } else { if (sector < args.sect_0) { theta_base = (float) pos[i2]; } else if (sector < sec_w01) { theta_base = (float) pos[i2 + args.ne02 * 1]; } else if (sector < sec_w012) { theta_base = (float) pos[i2 + args.ne02 * 2]; } else { theta_base = (float) pos[i2 + args.ne02 * 3]; } } // end of mrope const float theta = theta_base * pow(args.freq_base, inv_ndims*i0); const float freq_factor = args.src2 ? ((device const float *) src2)[ic] : 1.0f; rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + ic*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + ic*args.nb0); const float x0 = src[0]; const float x1 = src[args.n_dims/2]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[args.n_dims/2] = x0*sin_theta + x1*cos_theta; } else { device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } template kernel void kernel_rope_vision( constant ggml_metal_kargs_rope & args, device const char * src0, device const char * src1, device const char * src2, device char * dst, ushort tiitg[[thread_index_in_threadgroup]], ushort3 tptg [[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { const int i3 = tgpig[2]; const int i2 = tgpig[1]; const int i1 = tgpig[0]; float corr_dims[2]; rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims); device const int32_t * pos = (device const int32_t *) src1; const float inv_ndims = -1.f/args.n_dims; float cos_theta; float sin_theta; for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) { if (i0 < 2*args.n_dims) { // different from kernel_rope_multi const int ic = i0/2; // mrope theta calculations (only support 2 dimensions) const int sect_dims = args.sect_0 + args.sect_1; const int sector = ic % sect_dims; float p; float theta_base; if (sector < args.sect_1) { p = (float) sector; theta_base = (float) pos[i2]; } else { p = (float) sector - args.sect_0; theta_base = (float) pos[i2 + args.ne02]; } const float theta = theta_base * pow(args.freq_base, 2.0f * inv_ndims * p); // end of mrope const float freq_factor = args.src2 ? 
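            // note: compared to kernel_rope_multi, the vision variant rotates pairs that are a full
            // n_dims apart (x0 = src[0], x1 = src[args.n_dims]), covers i0 < 2*args.n_dims, and uses
            // only two position sections (sect_0/sect_1) with the angle exponent 2*inv_ndims*p.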
((device const float *) src2)[ic] : 1.0f; rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + ic*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + ic*args.nb0); const float x0 = src[0]; const float x1 = src[args.n_dims]; // different from kernel_rope_multi dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[args.n_dims] = x0*sin_theta + x1*cos_theta; // different from kernel_rope_multi } else { device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00); device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } typedef decltype(kernel_rope_norm) kernel_rope_norm_t; typedef decltype(kernel_rope_neox) kernel_rope_neox_t; typedef decltype(kernel_rope_multi) kernel_rope_multi_t; typedef decltype(kernel_rope_vision) kernel_rope_vision_t; template [[host_name("kernel_rope_norm_f32")]] kernel kernel_rope_norm_t kernel_rope_norm; template [[host_name("kernel_rope_norm_f16")]] kernel kernel_rope_norm_t kernel_rope_norm; template [[host_name("kernel_rope_neox_f32")]] kernel kernel_rope_neox_t kernel_rope_neox; template [[host_name("kernel_rope_neox_f16")]] kernel kernel_rope_neox_t kernel_rope_neox; template [[host_name("kernel_rope_multi_f32")]] kernel kernel_rope_multi_t kernel_rope_multi; template [[host_name("kernel_rope_multi_f16")]] kernel kernel_rope_multi_t kernel_rope_multi; template [[host_name("kernel_rope_vision_f32")]] kernel kernel_rope_vision_t kernel_rope_vision; template [[host_name("kernel_rope_vision_f16")]] kernel kernel_rope_vision_t kernel_rope_vision; typedef void (im2col_t)( constant ggml_metal_kargs_im2col & args, device const float * x, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]); template kernel void kernel_im2col( constant ggml_metal_kargs_im2col & args, device const float * x, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { // const int64_t IC = tgpg[0]; const int64_t OH = tgpg[1]; const int64_t OW = tgpg[2]; const int64_t KH = ntg[1]; const int64_t KW = ntg[2]; int64_t in = tpitg[0]; const int64_t ikh = tpitg[1]; const int64_t ikw = tpitg[2]; const int64_t iic = tgpig[0]; const int64_t ioh = tgpig[1]; const int64_t iow = tgpig[2]; const int64_t iiw = iow*args.s0 + ikw*args.d0 - args.p0; const int64_t iih = ioh*args.s1 + ikh*args.d1 - args.p1; int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*args.CHW + (iic*(KH*KW) + ikh*KW + ikw); device T * pdst = (device T *) (dst); if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) { while (in < args.N) { pdst[offset_dst] = 0.0f; offset_dst += ntg[0]*args.CHW*OH*OW; in += ntg[0]; } } else { int64_t offset_src = in*args.ofs0 + iic*args.ofs1 + iih*args.IW + iiw; while (in < args.N) { pdst[offset_dst] = x[offset_src]; offset_dst += ntg[0]*args.CHW*OH*OW; offset_src += ntg[0]*args.ofs0; in += ntg[0]; } } } template [[host_name("kernel_im2col_f32")]] kernel im2col_t kernel_im2col; template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col; // TODO: obolete -- remove //typedef void (im2col_ext_t)( // 
constant ggml_metal_kargs_im2col & args, // device const float * x, // device char * dst, // uint3 tgpig[[threadgroup_position_in_grid]], // uint3 tgpg[[threadgroups_per_grid]], // uint3 tpitg[[thread_position_in_threadgroup]], // uint3 ntg[[threads_per_threadgroup]]); // //template //kernel void kernel_im2col_ext( // constant ggml_metal_kargs_im2col & args, // device const float * x, // device char * dst, // uint3 tgpig[[threadgroup_position_in_grid]], // uint3 tgpg[[threadgroups_per_grid]], // tgpg[0] = D x IC x KH x KW, CHW = IC x KH x KW // uint3 tpitg[[thread_position_in_threadgroup]], // uint3 ntg[[threads_per_threadgroup]]) { // [M, 1, 1] // const int64_t KHW = (int64_t)args.KHW; // // const int64_t d = tgpig[0] / args.CHW; // const int64_t chw = tgpig[0] % args.CHW; // const int64_t tgpig_0 = chw / KHW; // 0 ~ (IC - 1) // const int64_t HW = tgpig[0] % KHW; // // const int64_t tpitg_0 = (d * ntg[0]) + tpitg[0]; // if (tpitg_0 >= args.N) { // return; // } // // const int64_t tpitg_1 = HW / args.KW; // const int64_t tpitg_2 = HW % args.KW; // // const int64_t iiw = tgpig[2] * args.s0 + tpitg_2 * args.d0 - args.p0; // const int64_t iih = tgpig[1] * args.s1 + tpitg_1 * args.d1 - args.p1; // // const int64_t offset_dst = // (tpitg_0 * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * args.CHW + // (tgpig_0 * KHW + tpitg_1 * args.KW + tpitg_2); // // device T * pdst = (device T *) (dst); // // if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) { // pdst[offset_dst] = 0.0f; // } else { // const int64_t offset_src = tpitg_0 * args.ofs0 + tgpig_0 * args.ofs1; // pdst[offset_dst] = x[offset_src + iih * args.IW + iiw]; // } //} // //template [[host_name("kernel_im2col_ext_f32")]] kernel im2col_ext_t kernel_im2col_ext; //template [[host_name("kernel_im2col_ext_f16")]] kernel im2col_ext_t kernel_im2col_ext; template kernel void kernel_conv_2d( constant ggml_metal_kargs_conv_2d & args, device const char * weights, device const char * src, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const uint threads_per_tg = ntg.x * ntg.y * ntg.z; const uint tg_index = (tgpig.z * tgpg.y + tgpig.y) * tgpg.x + tgpig.x; const uint local_thread = tpitg.z * (ntg.x * ntg.y) + tpitg.y * ntg.x + tpitg.x; const uint thread_index = tg_index * threads_per_tg + local_thread; const uint64_t total_threads = (uint64_t) threads_per_tg * tgpg.x * tgpg.y * tgpg.z; const uint64_t total_outputs = (uint64_t) args.N * args.OC * args.OH * args.OW; for (uint64_t index = thread_index; index < total_outputs; index += total_threads) { uint64_t tmp = index; const int32_t ow = tmp % args.OW; tmp /= args.OW; const int32_t oh = tmp % args.OH; tmp /= args.OH; const int32_t oc = tmp % args.OC; tmp /= args.OC; const int32_t n = tmp; float acc = 0.0f; const int32_t base_x = ow*args.s0 - args.p0; const int32_t base_y = oh*args.s1 - args.p1; int32_t ky_start = 0; if (base_y < 0) { ky_start = (-base_y + args.d1 - 1)/args.d1; } int32_t ky_end = args.KH; const int32_t y_max = args.IH - 1 - base_y; if (y_max < 0) { ky_end = ky_start; } else if (base_y + (args.KH - 1)*args.d1 >= args.IH) { ky_end = min(ky_end, y_max/args.d1 + 1); } int32_t kx_start = 0; if (base_x < 0) { kx_start = (-base_x + args.d0 - 1)/args.d0; } int32_t kx_end = args.KW; const int32_t x_max = args.IW - 1 - base_x; if (x_max < 0) { kx_end = kx_start; } else if (base_x + (args.KW - 1)*args.d0 >= args.IW) { kx_end = min(kx_end, 
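        // note: these bounds clamp the kernel taps to the ones that read inside the input. the
        // input row for output (oh, ow) is iy = base_y + ky*d1 with base_y = oh*s1 - p1, and
        // requiring 0 <= iy <= IH - 1 gives:
        //   ky_start = (-base_y + d1 - 1)/d1            (i.e. ceil(-base_y/d1), when base_y < 0)
        //   ky_end   = min(KH, (IH - 1 - base_y)/d1 + 1) (when the window overshoots the input)
        // and likewise for kx along the width, so the inner loops below need no bounds checks.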
x_max/args.d0 + 1); } if (ky_start < ky_end && kx_start < kx_end) { const uint64_t src_base_n = (uint64_t) n * args.nb13; const uint64_t w_base_oc = (uint64_t) oc * args.nb03; for (int32_t ic = 0; ic < args.IC; ++ic) { const uint64_t src_base_nc = src_base_n + (uint64_t) ic * args.nb12; const uint64_t w_base_ocic = w_base_oc + (uint64_t) ic * args.nb02; for (int32_t ky = ky_start; ky < ky_end; ++ky) { const int32_t iy = base_y + ky*args.d1; const uint64_t src_base_row = src_base_nc + (uint64_t) iy * args.nb11; const uint64_t w_base_row = w_base_ocic + (uint64_t) ky * args.nb01; for (int32_t kx = kx_start; kx < kx_end; ++kx) { const int32_t ix = base_x + kx*args.d0; const uint64_t src_offs = src_base_row + (uint64_t) ix * args.nb10; const uint64_t w_offs = w_base_row + (uint64_t) kx * args.nb00; const float x = *(device const float *)(src + src_offs); const float w = (float) (*(device const TK *)(weights + w_offs)); acc += x * w; } } } } const uint64_t dst_offs = (uint64_t) n * args.nb3 + (uint64_t) oc * args.nb2 + (uint64_t) oh * args.nb1 + (uint64_t) ow * args.nb0; *(device float *)(dst + dst_offs) = acc; } } template [[host_name("kernel_conv_2d_f32_f32")]] kernel void kernel_conv_2d( constant ggml_metal_kargs_conv_2d & args, device const char * weights, device const char * src, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]); template [[host_name("kernel_conv_2d_f16_f32")]] kernel void kernel_conv_2d( constant ggml_metal_kargs_conv_2d & args, device const char * weights, device const char * src, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]); typedef void (conv_transpose_1d_t)( constant ggml_metal_kargs_conv_transpose_1d & args, device const float * src0, device const float * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); template kernel void kernel_conv_transpose_1d( constant ggml_metal_kargs_conv_transpose_1d & args, device const T * src0, device const float * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]) { float v = 0.0f; for (int64_t c = 0; c < args.IC; c++) { const int32_t kernel_offset = c * tgpg[1] * args.K + args.K * tgpig[1]; const int32_t input_offset = c * args.IL; for (int64_t i = 0; i < args.IL; i++) { if (tgpig[0] >= i * args.s0 && tgpig[0] < i * args.s0 + args.K) { v += src0[kernel_offset + tgpig[0] - i * args.s0] * src1[input_offset + i]; } } } device float * dst_ptr = (device float *) (dst + tgpig[0] * args.nb0 + tgpig[1] * args.nb1); dst_ptr[0] = v; } template [[host_name("kernel_conv_transpose_1d_f32_f32")]] kernel void kernel_conv_transpose_1d( constant ggml_metal_kargs_conv_transpose_1d & args, device const float * src0, device const float * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); template [[host_name("kernel_conv_transpose_1d_f16_f32")]] kernel void kernel_conv_transpose_1d( constant ggml_metal_kargs_conv_transpose_1d & args, device const half * src0, device const float * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); typedef void (conv_transpose_2d_t)( constant ggml_metal_kargs_conv_transpose_2d & args, device const float * src0, device const 
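// note: kernel_conv_transpose_2d below computes the transposed convolution as a gather over the
// output: for each (out_x, out_y, out_c), one thread per kernel tap (kw, kh) accumulates
// w[in_c][out_c][kh][kw] * src[in_c][(out_y - kh)/s0][(out_x - kw)/s0], contributing only when the
// divisions by the stride are exact and the source position lands inside the input; the per-tap
// partial sums are then reduced through the `shared_sum` threadgroup buffer by thread 0.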
float * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); template kernel void kernel_conv_transpose_2d( constant ggml_metal_kargs_conv_transpose_2d & args, device const T * src0, device const float * src1, device char * dst, threadgroup float * shared_sum [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t out_x = tgpig[0]; const int64_t out_y = tgpig[1]; const int64_t out_c = tgpig[2]; const int64_t kw = tpitg[0]; const int64_t kh = tpitg[1]; float v = 0.0f; for (int64_t in_c = 0; in_c < args.IC; in_c++) { int64_t in_y = out_y - kh; if (in_y < 0 || in_y % args.s0) continue; in_y /= args.s0; if (in_y >= args.IH) continue; int64_t in_x = out_x - kw; if (in_x < 0 || in_x % args.s0) continue; in_x /= args.s0; if (in_x >= args.IW) continue; const int64_t input_idx = (args.IW * args.IH) * in_c + (args.IW) * in_y + in_x; const int64_t kernel_idx = (args.KH * args.KW * args.OC) * in_c + (args.KH * args.KW) * out_c + (args.KW) * kh + kw; v += (float)src0[kernel_idx] * src1[input_idx]; } const uint tid = tpitg.y * ntg.x + tpitg.x; shared_sum[tid] = v; threadgroup_barrier(mem_flags::mem_threadgroup); if (tid == 0) { float total = 0.0f; const uint num_threads = ntg.x * ntg.y; for (uint i = 0; i < num_threads; i++) { total += shared_sum[i]; } device float * dst_ptr = (device float *) (dst + out_x*args.nb0 + out_y * args.nb1 + out_c*args.nb2); dst_ptr[0] = total; } } template [[host_name("kernel_conv_transpose_2d_f32_f32")]] kernel void kernel_conv_transpose_2d( constant ggml_metal_kargs_conv_transpose_2d & args, device const float * src0, device const float * src1, device char * dst, threadgroup float * shared_sum [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]); template [[host_name("kernel_conv_transpose_2d_f16_f32")]] kernel void kernel_conv_transpose_2d( constant ggml_metal_kargs_conv_transpose_2d & args, device const half * src0, device const float * src1, device char * dst, threadgroup float * shared_sum [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]); kernel void kernel_upscale_f32( constant ggml_metal_kargs_upscale & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3/args.sf3; const int64_t i02 = i2/args.sf2; const int64_t i01 = i1/args.sf1; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int64_t i00 = i0/args.sf0; device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); dst_ptr[0] = src0_ptr[0]; } } kernel void kernel_pad_f32( constant ggml_metal_kargs_pad & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3; const int64_t i02 = i2; const int64_t i01 = i1; device const 
float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01); device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1); if (i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) { for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { if (i0 < args.ne00) { dst_ptr[i0] = src0_ptr[i0]; } else { dst_ptr[i0] = 0.0f; } } return; } for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { dst_ptr[i0] = 0.0f; } } kernel void kernel_pad_reflect_1d_f32( constant ggml_metal_kargs_pad_reflect_1d & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3; const int64_t i02 = i2; const int64_t i01 = i1; device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01); device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1); if (i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) { for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { if (i0 < args.p0) { dst_ptr[i0] = src0_ptr[args.p0 - i0]; } else if (i0 < args.ne0 - args.p1) { dst_ptr[i0] = src0_ptr[i0 - args.p0]; } else { dst_ptr[i0] = src0_ptr[(args.ne0 - args.p1 - args.p0) - (args.p1 + 1 - (args.ne0 - i0)) - 1]; } } } } kernel void kernel_arange_f32( constant ggml_metal_kargs_arange & args, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { device float * dst_ptr = (device float *) dst; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { dst_ptr[i0] = args.start + args.step * i0; } } kernel void kernel_timestep_embedding_f32( constant ggml_metal_kargs_timestep_embedding & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { int i = tgpig.x; device float * embed_data = (device float *)(dst + i*args.nb1); int half_ = args.dim / 2; for (int j = tpitg.x; j < half_; j += ntg.x) { float timestep = ((device float *)src0)[i]; float freq = (float)exp(-log((float)args.max_period) * j / half_); float arg = timestep * freq; embed_data[j ] = cos(arg); embed_data[j + half_] = sin(arg); } if (args.dim % 2 != 0 && tpitg.x == 0) { embed_data[2 * half_] = 0.f; } } // bitonic sort implementation following the CUDA kernels as reference typedef void (argsort_t)( constant ggml_metal_kargs_argsort & args, device const char * src0, device int32_t * dst, threadgroup int32_t * shmem_i32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]); template kernel void kernel_argsort_f32_i32( constant ggml_metal_kargs_argsort & args, device const char * src0, device int32_t * dst, threadgroup int32_t * shmem_i32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { // bitonic sort const int col = tpitg[0]; const int ib = tgpig[0] / args.ne01; const int i00 = ib*ntg.x; const int i01 = tgpig[0] % args.ne01; const int i02 = tgpig[1]; const int i03 = tgpig[2]; device const float * src0_row = (device const float *) (src0 + args.nb01*i01 + args.nb02*i02 + 
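    // note: each threadgroup bitonic-sorts one block of ntg.x indices; ntg.x is assumed to be a
    // power of two chosen on the host, with indices past ne00 acting as padding that always
    // compares as "after" any valid element, so it sinks to the end of the block. only the first
    // top_k entries of every block are written out; blocks are combined by the merge kernel below.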
args.nb03*i03); // initialize indices shmem_i32[col] = i00 + col; threadgroup_barrier(mem_flags::mem_threadgroup); for (int k = 2; k <= ntg.x; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (shmem_i32[col] >= args.ne00 || (shmem_i32[ixj] < args.ne00 && (order == GGML_SORT_ORDER_ASC ? src0_row[shmem_i32[col]] > src0_row[shmem_i32[ixj]] : src0_row[shmem_i32[col]] < src0_row[shmem_i32[ixj]])) ) { SWAP(shmem_i32[col], shmem_i32[ixj]); } } else { if (shmem_i32[ixj] >= args.ne00 || (shmem_i32[col] < args.ne00 && (order == GGML_SORT_ORDER_ASC ? src0_row[shmem_i32[col]] < src0_row[shmem_i32[ixj]] : src0_row[shmem_i32[col]] > src0_row[shmem_i32[ixj]])) ) { SWAP(shmem_i32[col], shmem_i32[ixj]); } } } threadgroup_barrier(mem_flags::mem_threadgroup); } } const int64_t i0 = ib*args.top_k; // copy the result to dst without the padding if (i0 + col < args.ne0 && col < args.top_k) { dst += i0 + args.ne0*i01 + args.ne0*args.ne1*i02 + args.ne0*args.ne1*args.ne2*i03; dst[col] = shmem_i32[col]; } } template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32; template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32; typedef void (argsort_merge_t)( constant ggml_metal_kargs_argsort_merge & args, device const char * src0, device const int32_t * tmp, device int32_t * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]); template kernel void kernel_argsort_merge_f32_i32( constant ggml_metal_kargs_argsort_merge & args, device const char * src0, device const int32_t * tmp, device int32_t * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int im = tgpig[0] / args.ne01; const int i01 = tgpig[0] % args.ne01; const int i02 = tgpig[1]; const int i03 = tgpig[2]; const int start = im * (2 * args.len); const int len0 = MIN(args.len, MAX(0, args.ne0 - (int)(start))); const int len1 = MIN(args.len, MAX(0, args.ne0 - (int)(start + args.len))); const int total = len0 + len1; device const int32_t * tmp0 = tmp + start + i01*args.ne0 + i02*args.ne0*args.ne01 + i03*args.ne0*args.ne01*args.ne02; device const int32_t * tmp1 = tmp0 + args.len; dst += start + i01*args.top_k + i02*args.top_k*args.ne01 + i03*args.top_k*args.ne01*args.ne02; device const float * src0_row = (device const float *)(src0 + args.nb01*i01 + args.nb02*i02 + args.nb03*i03); if (total == 0) { return; } const int chunk = (total + ntg.x - 1) / ntg.x; const int k0 = tpitg.x * chunk; const int k1 = MIN(MIN(k0 + chunk, total), args.top_k); if (k0 >= args.top_k) { return; } if (k0 >= total) { return; } int low = k0 > len1 ? 
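    // note: this is a merge-path style partition: for output rank k0 find the split (i, j) with
    // i + j = k0 such that the first k0 merged elements are exactly tmp0[0..i) and tmp1[0..j).
    // a plain scalar sketch of the same search (illustrative only; arrays a/b stand in for the
    // index + src0_row indirection, ascending order):
    //
    //   int lo = k > len1 ? k - len1 : 0;
    //   int hi = k < len0 ? k : len0;
    //   while (lo < hi) {
    //       const int mid = (lo + hi)/2;
    //       if (a[mid] <= b[k - mid - 1]) lo = mid + 1; else hi = mid;
    //   }
    //   // i = lo elements come from a, j = k - lo from b
    //
    // for descending order the comparison flips, as in the loop below.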
k0 - len1 : 0; int high = MIN(k0, len0); // binary-search partition (i, j) such that i + j = k while (low < high) { const int mid = (low + high) >> 1; const int32_t idx0 = tmp0[mid]; const int32_t idx1 = tmp1[k0 - mid - 1]; const float val0 = src0_row[idx0]; const float val1 = src0_row[idx1]; bool take_left; if (order == GGML_SORT_ORDER_ASC) { take_left = (val0 <= val1); } else { take_left = (val0 >= val1); } if (take_left) { low = mid + 1; } else { high = mid; } } int i = low; int j = k0 - i; // keep the merge fronts into registers int32_t idx0 = 0; float val0 = 0.0f; if (i < len0) { idx0 = tmp0[i]; val0 = src0_row[idx0]; } int32_t idx1 = 0; float val1 = 0.0f; if (j < len1) { idx1 = tmp1[j]; val1 = src0_row[idx1]; } for (int k = k0; k < k1; ++k) { int32_t out_idx; if (i >= len0) { while (k < k1) { dst[k++] = tmp1[j++]; } break; } else if (j >= len1) { while (k < k1) { dst[k++] = tmp0[i++]; } break; } else { bool take_left; if (order == GGML_SORT_ORDER_ASC) { take_left = (val0 <= val1); } else { take_left = (val0 >= val1); } if (take_left) { out_idx = idx0; ++i; if (i < len0) { idx0 = tmp0[i]; val0 = src0_row[idx0]; } } else { out_idx = idx1; ++j; if (j < len1) { idx1 = tmp1[j]; val1 = src0_row[idx1]; } } } dst[k] = out_idx; } } template [[host_name("kernel_argsort_merge_f32_i32_asc")]] kernel argsort_merge_t kernel_argsort_merge_f32_i32; template [[host_name("kernel_argsort_merge_f32_i32_desc")]] kernel argsort_merge_t kernel_argsort_merge_f32_i32; kernel void kernel_leaky_relu_f32( constant ggml_metal_kargs_leaky_relu & args, device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { const float x = src0[tpig]; dst[tpig] = x > 0.0f ? x : x * args.slope; } kernel void kernel_leaky_relu_f32_4( constant ggml_metal_kargs_leaky_relu & args, device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { const float4 x = src0[tpig]; dst[tpig] = float4(x > 0.0f)*x + float4(x <= 0.0f)*(x * args.slope); } constant bool FC_flash_attn_ext_pad_has_mask [[function_constant(FC_FLASH_ATTN_EXT_PAD + 0)]]; constant int32_t FC_flash_attn_ext_pad_ncpsg [[function_constant(FC_FLASH_ATTN_EXT_PAD + 25)]]; // pad the last chunk of C elements of k and v into a an extra pad buffer kernel void kernel_flash_attn_ext_pad( constant ggml_metal_kargs_flash_attn_ext_pad & args, device const char * k, device const char * v, device const char * mask, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int32_t C = FC_flash_attn_ext_pad_ncpsg; device char * k_pad = dst; device char * v_pad = k_pad + args.nb11*C*args.ne_12_2*args.ne_12_3; device char * mask_pad = v_pad + args.nb21*C*args.ne_12_2*args.ne_12_3; const int32_t icp = args.ne11 % C; const int32_t ic0 = args.ne11 - icp; const int32_t i1 = tgpig[0]; const int32_t i2 = tgpig[1]; const int32_t i3 = tgpig[2]; if (i2 < args.ne_12_2 && i3 < args.ne_12_3) { device const char * k_src = k + args.nb11*(ic0 + i1) + args.nb12*i2 + args.nb13*i3; device const char * v_src = v + args.nb21*(ic0 + i1) + args.nb22*i2 + args.nb23*i3; device char * k_dst = k_pad + args.nb11*i1 + args.nb11*C*i2 + args.nb11*C*args.ne_12_2*i3; device char * v_dst = v_pad + args.nb21*i1 + args.nb21*C*i2 + args.nb21*C*args.ne_12_2*i3; if (i1 >= icp) { // here it is not important the exact value that will be used as we rely on masking out the scores in the attention for (uint64_t i = tiitg; i < args.nb11; i += ntg.x) { k_dst[i] = 0; } for (uint64_t i 
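    // note: layout of the pad buffer produced by this kernel: dst = [k_pad | v_pad | mask_pad].
    // k_pad/v_pad hold C rows per (kv head, batch), copied from the last partial chunk of the KV
    // data (ne11 % C rows) and zero-filled past ne11; mask_pad holds the matching mask columns,
    // set to -MAXHALF in the padded region so those positions are masked out during attention.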
= tiitg; i < args.nb21; i += ntg.x) { v_dst[i] = 0; } } else { for (uint64_t i = tiitg; i < args.nb11; i += ntg.x) { k_dst[i] = k_src[i]; } for (uint64_t i = tiitg; i < args.nb21; i += ntg.x) { v_dst[i] = v_src[i]; } } } if (FC_flash_attn_ext_pad_has_mask) { if (i2 < args.ne32 && i3 < args.ne33) { for (int ib = i1; ib < args.ne31; ib += C) { device const half * mask_src = (device const half *)(mask + args.nb31*ib + args.nb32*i2 + args.nb33*i3) + ic0; device half * mask_dst = (device half *)(mask_pad) + C*ib + C*args.ne31*i2 + C*args.ne31*args.ne32*i3; for (int i = tiitg; i < C; i += ntg.x) { if (i >= icp) { mask_dst[i] = -MAXHALF; } else { mask_dst[i] = mask_src[i]; } } } } } } constant int32_t FC_flash_attn_ext_blk_nqptg [[function_constant(FC_FLASH_ATTN_EXT_BLK + 24)]]; constant int32_t FC_flash_attn_ext_blk_ncpsg [[function_constant(FC_FLASH_ATTN_EXT_BLK + 25)]]; // scan the blocks of the mask that are not masked // 0 - masked (i.e. full of -INF, skip) // 1 - not masked (i.e. at least one element of the mask is not -INF) kernel void kernel_flash_attn_ext_blk( constant ggml_metal_kargs_flash_attn_ext_blk & args, device const char * mask, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]]) { // block size C x Q const int32_t Q = FC_flash_attn_ext_blk_nqptg; const int32_t C = FC_flash_attn_ext_blk_ncpsg; constexpr short NW = N_SIMDWIDTH; const int32_t i3 = tgpig[2]/args.ne32; const int32_t i2 = tgpig[2]%args.ne32; const int32_t i1 = tgpig[1]; const int32_t i0 = tgpig[0]; char res = i0*C + C > args.ne30 ? 1 : 0; device const half * mask_src = (device const half *) (mask + (i1*Q)*args.nb31 + i2*args.nb32 + i3*args.nb33) + i0*C + tiisg; // fast route if (res == 0) { if (simd_max(*mask_src) > -MAXHALF/2) { res = 1; } } // detailed check of the elements of the block if ((C > NW || Q > 1) && res == 0) { half m = -MAXHALF; FOR_UNROLL (short j = 0; j < Q; ++j) { FOR_UNROLL (short ii = 0; ii < C/NW; ++ii) { m = max(m, mask_src[ii*NW]); } mask_src += args.nb31/2; } if (simd_max(m) > -MAXHALF/2) { res = 1; } } const int32_t nblk1 = ((args.ne01 + Q - 1)/Q); const int32_t nblk0 = ((args.ne30 + C - 1)/C); if (tiisg == 0) { dst[((i3*args.ne32 + i2)*nblk1 + i1)*nblk0 + i0] = res; } } constant bool FC_flash_attn_ext_has_mask [[function_constant(FC_FLASH_ATTN_EXT + 0)]]; constant bool FC_flash_attn_ext_has_sinks [[function_constant(FC_FLASH_ATTN_EXT + 1)]]; constant bool FC_flash_attn_ext_has_bias [[function_constant(FC_FLASH_ATTN_EXT + 2)]]; constant bool FC_flash_attn_ext_has_scap [[function_constant(FC_FLASH_ATTN_EXT + 3)]]; constant bool FC_flash_attn_ext_has_kvpad [[function_constant(FC_FLASH_ATTN_EXT + 4)]]; constant bool FC_flash_attn_ext_bc_mask [[function_constant(FC_FLASH_ATTN_EXT + 10)]]; //constant float FC_flash_attn_ext_scale [[function_constant(FC_FLASH_ATTN_EXT + 10)]]; //constant float FC_flash_attn_ext_max_bias [[function_constant(FC_FLASH_ATTN_EXT + 11)]]; //constant float FC_flash_attn_ext_logit_softcap [[function_constant(FC_FLASH_ATTN_EXT + 12)]]; constant int32_t FC_flash_attn_ext_ns10 [[function_constant(FC_FLASH_ATTN_EXT + 20)]]; constant int32_t FC_flash_attn_ext_ns20 [[function_constant(FC_FLASH_ATTN_EXT + 21)]]; constant int32_t FC_flash_attn_ext_nsg [[function_constant(FC_FLASH_ATTN_EXT + 22)]]; // ref: https://arxiv.org/pdf/2307.08691.pdf template< typename q_t, // query types in shared memory typename q4_t, typename q8x8_t, typename k_t, // key types in shared memory typename k4x4_t, typename k8x8_t, typename v_t, // value 
types in shared memory typename v4x4_t, typename v8x8_t, typename qk_t, // Q*K types typename qk8x8_t, typename s_t, // soft-max types typename s2_t, typename s8x8_t, typename o_t, // attention accumulation types typename o4_t, typename o8x8_t, typename kd4x4_t, // key type in device memory short nl_k, void (*deq_k)(device const kd4x4_t *, short, thread k4x4_t &), typename vd4x4_t, // value type in device memory short nl_v, void (*deq_v)(device const vd4x4_t *, short, thread v4x4_t &), short DK, // K head size short DV, // V head size short Q, // queries per threadgroup short C, // cache items per threadgroup short NSG> // number of simd groups void kernel_flash_attn_ext_impl( constant ggml_metal_kargs_flash_attn_ext & args, device const char * q, device const char * k, device const char * v, device const char * mask, device const char * sinks, device const char * pad, device const char * blk, device char * dst, threadgroup half * shmem_f16, uint3 tgpig, ushort tiisg, ushort sgitg) { const ushort iq3 = tgpig[2]; const ushort iq2 = tgpig[1]; const ushort iq1 = tgpig[0]*Q; #define NS10 (FC_flash_attn_ext_ns10) #define NS20 (FC_flash_attn_ext_ns20) // note: I had some concerns that using this instead of the ugly macros above was affecting performance // need to re-check carefully and if no regressions are observerd - remove the macros // the concerns is that maybe using const variables requires extra registers? but not sure if the compiler // is clever enough to avoid this. unfortunately, using constexpr is not possible with FC //const short NS10 = FC_flash_attn_ext_ns10; //const short NS20 = FC_flash_attn_ext_ns20; constexpr short KV = 8; constexpr short DK4 = DK/4; constexpr short DK8 = DK/8; constexpr short DK16 = DK/16; constexpr short DV4 = DV/4; //constexpr short DV8 = DV/8; constexpr short DV16 = DV/16; constexpr short PV = PAD2(DV, 64); constexpr short PV4 = PV/4; constexpr short PV8 = PV/8; //constexpr short PV16 = PV/16; constexpr short NW = N_SIMDWIDTH; constexpr short NQ = Q/NSG; constexpr short SH = 2*C; // shared memory per simdgroup (s_t == float) constexpr short TS = 2*SH; constexpr short T = DK + 2*PV; // shared memory size per query in (half) threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*T); // holds the query data threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*T); // same as above but in q4_t threadgroup o_t * so = (threadgroup o_t *) (shmem_f16 + 0*T + Q*DK); // the result for all queries in 8x8 matrices (the O matrix from the paper) threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 0*T + Q*DK); threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + Q*T); // scratch buffer for attention, mask and diagonal matrix threadgroup s2_t * ss2 = (threadgroup s2_t *) (shmem_f16 + Q*T); // same as above but in s2_t threadgroup k_t * sk = (threadgroup k_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // scratch buffer to load K in shared memory threadgroup k4x4_t * sk4x4 = (threadgroup k4x4_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // same as above but in k4x4_t threadgroup v_t * sv = (threadgroup v_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // scratch buffer to load V in shared memory threadgroup v4x4_t * sv4x4 = (threadgroup v4x4_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // same as above but in v4x4_t // mask storage in shared mem threadgroup half2 * sm2 = (threadgroup half2 *) (shmem_f16 + Q*T + 2*C); // per-query mask pointers device const half2 * pm2[NQ]; FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + 
sgitg; pm2[jj] = (device const half2 *) ((device const char *) mask + (iq1 + j)*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33); } { const int32_t nblk1 = ((args.ne01 + Q - 1)/Q); const int32_t nblk0 = ((args.ne11 + C - 1)/C); blk += (((iq3%args.ne33)*args.ne32 + (iq2%args.ne32))*nblk1 + iq1/Q)*nblk0; } { q += iq1*args.nb01 + iq2*args.nb02 + iq3*args.nb03; const short ikv2 = iq2/(args.ne02/args.ne_12_2); const short ikv3 = iq3/(args.ne03/args.ne_12_3); k += ikv2*args.nb12 + ikv3*args.nb13; v += ikv2*args.nb22 + ikv3*args.nb23; } // load heads from Q to shared memory FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; device const float4 * q4 = (device const float4 *) ((device const char *) q + j*args.nb01); for (short i = tiisg; i < DK4; i += NW) { if (iq1 + j < args.ne01) { sq4[j*DK4 + i] = (q4_t) q4[i]; } else { sq4[j*DK4 + i] = 0; } } } // zero out FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; for (short i = tiisg; i < DV4; i += NW) { so4[j*PV4 + i] = 0; } for (short i = tiisg; i < SH; i += NW) { ss[j*SH + i] = 0.0f; } } threadgroup_barrier(mem_flags::mem_threadgroup); float S[NQ] = { [0 ... NQ-1] = 0.0f }; { float M[NQ] = { [0 ... NQ-1] = -FLT_MAX/2 }; float slope = 1.0f; // ALiBi if (FC_flash_attn_ext_has_bias) { const short h = iq2; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const short exph = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; slope = pow(base, exph); } // loop over the KV cache // each simdgroup handles blocks of Q rows and C columns for (int ic0 = 0; ; ++ic0) { int ic = ic0*C; if (ic >= args.ne11) { break; } // the last partial chunk uses the pad buffer as source if (FC_flash_attn_ext_has_kvpad && ic + C > args.ne11) { k = pad; v = k + args.nb11*C*args.ne_12_2*args.ne_12_3; mask = v + args.nb21*C*args.ne_12_2*args.ne_12_3; const short ikv2 = iq2/(args.ne02/args.ne_12_2); const short ikv3 = iq3/(args.ne03/args.ne_12_3); k += (ikv2 + ikv3*args.ne_12_2)*args.nb11*C; v += (ikv2 + ikv3*args.ne_12_2)*args.nb21*C; if (!FC_flash_attn_ext_has_mask) { threadgroup half * sm = (threadgroup half *) (sm2); FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; for (short i = tiisg; i < C; i += NW) { if (ic + i >= args.ne11) { sm[2*j*SH + i] = -MAXHALF; } } } } else { FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; pm2[jj] = (device const half2 *) ((device const half *) mask + (iq1 + j)*C + (iq2%args.ne32)*(C*args.ne31) + (iq3%args.ne33)*(C*args.ne31*args.ne32)); } } ic = 0; } // read the mask into shared mem if (FC_flash_attn_ext_has_mask) { if (blk[ic0] == 0) { FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { pm2[jj] += NW; } continue; } FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; if (FC_flash_attn_ext_bc_mask) { sm2[j*SH + tiisg] = (iq1 + j) < args.ne31 ? 
pm2[jj][tiisg] : half2(-MAXHALF, -MAXHALF); } else { sm2[j*SH + tiisg] = pm2[jj][tiisg]; } pm2[jj] += NW; } #if 0 // note: old -INF block optimization - obsoleted by pre-computing non-masked blocks threadgroup_barrier(mem_flags::mem_threadgroup); // used to detect blocks full of -INF // skip only when the entire threadgroup is masked half2 smax2(-MAXHALF/2, -MAXHALF/2); FOR_UNROLL (short j = 0; j < Q; ++j) { smax2 = max(smax2, sm2[j*SH + tiisg]); } smax2 = simd_max(smax2); if (max(smax2[0], smax2[1]) <= -MAXHALF/2) { // this barrier is important threadgroup_barrier(mem_flags::mem_threadgroup); continue; } #endif } // Q*K^T // this is compile-time check, so it does not have runtime overhead if (is_same::value) { // we can read directly from global memory device const k_t * pk = (device const k_t *) (k + ic*args.nb11); threadgroup const q_t * pq = sq; threadgroup s_t * ps = ss; pk += sgitg*(8*NS10); ps += sgitg*(8*1); static_assert((C/8) % NSG == 0, ""); constexpr short NC = (C/8)/NSG; // note: do not unroll for large heads #pragma unroll (DK <= 64 ? NC : 1) for (short cc = 0; cc < NC; ++cc) { qk8x8_t mqk = make_filled_simdgroup_matrix((qk_t) 0.0f); if (DK % 16 != 0) { k8x8_t mk; q8x8_t mq; FOR_UNROLL (short i = 0; i < DK8; ++i) { simdgroup_barrier(mem_flags::mem_none); simdgroup_load(mk, pk + 8*i, NS10, 0, true); simdgroup_load(mq, pq + 8*i, DK); simdgroup_barrier(mem_flags::mem_none); simdgroup_multiply_accumulate(mqk, mq, mk, mqk); } } else { k8x8_t mk[2]; q8x8_t mq[2]; FOR_UNROLL (short i = 0; i < DK8/2; ++i) { simdgroup_barrier(mem_flags::mem_none); simdgroup_load(mq[0], pq + 0*8 + 16*i, DK); simdgroup_load(mq[1], pq + 1*8 + 16*i, DK); simdgroup_load(mk[0], pk + 0*8 + 16*i, NS10, 0, true); simdgroup_load(mk[1], pk + 1*8 + 16*i, NS10, 0, true); simdgroup_barrier(mem_flags::mem_none); simdgroup_multiply_accumulate(mqk, mq[0], mk[0], mqk); simdgroup_multiply_accumulate(mqk, mq[1], mk[1], mqk); } } simdgroup_store(mqk, ps, SH, 0, false); pk += 8*(NSG*NS10); ps += 8*(NSG); } } else { // TODO: this is the quantized K cache branch - not optimized yet for (short ccc = 0; ccc < (C/8)/NSG; ++ccc) { const short cc = ccc*NSG + sgitg; const short tx = tiisg%4; const short ty = tiisg/4; qk8x8_t mqk = make_filled_simdgroup_matrix((qk_t) 0.0f); for (short ii = 0; ii < DK16; ii += 4) { device const kd4x4_t * pk4x4 = (device const kd4x4_t *) (k + ((ic + 8*cc + ty)*args.nb11)); if (DK16%4 == 0) { // the head is evenly divisible by 4*16 = 64, so no need for bound checks { k4x4_t tmp; deq_k(pk4x4 + (ii + tx)/nl_k, (ii + tx)%nl_k, tmp); sk4x4[4*ty + tx] = tmp; } simdgroup_barrier(mem_flags::mem_threadgroup); FOR_UNROLL (short k = 0; k < 4; ++k) { k8x8_t mk; q8x8_t mq; simdgroup_load(mk, sk + 16*k + 0*8, 4*16, 0, true); // transpose simdgroup_load(mq, sq + (2*(ii + k) + 0)*8, DK); simdgroup_multiply_accumulate(mqk, mq, mk, mqk); simdgroup_load(mk, sk + 16*k + 1*8, 4*16, 0, true); // transpose simdgroup_load(mq, sq + (2*(ii + k) + 1)*8, DK); simdgroup_multiply_accumulate(mqk, mq, mk, mqk); } } else { if (ii + tx < DK16) { k4x4_t tmp; deq_k(pk4x4 + (ii + tx)/nl_k, (ii + tx)%nl_k, tmp); sk4x4[4*ty + tx] = tmp; } simdgroup_barrier(mem_flags::mem_threadgroup); for (short k = 0; k < 4 && ii + k < DK16; ++k) { k8x8_t mk; q8x8_t mq; simdgroup_load(mk, sk + 16*k + 0*8, 4*16, 0, true); // transpose simdgroup_load(mq, sq + (2*(ii + k) + 0)*8, DK); simdgroup_multiply_accumulate(mqk, mq, mk, mqk); simdgroup_load(mk, sk + 16*k + 1*8, 4*16, 0, true); // transpose simdgroup_load(mq, sq + (2*(ii + k) + 1)*8, DK); 
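            // note: the softmax below is the "online" formulation from the flash-attention paper:
            // keep a running max M and running sum S per query row, and for each new block of
            // scores s[0..C):
            //   M' = max(M, max_j s_j)
            //   S' = S*exp(M - M') + sum_j exp(s_j - M')
            //   O' = O*exp(M - M') + sum_j exp(s_j - M') * V_j
            // the accumulator O is normalized by S only once, when the final result is written.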
simdgroup_multiply_accumulate(mqk, mq, mk, mqk); } } } simdgroup_store(mqk, ss + 8*cc, SH, 0, false); } } threadgroup_barrier(mem_flags::mem_threadgroup); // online softmax FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; const float m = M[jj]; // scale and apply the logitcap / mask float2 s2 = ss2[j*SH/2 + tiisg]*args.scale; if (FC_flash_attn_ext_has_scap) { s2 = args.logit_softcap*precise::tanh(s2); } // mqk = mqk + slope*mask if (FC_flash_attn_ext_has_bias) { s2 += s2_t(sm2[j*SH + tiisg])*slope; } else { s2 += s2_t(sm2[j*SH + tiisg]); } M[jj] = simd_max(max(M[jj], max(s2[0], s2[1]))); const float ms = exp(m - M[jj]); const float2 vs2 = exp(s2 - M[jj]); S[jj] = S[jj]*ms + simd_sum(vs2[0] + vs2[1]); // the P matrix from the paper (Q rows, C columns) ss2[j*SH/2 + tiisg] = vs2; if (DV4 % NW == 0) { FOR_UNROLL (short ii = 0; ii < DV4/NW; ++ii) { const short i = ii*NW + tiisg; so4[j*PV4 + i] *= ms; } } else { for (short i = tiisg; i < DV4; i += NW) { so4[j*PV4 + i] *= ms; } } } threadgroup_barrier(mem_flags::mem_threadgroup); // O = O + (Q*K^T)*V { // we can read directly from global memory if (is_same::value) { static_assert(PV8 % NSG == 0, ""); constexpr short NO = PV8/NSG; o8x8_t lo[NO]; { auto sot = so + 8*sgitg; FOR_UNROLL (short ii = 0; ii < NO; ++ii) { simdgroup_load(lo[ii], sot, PV, 0, false); sot += 8*NSG; } } { device const v_t * pv = (device const v_t *) (v + ic*args.nb21); pv += 8*sgitg; if (DV <= 64) { FOR_UNROLL (short cc = 0; cc < C/8; ++cc) { s8x8_t vs; simdgroup_load(vs, ss + 8*cc, SH, 0, false); FOR_UNROLL (short ii = 0; ii < NO/2; ++ii) { v8x8_t mv[2]; simdgroup_load(mv[0], pv + 0*NSG + 16*ii*NSG, NS20, 0, false); simdgroup_load(mv[1], pv + 8*NSG + 16*ii*NSG, NS20, 0, false); simdgroup_multiply_accumulate(lo[2*ii + 0], vs, mv[0], lo[2*ii + 0]); simdgroup_multiply_accumulate(lo[2*ii + 1], vs, mv[1], lo[2*ii + 1]); } pv += 8*NS20; } } else { FOR_UNROLL (short cc = 0; cc < (C/8)/2; ++cc) { s8x8_t vs[2]; simdgroup_load(vs[0], ss + 16*cc + 0, SH, 0, false); simdgroup_load(vs[1], ss + 16*cc + 8, SH, 0, false); FOR_UNROLL (short ii = 0; ii < NO/2; ++ii) { v8x8_t mv[4]; simdgroup_load(mv[0], pv + 0*NSG + 16*ii*NSG + 0*8*NS20, NS20, 0, false); simdgroup_load(mv[1], pv + 8*NSG + 16*ii*NSG + 0*8*NS20, NS20, 0, false); simdgroup_load(mv[2], pv + 0*NSG + 16*ii*NSG + 1*8*NS20, NS20, 0, false); simdgroup_load(mv[3], pv + 8*NSG + 16*ii*NSG + 1*8*NS20, NS20, 0, false); simdgroup_multiply_accumulate(lo[2*ii + 0], vs[0], mv[0], lo[2*ii + 0]); simdgroup_multiply_accumulate(lo[2*ii + 1], vs[0], mv[1], lo[2*ii + 1]); simdgroup_multiply_accumulate(lo[2*ii + 0], vs[1], mv[2], lo[2*ii + 0]); simdgroup_multiply_accumulate(lo[2*ii + 1], vs[1], mv[3], lo[2*ii + 1]); } pv += 2*8*NS20; } } } { auto sot = so + 8*sgitg; FOR_UNROLL (short ii = 0; ii < NO; ++ii) { simdgroup_store(lo[ii], sot, PV, 0, false); sot += 8*NSG; } } } else { // TODO: this is the quantized V cache branch - not optimized yet const short tx = tiisg%4; const short ty = tiisg/4; for (short cc = 0; cc < C/8; ++cc) { s8x8_t vs; simdgroup_load(vs, ss + 8*cc, SH, 0, false); for (short ii = 4*sgitg; ii < DV16; ii += 4*NSG) { device const vd4x4_t * pv4x4 = (device const vd4x4_t *) (v + ((ic + 8*cc + ty)*args.nb21)); if (DV16%4 == 0) { // no need for bound checks { v4x4_t tmp; deq_v(pv4x4 + (ii + tx)/nl_v, (ii + tx)%nl_v, tmp); sv4x4[4*ty + tx] = tmp; } simdgroup_barrier(mem_flags::mem_threadgroup); FOR_UNROLL (short k = 0; k < 4; ++k) { v8x8_t mv[2]; o8x8_t lo[2]; simdgroup_load(mv[0], sv + 16*k + 0*8, 4*16, 0, 
false); simdgroup_load(mv[1], sv + 16*k + 1*8, 4*16, 0, false); simdgroup_load(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false); simdgroup_load(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false); simdgroup_multiply_accumulate(lo[0], vs, mv[0], lo[0]); simdgroup_multiply_accumulate(lo[1], vs, mv[1], lo[1]); simdgroup_store(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false); simdgroup_store(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false); } } else { if (ii + tx < DV16) { v4x4_t tmp; deq_v(pv4x4 + (ii + tx)/nl_v, (ii + tx)%nl_v, tmp); sv4x4[4*ty + tx] = tmp; } simdgroup_barrier(mem_flags::mem_threadgroup); for (short k = 0; k < 4 && ii + k < DV16; ++k) { v8x8_t mv[2]; o8x8_t lo[2]; simdgroup_load(mv[0], sv + 16*k + 0*8, 4*16, 0, false); simdgroup_load(mv[1], sv + 16*k + 1*8, 4*16, 0, false); simdgroup_load(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false); simdgroup_load(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false); simdgroup_multiply_accumulate(lo[0], vs, mv[0], lo[0]); simdgroup_multiply_accumulate(lo[1], vs, mv[1], lo[1]); simdgroup_store(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false); simdgroup_store(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false); } } } } } } threadgroup_barrier(mem_flags::mem_threadgroup); } if (FC_flash_attn_ext_has_sinks) { FOR_UNROLL (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; const float m = M[jj]; const float s = tiisg == 0 ? ((device const float *) sinks)[iq2] : -FLT_MAX/2; M[jj] = simd_max(max(M[jj], s)); const float ms = exp(m - M[jj]); const float vs = exp(s - M[jj]); S[jj] = S[jj]*ms + simd_sum(vs); for (short i = tiisg; i < DV4; i += NW) { so4[j*PV4 + i] *= ms; } } } } // store to global memory for (short jj = 0; jj < NQ; ++jj) { const short j = jj*NSG + sgitg; if (iq1 + j >= args.ne01) { break; } device float4 * dst4 = (device float4 *) dst + ((uint64_t)iq3*args.ne2*args.ne1 + iq2 + (uint64_t)(iq1 + j)*args.ne1)*DV4; const float scale = S[jj] == 0.0 ? 
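        // note: the final result is O / S. the S == 0 guard forces the output to 0 instead of
        // dividing by zero (e.g. when there is nothing to attend to); if sinks are present their
        // exp(sink - M) term has already been folded into S above, so they only lower the
        // attention weights and contribute no V.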
0.0f : 1.0f/S[jj]; if (DV4 % NW == 0) { FOR_UNROLL (short ii = 0; ii < DV4/NW; ++ii) { const short i = ii*NW + tiisg; dst4[i] = (float4) so4[j*PV4 + i]*scale; } } else { for (short i = tiisg; i < DV4; i += NW) { dst4[i] = (float4) so4[j*PV4 + i]*scale; } } } #undef NS10 #undef NS20 } template< typename q_t, // query types in shared memory typename q4_t, typename q8x8_t, typename k_t, // key types in shared memory typename k4x4_t, typename k8x8_t, typename v_t, // value types in shared memory typename v4x4_t, typename v8x8_t, typename qk_t, // Q*K types typename qk8x8_t, typename s_t, // soft-max types typename s2_t, typename s8x8_t, typename o_t, // attention accumulation types typename o4_t, typename o8x8_t, typename kd4x4_t, // key type in device memory short nl_k, void (*deq_k)(device const kd4x4_t *, short, thread k4x4_t &), typename vd4x4_t, // value type in device memory short nl_v, void (*deq_v)(device const vd4x4_t *, short, thread v4x4_t &), short DK, // K head size short DV, // V head size short Q = OP_FLASH_ATTN_EXT_NQPTG, // queries per threadgroup short C = OP_FLASH_ATTN_EXT_NCPSG> // cache items per threadgroup kernel void kernel_flash_attn_ext( constant ggml_metal_kargs_flash_attn_ext & args, device const char * q, device const char * k, device const char * v, device const char * mask, device const char * sinks, device const char * pad, device const char * blk, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { #define FWD_TMPL q_t, q4_t, q8x8_t, k_t, k4x4_t, k8x8_t, v_t, v4x4_t, v8x8_t, qk_t, qk8x8_t, s_t, s2_t, s8x8_t, o_t, o4_t, o8x8_t, kd4x4_t, nl_k, deq_k, vd4x4_t, nl_v, deq_v, DK, DV, Q, C #define FWD_ARGS args, q, k, v, mask, sinks, pad, blk, dst, shmem_f16, tgpig, tiisg, sgitg switch (FC_flash_attn_ext_nsg) { // note: disabled cases to reduce library load time //case 1: kernel_flash_attn_ext_impl(FWD_ARGS); break; //case 2: kernel_flash_attn_ext_impl(FWD_ARGS); break; case 4: kernel_flash_attn_ext_impl(FWD_ARGS); break; } #undef FWD_TMPL #undef FWD_ARGS } // TODO: this is quite ugly. 
in the future these types will be hardcoded in the kernel, but for now keep them as // template to be able to explore different combinations // #define FA_TYPES \ half, half4, simdgroup_half8x8, \ half, half4x4, simdgroup_half8x8, \ half, half4x4, simdgroup_half8x8, \ float, simdgroup_float8x8, \ float, float2, simdgroup_float8x8, \ float, float4, simdgroup_float8x8 //half, half4, simdgroup_half8x8 #define FA_TYPES_BF \ bfloat, bfloat4, simdgroup_bfloat8x8, \ bfloat, bfloat4x4, simdgroup_bfloat8x8, \ bfloat, bfloat4x4, simdgroup_bfloat8x8, \ float, simdgroup_float8x8, \ float, float2, simdgroup_float8x8, \ half, half4, simdgroup_half8x8 //float, float4, simdgroup_float8x8 #define FA_TYPES_F32 \ half, half4, simdgroup_half8x8, \ float, float4x4, simdgroup_float8x8, \ float, float4x4, simdgroup_float8x8, \ float, simdgroup_float8x8, \ float, float2, simdgroup_float8x8, \ float, float4, simdgroup_float8x8 //half, half4, simdgroup_half8x8 typedef decltype(kernel_flash_attn_ext) flash_attn_ext_t; template [[host_name("kernel_flash_attn_ext_f32_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f32_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template 
[[host_name("kernel_flash_attn_ext_f16_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_f16_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_bf16_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_bf16_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #endif template [[host_name("kernel_flash_attn_ext_q4_0_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_0_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk48_dv48" )]] kernel 
flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q4_1_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_0_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template 
[[host_name("kernel_flash_attn_ext_q5_1_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q5_1_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; template [[host_name("kernel_flash_attn_ext_q8_0_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #undef FA_TYPES #undef FA_TYPES_BF #undef FA_TYPES_F32 constant bool FC_flash_attn_ext_vec_has_mask [[function_constant(FC_FLASH_ATTN_EXT_VEC + 0)]]; constant bool FC_flash_attn_ext_vec_has_sinks [[function_constant(FC_FLASH_ATTN_EXT_VEC + 1)]]; constant bool FC_flash_attn_ext_vec_has_bias [[function_constant(FC_FLASH_ATTN_EXT_VEC + 2)]]; constant bool FC_flash_attn_ext_vec_has_scap [[function_constant(FC_FLASH_ATTN_EXT_VEC + 3)]]; constant bool FC_flash_attn_ext_vec_has_kvpad [[function_constant(FC_FLASH_ATTN_EXT_VEC + 4)]]; //constant float FC_flash_attn_ext_vec_scale [[function_constant(FC_FLASH_ATTN_EXT_VEC + 10)]]; //constant float FC_flash_attn_ext_vec_max_bias [[function_constant(FC_FLASH_ATTN_EXT_VEC + 11)]]; //constant float FC_flash_attn_ext_vec_logit_softcap [[function_constant(FC_FLASH_ATTN_EXT_VEC + 12)]]; constant int32_t FC_flash_attn_ext_vec_ns10 [[function_constant(FC_FLASH_ATTN_EXT_VEC + 20)]]; constant int32_t FC_flash_attn_ext_vec_ns20 [[function_constant(FC_FLASH_ATTN_EXT_VEC + 21)]]; constant int32_t FC_flash_attn_ext_vec_nsg [[function_constant(FC_FLASH_ATTN_EXT_VEC + 22)]]; constant int32_t FC_flash_attn_ext_vec_nwg [[function_constant(FC_FLASH_ATTN_EXT_VEC + 23)]]; template< typename q4_t, // query types in shared memory typename k4_t, // key types in shared memory typename v4_t, // value types in shared memory typename qk_t, // Q*K types typename s_t, // soft-max types typename s4_t, typename o4_t, // attention accumulation types typename kd4_t, // key type in device memory short nl_k, void 
(*deq_k_t4)(device const kd4_t *, short, thread k4_t &), typename vd4_t, // value type in device memory short nl_v, void (*deq_v_t4)(device const vd4_t *, short, thread v4_t &), short DK, // K head size short DV, // V head size short NE, // head elements per thread short Q, // queries per threadgroup short C, // cache items per threadgroup short NSG> // number of simd groups void kernel_flash_attn_ext_vec_impl( constant ggml_metal_kargs_flash_attn_ext_vec & args, device const char * q, device const char * k, device const char * v, device const char * mask, device const char * sinks, device const char * pad, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { static_assert(DK % 32 == 0, "DK must be divisible by 32"); static_assert(DV % 32 == 0, "DV must be divisible by 32"); #define NWG (FC_flash_attn_ext_vec_nwg) #define NS10 (FC_flash_attn_ext_vec_ns10) #define NS20 (FC_flash_attn_ext_vec_ns20) const short iwg = tgpig[2]%NWG; const ushort iq3 = tgpig[2]/NWG; const ushort iq2 = tgpig[1]; const ushort iq1 = tgpig[0]; constexpr short DK4 = DK/4; constexpr short DV4 = DV/4; constexpr short PK = PAD2(DK, 128); constexpr short PK4 = PK/4; constexpr short PV = PAD2(DV, 128); constexpr short PV4 = PV/4; constexpr short NW = N_SIMDWIDTH; constexpr short NL = NW/NE; // note: this can be adjusted to support different head sizes and simdgroup work loads constexpr short SH = 4*C; // shared memory per simdgroup static_assert(DK4 % NL == 0, "DK4 must be divisible by NL"); static_assert(DV4 % NL == 0, "DV4 must be divisible by NL"); const short T = PK + NSG*SH; // shared memory size per query in (half) //threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*PK); // holds the query data threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*PK); // same as above but in q4_t threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + sgitg*SH + Q*PK); // scratch buffer for attention threadgroup s4_t * ss4 = (threadgroup s4_t *) (shmem_f16 + sgitg*SH + Q*PK); // same as above but in s4_t threadgroup half * sm = (threadgroup half *) (shmem_f16 + sgitg*SH + 2*C + Q*PK); // scratch buffer for mask threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 2*sgitg*PV + Q*T); // scratch buffer for the results // store the result for all queries in shared memory (the O matrix from the paper) so4 += tiisg; { q += iq1*args.nb01 + iq2*args.nb02 + iq3*args.nb03; const short ikv2 = iq2/(args.ne02/args.ne_12_2); const short ikv3 = iq3/(args.ne03/args.ne_12_3); k += ikv2*args.nb12 + ikv3*args.nb13; v += ikv2*args.nb22 + ikv3*args.nb23; } // load heads from Q to shared memory device const float4 * q4 = (device const float4 *) ((device const char *) q); for (short i = tiisg; i < PK4; i += NW) { if (iq1 < args.ne01 && i < DK4) { sq4[i] = (q4_t) q4[i]; } else { sq4[i] = (q4_t) 0.0f; } } // zero out so for (short i = 0; i < DV4/NL; ++i) { so4[i*NL] = (o4_t) 0.0f; } // zero out shared memory SH for (short i = tiisg; i < SH/4; i += NW) { ss4[i] = (s4_t) 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); { float S = 0.0f; float M = -FLT_MAX/2; // thread indices inside the simdgroup const short tx = tiisg%NL; const short ty = tiisg/NL; // pointer to the mask device const half * pm = (device const half *) (mask + iq1*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33); float slope = 1.0f; // ALiBi if (FC_flash_attn_ext_vec_has_bias) { const short h = 
iq2; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const short exph = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; slope = pow(base, exph); } // loop over the KV cache // each simdgroup handles blocks of Q rows and C columns for (int ic0 = iwg*NSG + sgitg; ; ic0 += NWG*NSG) { int ic = ic0*C; if (ic >= args.ne11) { break; } // the last partial chunk uses the pad buffer as source if (FC_flash_attn_ext_vec_has_kvpad && ic + C > args.ne11) { k = pad; v = k + args.nb11*C*args.ne_12_2*args.ne_12_3; mask = v + args.nb21*C*args.ne_12_2*args.ne_12_3; const short ikv2 = iq2/(args.ne02/args.ne_12_2); const short ikv3 = iq3/(args.ne03/args.ne_12_3); k += (ikv2 + ikv3*args.ne_12_2)*args.nb11*C; v += (ikv2 + ikv3*args.ne_12_2)*args.nb21*C; if (!FC_flash_attn_ext_vec_has_mask) { if (ic + tiisg >= args.ne11) { sm[tiisg] = -MAXHALF; } } else { pm = (device const half *) (mask) + iq1*C + (iq2%args.ne32)*(C*args.ne31) + (iq3%args.ne33)*(C*args.ne31*args.ne32); } ic = 0; } if (FC_flash_attn_ext_vec_has_mask) { sm[tiisg] = pm[ic + tiisg]; } // skip -INF blocks if (simd_max(sm[tiisg]) == -INFINITY) { continue; } // Q*K^T { device const k4_t * pk4 = (device const k4_t *) (k + ic*args.nb11); threadgroup const q4_t * pq4 = sq4; pk4 += ty*NS10/4 + tx; pq4 += tx; qk_t mqk[C/NE] = { [ 0 ... C/NE - 1] = 0.0f }; // each simdgroup processes 1 query and NE (NW/NL) cache elements FOR_UNROLL (short cc = 0; cc < C/NE; ++cc) { if (is_same::value) { FOR_UNROLL (short ii = 0; ii < DK4/NL; ++ii) { mqk[cc] += dot((float4) pk4[cc*NE*NS10/4 + ii*NL], (float4) pq4[ii*NL]); } } else { device const kd4_t * pk = (device const kd4_t *) (k + ((ic + NE*cc + ty)*args.nb11)); k4_t mk; FOR_UNROLL (short ii = 0; ii < DK4/NL; ++ii) { const short i = ii*NL + tx; deq_k_t4(pk + i/nl_k, i%nl_k, mk); mqk[cc] += dot((float4) mk, (float4) sq4[i]); } } if (NE == 1) { mqk[cc] = simd_sum(mqk[cc]); } else { // simdgroup reduce (NE = 4) // [ 0 .. 7] -> [ 0] // [ 8 .. 15] -> [ 8] // [16 .. 23] -> [16] // [24 .. 
31] -> [24] if (NE <= 1) { mqk[cc] += simd_shuffle_down(mqk[cc], 16); } if (NE <= 2) { mqk[cc] += simd_shuffle_down(mqk[cc], 8); } if (NE <= 4) { mqk[cc] += simd_shuffle_down(mqk[cc], 4); } if (NE <= 8) { mqk[cc] += simd_shuffle_down(mqk[cc], 2); } if (NE <= 16) { mqk[cc] += simd_shuffle_down(mqk[cc], 1); } // broadcast mqk[cc] = simd_shuffle(mqk[cc], NL*ty); } } if (FC_flash_attn_ext_vec_has_mask && !FC_flash_attn_ext_vec_has_scap && !FC_flash_attn_ext_vec_has_bias) { ss[NE*tx + ty] = fma(mqk[tx], args.scale, (qk_t) sm[NE*tx + ty]); } else { mqk[tx] *= args.scale; if (FC_flash_attn_ext_vec_has_scap) { mqk[tx] = args.logit_softcap*precise::tanh(mqk[tx]); } if (FC_flash_attn_ext_vec_has_bias) { mqk[tx] += (qk_t) sm[NE*tx + ty]*slope; } else { mqk[tx] += (qk_t) sm[NE*tx + ty]; } ss[NE*tx + ty] = mqk[tx]; } } simdgroup_barrier(mem_flags::mem_threadgroup); // online softmax { const float m = M; const float s = ss[tiisg]; M = simd_max(max(M, s)); const float ms = exp(m - M); const float vs = exp(s - M); S = S*ms + simd_sum(vs); // the P matrix from the paper (Q rows, C columns) ss[tiisg] = vs; // O = diag(ms)*O if ((DV4/NL % NW == 0) || ty == 0) { FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { so4[ii*NL] *= ms; } } } simdgroup_barrier(mem_flags::mem_threadgroup); // O = O + (Q*K^T)*V { o4_t lo[DV4/NL]; FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { lo[ii] = 0.0f; } if (is_same::value) { device const v4_t * pv4 = (device const v4_t *) (v + ic*args.nb21); pv4 += ty*NS20/4 + tx; const auto sst = ss + ty; FOR_UNROLL (short cc = 0; cc < C/NE; ++cc) { FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { lo[ii] += o4_t(float4(pv4[cc*NE*NS20/4 + ii*NL])*float4(sst[cc*NE])); } } } else { FOR_UNROLL (short cc = 0; cc < C/NE; ++cc) { device const vd4_t * pv4 = (device const vd4_t *) (v + ((ic + NE*cc + ty)*args.nb21)); FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { const short i = ii*NL + tx; v4_t mv; deq_v_t4(pv4 + i/nl_v, i%nl_v, mv); lo[ii] += o4_t(float4(mv)*float4(ss[NE*cc + ty])); } } } FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { if (NE > 1) { lo[ii][0] += simd_shuffle_down(lo[ii][0], 16); lo[ii][1] += simd_shuffle_down(lo[ii][1], 16); lo[ii][2] += simd_shuffle_down(lo[ii][2], 16); lo[ii][3] += simd_shuffle_down(lo[ii][3], 16); } if (NE > 2) { lo[ii][0] += simd_shuffle_down(lo[ii][0], 8); lo[ii][1] += simd_shuffle_down(lo[ii][1], 8); lo[ii][2] += simd_shuffle_down(lo[ii][2], 8); lo[ii][3] += simd_shuffle_down(lo[ii][3], 8); } if (NE > 4) { lo[ii][0] += simd_shuffle_down(lo[ii][0], 4); lo[ii][1] += simd_shuffle_down(lo[ii][1], 4); lo[ii][2] += simd_shuffle_down(lo[ii][2], 4); lo[ii][3] += simd_shuffle_down(lo[ii][3], 4); } if (NE > 8) { lo[ii][0] += simd_shuffle_down(lo[ii][0], 2); lo[ii][1] += simd_shuffle_down(lo[ii][1], 2); lo[ii][2] += simd_shuffle_down(lo[ii][2], 2); lo[ii][3] += simd_shuffle_down(lo[ii][3], 2); } if (NE > 16) { lo[ii][0] += simd_shuffle_down(lo[ii][0], 1); lo[ii][1] += simd_shuffle_down(lo[ii][1], 1); lo[ii][2] += simd_shuffle_down(lo[ii][2], 1); lo[ii][3] += simd_shuffle_down(lo[ii][3], 1); } } if ((DV4/NL % NW == 0) || ty == 0) { FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { so4[ii*NL] += lo[ii]; } } } } if (FC_flash_attn_ext_vec_has_sinks && sgitg == 0 && iwg == 0) { const float m = M; const float s = tiisg == 0 ? 
((device const float *) sinks)[iq2] : -FLT_MAX/2; M = simd_max(max(M, s)); const float ms = exp(m - M); const float vs = exp(s - M); S = S*ms + simd_sum(vs); if ((DV4/NL % NW == 0) || ty == 0) { FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) { so4[ii*NL] *= ms; } } } // these are needed for reducing the results from the simdgroups (reuse the ss buffer) if (tiisg == 0) { ss[0] = (s_t) S; ss[1] = (s_t) M; } } so4 -= tiisg; threadgroup_barrier(mem_flags::mem_threadgroup); // parallel reduce for (short r = NSG/2; r > 0; r >>= 1) { if (sgitg < r) { const float S0 = ss[ 0]; const float S1 = ss[r*(SH/2) + 0]; const float M0 = ss[ 1]; const float M1 = ss[r*(SH/2) + 1]; const float M = max(M0, M1); const float ms0 = exp(M0 - M); const float ms1 = exp(M1 - M); const float S = S0*ms0 + S1*ms1; if (tiisg == 0) { ss[0] = S; ss[1] = M; } // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1 for (short i = tiisg; i < DV4; i += NW) { so4[i] = so4[i]*ms0 + so4[i + r*PV4]*ms1; } } threadgroup_barrier(mem_flags::mem_threadgroup); } // final rescale with 1/S and store to global memory if (sgitg == 0) { const int64_t nrows = args.ne3*args.ne2*args.ne1; const int64_t rid = iq3*args.ne2*args.ne1 + iq2 + iq1*args.ne1; device float4 * dst4 = (device float4 *) dst; device float * dst1 = (device float *) dst + nrows*DV*NWG; // the S and M are stored after the results const float S = NWG == 1 ? (ss[0] == 0.0f ? 0.0f : 1.0f/ss[0]) : 1.0f; // interleave the workgroup data for (short i = tiisg; i < DV4; i += NW) { dst4[rid*DV4*NWG + NWG*i + iwg] = (float4) so4[i]*S; } // store S and M if (NWG > 1) { if (tiisg == 0) { dst1[rid*(2*NWG) + 2*iwg + 0] = ss[0]; dst1[rid*(2*NWG) + 2*iwg + 1] = ss[1]; } } } #undef NWG #undef NS10 #undef NS20 } template< typename q4_t, // query types in shared memory typename k4_t, // key types in shared memory typename v4_t, // value types in shared memory typename qk_t, // Q*K types typename s_t, // soft-max types typename s4_t, typename o4_t, // attention accumulation types typename kd4_t, // key type in device memory short nl_k, void (*deq_k_t4)(device const kd4_t *, short, thread k4_t &), typename vd4_t, // value type in device memory short nl_v, void (*deq_v_t4)(device const vd4_t *, short, thread v4_t &), short DK, // K head size short DV, // V head size short NE = 4, // head elements per thread short Q = OP_FLASH_ATTN_EXT_VEC_NQPTG, // queries per threadgroup short C = OP_FLASH_ATTN_EXT_VEC_NCPSG> // cache items per threadgroup kernel void kernel_flash_attn_ext_vec( constant ggml_metal_kargs_flash_attn_ext_vec & args, device const char * q, device const char * k, device const char * v, device const char * mask, device const char * sinks, device const char * pad, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { #define FWD_TMPL q4_t, k4_t, v4_t, qk_t, s_t, s4_t, o4_t, kd4_t, nl_k, deq_k_t4, vd4_t, nl_v, deq_v_t4, DK, DV, NE, Q, C #define FWD_ARGS args, q, k, v, mask, sinks, pad, dst, shmem_f16, tgpig, tiisg, sgitg switch (FC_flash_attn_ext_vec_nsg) { // note: disabled cases to reduce library load time case 1: kernel_flash_attn_ext_vec_impl(FWD_ARGS); break; case 2: kernel_flash_attn_ext_vec_impl(FWD_ARGS); break; case 4: kernel_flash_attn_ext_vec_impl(FWD_ARGS); break; //case 8: kernel_flash_attn_ext_vec_impl(FWD_ARGS); break; //case 16: kernel_flash_attn_ext_vec_impl(FWD_ARGS); break; //case 32: kernel_flash_attn_ext_vec_impl(FWD_ARGS); break; } 
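    // note (editorial sketch, not part of the dispatch above): when NWG > 1 the grid is split over
    //       the KV dimension and each workgroup writes only a partial output O_i together with its
    //       running softmax sums (S_i, M_i), as stored at the end of kernel_flash_attn_ext_vec_impl.
    //       The partials are merged afterwards by kernel_flash_attn_ext_vec_reduce using the usual
    //       log-sum-exp combination:
    //
    //           M = max_i(M_i)
    //           S = sum_i(exp(M_i - M) * S_i)
    //           O = sum_i(exp(M_i - M) * O_i) / S
    //
    //       with NWG == 1 there is nothing to merge and the 1/S rescale is applied directly in
    //       kernel_flash_attn_ext_vec_impl before the store to dst.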
#undef FWD_TMPL #undef FWD_ARGS } // note: I think the s_t can be half instead of float, because the Q*K scaling is done before storing to shared mem // in the other (non-vec) kernel, we need s_t to also be float because we scale during the soft_max // #define FA_TYPES \ half4, \ half4, \ half4, \ float, \ float, float4, \ float4 #define FA_TYPES_F32 \ half4, \ float4, \ float4, \ float, \ float, float4, \ float4 typedef decltype(kernel_flash_attn_ext_vec) flash_attn_ext_vec_t; template [[host_name("kernel_flash_attn_ext_vec_f32_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f32_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f32_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template 
[[host_name("kernel_flash_attn_ext_vec_f32_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f32_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f32_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f32_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template 
[[host_name("kernel_flash_attn_ext_vec_q4_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f32_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_f16_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #endif template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #undef FA_TYPES #undef FA_TYPES_F32 constant int32_t FC_flash_attn_ext_vec_reduce_DV [[function_constant(FC_FLASH_ATTN_EXT_VEC_REDUCE + 0)]]; constant int32_t FC_flash_attn_ext_vec_reduce_NWG [[function_constant(FC_FLASH_ATTN_EXT_VEC_REDUCE + 1)]]; kernel void kernel_flash_attn_ext_vec_reduce( constant ggml_metal_kargs_flash_attn_ext_vec_reduce & args, device const char * htmp, device char * dst, uint tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { #define NWG (FC_flash_attn_ext_vec_reduce_NWG) #define DV (FC_flash_attn_ext_vec_reduce_DV) const uint64_t rid = tgpig; const short iwg = tiisg; device const float * ss = (device const float *) htmp + (uint64_t)args.nrows*DV*NWG; float S = ss[rid*(2*NWG) + 2*iwg + 0]; float M = ss[rid*(2*NWG) + 2*iwg + 1]; const float m = simd_max(M); const float ms = exp(M - m); S = simd_sum(S*ms); S = S == 0.0f ? 0.0f : 1.0f/S; const short DV4 = DV/4; device const float4 * htmp4 = (device const float4 *) htmp + rid*DV4*NWG; device float4 * dst4 = (device float4 *) dst + rid*DV4; for (short i = sgitg; i < DV4; i += NWG) { const float4 v = simd_sum(htmp4[i*NWG + iwg]*ms); if (iwg == 0) { dst4[i] = v*S; } } #undef NWG #undef DV } template kernel void kernel_cpy_t_t( constant ggml_metal_kargs_cpy & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig[2]; const int i02 = tgpig[1]; const int i01 = ntg[1] == 1 ? tgpig[0]%args.ne01 : tgpig[0]*ntg[1] + tiitg/ntg[0]; const int iw0 = ntg[1] == 1 ? 
tgpig[0]/args.ne01 : 0; const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00; const int64_t i3 = n/(args.ne2*args.ne1*args.ne0); const int64_t i2 = (n - i3*args.ne2*args.ne1*args.ne0)/(args.ne1*args.ne0); const int64_t i1 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0)/args.ne0; const int64_t i0 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0 - i1*args.ne0); device T1 * dst_data = (device T1 *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); for (int64_t i00 = iw0*ntg[0] + tiitg%ntg[0]; i00 < args.ne00; ) { device const T0 * src = (device T0 *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); dst_data[i00] = (T1) src[0]; break; } } typedef decltype(kernel_cpy_t_t) kernel_cpy_t; template [[host_name("kernel_cpy_f32_f32")]] kernel kernel_cpy_t kernel_cpy_t_t; template [[host_name("kernel_cpy_f32_f16")]] kernel kernel_cpy_t kernel_cpy_t_t; template [[host_name("kernel_cpy_f32_i32")]] kernel kernel_cpy_t kernel_cpy_t_t; template [[host_name("kernel_cpy_i32_f32")]] kernel kernel_cpy_t kernel_cpy_t_t; template [[host_name("kernel_cpy_i32_i32")]] kernel kernel_cpy_t kernel_cpy_t_t; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_cpy_f32_bf16")]] kernel kernel_cpy_t kernel_cpy_t_t; #endif template [[host_name("kernel_cpy_f16_f32")]] kernel kernel_cpy_t kernel_cpy_t_t; template [[host_name("kernel_cpy_f16_f16")]] kernel kernel_cpy_t kernel_cpy_t_t; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_cpy_bf16_f32")]] kernel kernel_cpy_t kernel_cpy_t_t; template [[host_name("kernel_cpy_bf16_bf16")]] kernel kernel_cpy_t kernel_cpy_t_t; #endif template kernel void kernel_cpy_f32_q( constant ggml_metal_kargs_cpy & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig[2]; const int i02 = tgpig[1]; const int i01 = ntg[1] == 1 ? tgpig[0]%args.ne01 : tgpig[0]*ntg[1] + tiitg/ntg[0]; const int iw0 = ntg[1] == 1 ? 
tgpig[0]/args.ne01 : 0; const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00; const int64_t i3 = n / (args.ne2*args.ne1*args.ne0); const int64_t i2 = (n - i3*args.ne2*args.ne1*args.ne0) / (args.ne1*args.ne0); const int64_t i1 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0) / args.ne0; const int64_t i0 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0 - i1*args.ne0)/QK; device block_q * dst_data = (device block_q *)(dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); for (int64_t i00 = iw0*ntg[0] + tiitg%ntg[0]; i00 < args.nk0; ) { device const float * src = (device const float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + (i00*QK)*args.nb00); quantize_func(src, dst_data[i00]); break; } } typedef decltype(kernel_cpy_f32_q) cpy_f_q_t; template [[host_name("kernel_cpy_f32_q8_0")]] kernel cpy_f_q_t kernel_cpy_f32_q; template [[host_name("kernel_cpy_f32_q4_0")]] kernel cpy_f_q_t kernel_cpy_f32_q; template [[host_name("kernel_cpy_f32_q4_1")]] kernel cpy_f_q_t kernel_cpy_f32_q; template [[host_name("kernel_cpy_f32_q5_0")]] kernel cpy_f_q_t kernel_cpy_f32_q; template [[host_name("kernel_cpy_f32_q5_1")]] kernel cpy_f_q_t kernel_cpy_f32_q; template [[host_name("kernel_cpy_f32_iq4_nl")]] kernel cpy_f_q_t kernel_cpy_f32_q; template kernel void kernel_cpy_q_f32( constant ggml_metal_kargs_cpy & args, device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i03 = tgpig[2]; const int i02 = tgpig[1]; const int i01 = ntg[1] == 1 ? tgpig[0]%args.ne01 : tgpig[0]*ntg[1] + tiitg/ntg[0]; const int iw0 = ntg[1] == 1 ? tgpig[0]/args.ne01 : 0; const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00; const int64_t i3 = n/(args.ne2*args.ne1*args.ne0); const int64_t i2 = (n - i3*args.ne2*args.ne1*args.ne0)/(args.ne1*args.ne0); const int64_t i1 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0)/args.ne0; const int64_t i0 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0 - i1*args.ne0); device const block_q * src_data = (device const block_q *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01); device T4x4 * dst_data = (device T4x4 *)(dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); for (int64_t i00 = iw0*ntg[0] + tiitg%ntg[0]; i00 < args.nk0; ) { T4x4 temp; dequantize_func(src_data + i00/nl, i00%nl, temp); dst_data[i00] = temp; break; } } typedef decltype(kernel_cpy_q_f32) cpy_q_f_t; template [[host_name("kernel_cpy_q4_0_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q4_1_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q5_0_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q5_1_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q8_0_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q4_0_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q4_1_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q5_0_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q5_1_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32; template [[host_name("kernel_cpy_q8_0_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32; kernel void kernel_concat( constant ggml_metal_kargs_concat & args, device const char * src0, device const char * src1, device char * dst, uint3 
tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const int i3 = tgpig.z; const int i2 = tgpig.y; const int i1 = tgpig.x; int o[4] = {0, 0, 0, 0}; o[args.dim] = args.dim == 0 ? args.ne00 : (args.dim == 1 ? args.ne01 : (args.dim == 2 ? args.ne02 : args.ne03)); device const float * x; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { if (i0 < args.ne00 && i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) { x = (device const float *)(src0 + (i3 )*args.nb03 + (i2 )*args.nb02 + (i1 )*args.nb01 + (i0 )*args.nb00); } else { x = (device const float *)(src1 + (i3 - o[3])*args.nb13 + (i2 - o[2])*args.nb12 + (i1 - o[1])*args.nb11 + (i0 - o[0])*args.nb10); } device float * y = (device float *)(dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); *y = *x; } } template void kernel_mul_mv_q2_K_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_q2_K * x = (device const block_q2_K *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const short ix = tiisg/8; // 0...3 const short it = tiisg%8; // 0...7 const short iq = it/4; // 0 or 1 const short ir = it%4; // 0...3 const short is = (8*ir)/16;// 0 or 1 device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir; for (int ib = ix; ib < nb; ib += 4) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (short i = 0; i < 8; ++i) { yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8]; yl[i+16] = y4[i+64]; sumy[2] += yl[i+16]; yl[i+24] = y4[i+96]; sumy[3] += yl[i+24]; } device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*iq + is; device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; device const half * dh = &x[ib].d; for (short row = 0; row < nr0; row++) { float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003); acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); } float dall = dh[0]; float dmin = dh[1] * 1.f/16.f; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + (acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f + (acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f + (acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) - dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0)); qs += args.nb01/2; sc += args.nb01; dh += args.nb01/2; } y4 += 4 * QK_K; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if 
(tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_q2_K_f32")]] kernel void kernel_mul_mv_q2_K_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q2_K_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } template void kernel_mul_mv_q3_K_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_q3_K * x = (device const block_q3_K *) (src0 + offset0); device const float * yy = (device const float *) (src1 + offset1); float yl[32]; //const uint16_t kmask1 = 0x3030; //const uint16_t kmask2 = 0x0f0f; const short tid = tiisg/4; const short ix = tiisg%4; const short ip = tid/4; // 0 or 1 const short il = 2*((tid%4)/2); // 0 or 2 const short ir = tid%2; const short l0 = 8*ir; // One would think that the Metal compiler would figure out that ip and il can only have // 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it // with these two tales. // // Possible masks for the high bit const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0 {0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2 {0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0 {0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2 // Possible masks for the low 2 bits const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}}; const ushort4 hm = mm[2*ip + il/2]; const short shift = 2*il; const float v1 = il == 0 ? 4.f : 64.f; const float v2 = 4.f * v1; const uint16_t s_shift1 = 4*ip; const uint16_t s_shift2 = s_shift1 + il; const short q_offset = 32*ip + l0; const short y_offset = 128*ip + 32*il + l0; device const float * y1 = yy + ix*QK_K + y_offset; uint32_t scales32, aux32; thread uint16_t * scales16 = (thread uint16_t *)&scales32; thread const int8_t * scales = (thread const int8_t *)&scales32; float sumf1[nr0] = {0.f}; float sumf2[nr0] = {0.f}; for (int i = ix; i < nb; i += 4) { for (short l = 0; l < 8; ++l) { yl[l+ 0] = y1[l+ 0]; yl[l+ 8] = y1[l+16]; yl[l+16] = y1[l+32]; yl[l+24] = y1[l+48]; } device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset); device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0); device const uint16_t * a = (device const uint16_t *)(x[i].scales); device const half * dh = &x[i].d; for (short row = 0; row < nr0; ++row) { const float d_all = (float)dh[0]; scales16[0] = a[4]; scales16[1] = a[5]; aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030; scales16[0] = a[il+0]; scales16[1] = a[il+1]; scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32; float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0; for (short l = 0; l < 8; l += 2) { const int32_t qs = q[l/2]; s1 += yl[l+0] * (qs & qm[il/2][0]); s2 += yl[l+1] * (qs & qm[il/2][1]); s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 
0.f : yl[l+1]); s4 += yl[l+16] * (qs & qm[il/2][2]); s5 += yl[l+17] * (qs & qm[il/2][3]); s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 0.f : yl[l+17]); } float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); sumf1[row] += d1 * (scales[0] - 32); sumf2[row] += d2 * (scales[2] - 32); s1 = s2 = s3 = s4 = s5 = s6 = 0; for (short l = 0; l < 8; l += 2) { const int32_t qs = q[l/2+8]; s1 += yl[l+8] * (qs & qm[il/2][0]); s2 += yl[l+9] * (qs & qm[il/2][1]); s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]); s4 += yl[l+24] * (qs & qm[il/2][2]); s5 += yl[l+25] * (qs & qm[il/2][3]); s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 0.f : yl[l+25]); } d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); sumf1[row] += d1 * (scales[1] - 32); sumf2[row] += d2 * (scales[3] - 32); q += args.nb01/2; h += args.nb01/2; a += args.nb01/2; dh += args.nb01/2; } y1 += 4 * QK_K; } for (int row = 0; row < nr0; ++row) { const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift); sumf1[row] = simd_sum(sumf); } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; if (tiisg == 0) { for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { dst_f32[first_row + row] = sumf1[row]; } } } [[host_name("kernel_mul_mv_q3_K_f32")]] kernel void kernel_mul_mv_q3_K_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q3_K_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } template void kernel_mul_mv_q4_K_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; constexpr uint16_t kmask1 = 0x3f3f; constexpr uint16_t kmask2 = 0x0f0f; constexpr uint16_t kmask3 = 0xc0c0; const short ix = tiisg/8; // 0...3 const short it = tiisg%8; // 0...7 const short iq = it/4; // 0 or 1 const short ir = it%4; // 0...3 const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_q4_K * x = (device const block_q4_K *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[16]; float yh[16]; float sumf[nr0]={0.f}; device const float * y4 = y + ix * QK_K + 64 * iq + 8 * ir; uint16_t sc16[4]; thread const uint8_t * sc8 = (thread const uint8_t *)sc16; for (int ib = ix; ib < nb; ib += 4) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (short i = 0; i < 8; ++i) { yl[i+0] = y4[i+ 0]; sumy[0] += yl[i+0]; yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8]; yh[i+0] = y4[i+128]; sumy[2] += yh[i+0]; yh[i+8] = y4[i+160]; sumy[3] += yh[i+8]; } device const uint16_t * sc = (device const uint16_t *)x[ib].scales + iq; device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; device const half * dh = &x[ib].d; for (short row = 0; row < nr0; row++) { sc16[0] = sc[0] & kmask1; sc16[1] = sc[2] & kmask1; sc16[2] = 
((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2); sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2); device const uint16_t * q2 = q1 + 32; float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; FOR_UNROLL (short i = 0; i < 4; ++i) { acc1[0] += yl[2*i + 0] * (q1[i] & 0x000F); acc1[1] += yl[2*i + 1] * (q1[i] & 0x0F00); acc1[2] += yl[2*i + 8] * (q1[i] & 0x00F0); acc1[3] += yl[2*i + 9] * (q1[i] & 0xF000); acc2[0] += yh[2*i + 0] * (q2[i] & 0x000F); acc2[1] += yh[2*i + 1] * (q2[i] & 0x0F00); acc2[2] += yh[2*i + 8] * (q2[i] & 0x00F0); acc2[3] += yh[2*i + 9] * (q2[i] & 0xF000); } sumf[row] += dh[0] * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] + (acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f + (acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] + (acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) - dh[1] * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); q1 += args.nb01/2; sc += args.nb01/2; dh += args.nb01/2; } y4 += 4 * QK_K; } device float * dst_f32 = (device float *) dst + (int64_t)im*args.ne0*args.ne1 + (int64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_q4_K_f32")]] kernel void kernel_mul_mv_q4_K_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q4_K_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } template void kernel_mul_mv_q5_K_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_q5_K * x = (device const block_q5_K *) (src0 + offset0); device const float * yy = (device const float *) (src1 + offset1); float sumf[nr0]={0.f}; float yl[16], yh[16]; constexpr uint16_t kmask1 = 0x3f3f; constexpr uint16_t kmask2 = 0x0f0f; constexpr uint16_t kmask3 = 0xc0c0; const short tid = tiisg/4; const short ix = tiisg%4; const short iq = tid/4; const short ir = tid%4; const short l0 = 8*ir; const short q_offset = 32*iq + l0; const short y_offset = 64*iq + l0; const uint8_t hm1 = 1u << (2*iq); const uint8_t hm2 = hm1 << 1; const uint8_t hm3 = hm1 << 4; const uint8_t hm4 = hm2 << 4; uint16_t sc16[4]; thread const uint8_t * sc8 = (thread const uint8_t *)sc16; device const float * y1 = yy + ix*QK_K + y_offset; for (int i = ix; i < nb; i += 4) { device const uint8_t * q1 = x[i].qs + q_offset; device const uint8_t * qh = x[i].qh + l0; device const half * dh = &x[i].d; device const uint16_t * a = (device const uint16_t *)x[i].scales + iq; device const float * y2 = y1 + 128; float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (short l = 0; l < 8; ++l) { yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0]; yl[l+8] = y1[l+32]; sumy[1] += yl[l+8]; yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0]; yh[l+8] = y2[l+32]; sumy[3] += yh[l+8]; } for (short row = 0; row < nr0; 
++row) { device const uint8_t * q2 = q1 + 64; sc16[0] = a[0] & kmask1; sc16[1] = a[2] & kmask1; sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2); sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2); float4 acc1 = {0.f}; float4 acc2 = {0.f}; FOR_UNROLL (short l = 0; l < 8; ++l) { uint8_t h = qh[l]; acc1[0] += yl[l+0] * (q1[l] & 0x0F); acc1[1] += yl[l+8] * (q1[l] & 0xF0); acc1[2] += yh[l+0] * (q2[l] & 0x0F); acc1[3] += yh[l+8] * (q2[l] & 0xF0); acc2[0] += h & hm1 ? yl[l+0] : 0.f; acc2[1] += h & hm2 ? yl[l+8] : 0.f; acc2[2] += h & hm3 ? yh[l+0] : 0.f; acc2[3] += h & hm4 ? yh[l+8] : 0.f; } sumf[row] += dh[0] * (sc8[0] * (acc1[0] + 16.f*acc2[0]) + sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) + sc8[4] * (acc1[2] + 16.f*acc2[2]) + sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) - dh[1] * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); q1 += args.nb01; qh += args.nb01; dh += args.nb01/2; a += args.nb01/2; } y1 += 4 * QK_K; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = tot; } } } [[host_name("kernel_mul_mv_q5_K_f32")]] kernel void kernel_mul_mv_q5_K_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q5_K_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } template void kernel_mul_mv_q6_K_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; constexpr uint8_t kmask1 = 0x03; constexpr uint8_t kmask2 = 0x0C; constexpr uint8_t kmask3 = 0x30; constexpr uint8_t kmask4 = 0xC0; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_q6_K * x = (device const block_q6_K *) (src0 + offset0); device const float * yy = (device const float *) (src1 + offset1); float sumf[nr0] = { 0.f }; float yl[16]; const short tid = tiisg/2; const short ix = tiisg%2; const short ip = tid/8; // 0 or 1 const short il = tid%8; const short l0 = 4*il; const short is = 8*ip + l0/16; const short y_offset = 128*ip + l0; const short q_offset_l = 64*ip + l0; const short q_offset_h = 32*ip + l0; for (int i = ix; i < nb; i += 2) { device const uint8_t * q1 = x[i].ql + q_offset_l; device const uint8_t * q2 = q1 + 32; device const uint8_t * qh = x[i].qh + q_offset_h; device const int8_t * sc = x[i].scales + is; device const half * dh = &x[i].d; device const float * y = yy + i * QK_K + y_offset; for (short l = 0; l < 4; ++l) { yl[4*l + 0] = y[l + 0]; yl[4*l + 1] = y[l + 32]; yl[4*l + 2] = y[l + 64]; yl[4*l + 3] = y[l + 96]; } for (short row = 0; row < nr0; ++row) { float4 sums = {0.f, 0.f, 0.f, 0.f}; FOR_UNROLL (short l = 0; l < 4; ++l) { sums[0] += yl[4*l + 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); sums[1] += yl[4*l + 1] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) 
- 32); sums[2] += yl[4*l + 2] * ((int8_t)((q1[l] >> 4) | ((qh[l] & kmask3) << 0)) - 32); sums[3] += yl[4*l + 3] * ((int8_t)((q2[l] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); } sumf[row] += dh[0] * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]); q1 += args.nb01; q2 += args.nb01; qh += args.nb01; sc += args.nb01; dh += args.nb01/2; } } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_q6_K_f32")]] kernel void kernel_mul_mv_q6_K_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q6_K_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } // ======================= "True" 2-bit template void kernel_mul_mv_iq2_xxs_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq2_xxs * x = (device const block_iq2_xxs *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); threadgroup uint64_t * svalues = (threadgroup uint64_t *)(shmem); threadgroup uint8_t * ssigns = (threadgroup uint8_t *)(svalues + 256); { int nval = 4; int pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) svalues[pos + i] = iq2xxs_grid[pos + i]; nval = 2; pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) ssigns[pos+i] = ksigns_iq2xs[pos+i]; threadgroup_barrier(mem_flags::mem_threadgroup); } const int ix = tiisg; device const float * y4 = y + 32 * ix; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { for (short i = 0; i < 32; ++i) { yl[i] = y4[i]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq2_xxs * xr = x + ibl; device const uint16_t * q2 = xr->qs + 4 * ib; device const half * dh = &xr->d; for (short row = 0; row < nr0; row++) { const float db = dh[0]; device const uint8_t * aux8 = (device const uint8_t *)q2; const uint32_t aux32 = q2[2] | (q2[3] << 16); const float d = db * (0.5f + (aux32 >> 28)); float sum = 0; for (short l = 0; l < 4; ++l) { const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(svalues + aux8[l]); const uint8_t signs = ssigns[(aux32 >> 7*l) & 127]; for (short j = 0; j < 8; ++j) { sum += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? 
-1.f : 1.f); } } sumf[row] += d * sum; dh += args.nb01/2; q2 += args.nb01/2; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all * 0.25f; } } } [[host_name("kernel_mul_mv_iq2_xxs_f32")]] kernel void kernel_mul_mv_iq2_xxs_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq2_xxs_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq2_xs_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq2_xs * x = (device const block_iq2_xs *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); threadgroup uint64_t * svalues = (threadgroup uint64_t *)(shmem); threadgroup uint8_t * ssigns = (threadgroup uint8_t *)(svalues + 512); { int nval = 8; int pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) svalues[pos + i] = iq2xs_grid[pos + i]; nval = 2; pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) ssigns[pos+i] = ksigns_iq2xs[pos+i]; threadgroup_barrier(mem_flags::mem_threadgroup); } const int ix = tiisg; device const float * y4 = y + 32 * ix; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { for (short i = 0; i < 32; ++i) { yl[i] = y4[i]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq2_xs * xr = x + ibl; device const uint16_t * q2 = xr->qs + 4 * ib; device const uint8_t * sc = xr->scales + ib; device const half * dh = &xr->d; for (short row = 0; row < nr0; row++) { const float db = dh[0]; const uint8_t ls1 = sc[0] & 0xf; const uint8_t ls2 = sc[0] >> 4; const float d1 = db * (0.5f + ls1); const float d2 = db * (0.5f + ls2); float sum1 = 0, sum2 = 0; for (short l = 0; l < 2; ++l) { const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(svalues + (q2[l] & 511)); const uint8_t signs = ssigns[(q2[l] >> 9)]; for (short j = 0; j < 8; ++j) { sum1 += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); } } for (short l = 2; l < 4; ++l) { const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(svalues + (q2[l] & 511)); const uint8_t signs = ssigns[(q2[l] >> 9)]; for (short j = 0; j < 8; ++j) { sum2 += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? 
-1.f : 1.f); } } sumf[row] += d1 * sum1 + d2 * sum2; dh += args.nb01/2; q2 += args.nb01/2; sc += args.nb01; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all * 0.25f; } } } [[host_name("kernel_mul_mv_iq2_xs_f32")]] kernel void kernel_mul_mv_iq2_xs_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq2_xs_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq3_xxs_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq3_xxs * x = (device const block_iq3_xxs *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); threadgroup uint32_t * svalues = (threadgroup uint32_t *)(shmem); threadgroup uint8_t * ssigns = (threadgroup uint8_t *)(svalues + 256); { int nval = 4; int pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) svalues[pos + i] = iq3xxs_grid[pos + i]; nval = 2; pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) ssigns[pos+i] = ksigns_iq2xs[pos+i]; threadgroup_barrier(mem_flags::mem_threadgroup); } const int ix = tiisg; device const float * y4 = y + 32 * ix; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { for (short i = 0; i < 32; ++i) { yl[i] = y4[i]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq3_xxs * xr = x + ibl; device const uint8_t * q3 = xr->qs + 8 * ib; device const uint16_t * gas = (device const uint16_t *)(xr->qs + QK_K/4) + 2 * ib; device const half * dh = &xr->d; for (short row = 0; row < nr0; row++) { const float db = dh[0]; const uint32_t aux32 = gas[0] | (gas[1] << 16); const float d = db * (0.5f + (aux32 >> 28)); float2 sum = {0}; for (short l = 0; l < 4; ++l) { const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(svalues + q3[2*l+0]); const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(svalues + q3[2*l+1]); const uint8_t signs = ssigns[(aux32 >> 7*l) & 127]; for (short j = 0; j < 4; ++j) { sum[0] += yl[8*l + j + 0] * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); sum[1] += yl[8*l + j + 4] * grid2[j] * (signs & kmask_iq2xs[j+4] ? 
-1.f : 1.f); } } sumf[row] += d * (sum[0] + sum[1]); dh += args.nb01/2; q3 += args.nb01; gas += args.nb01/2; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all * 0.5f; } } } [[host_name("kernel_mul_mv_iq3_xxs_f32")]] kernel void kernel_mul_mv_iq3_xxs_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq3_xxs_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq3_s_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq3_s * x = (device const block_iq3_s *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); threadgroup uint32_t * svalues = (threadgroup uint32_t *) shmem; { int nval = 8; int pos = (32*sgitg + tiisg)*nval; for (int i = 0; i < nval; ++i) svalues[pos + i] = iq3s_grid[pos + i]; threadgroup_barrier(mem_flags::mem_threadgroup); } const int ix = tiisg; device const float * y4 = y + 32 * ix; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { for (short i = 0; i < 32; ++i) { yl[i] = y4[i]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq3_s * xr = x + ibl; device const uint8_t * qs = xr->qs + 8 * ib; device const uint8_t * qh = xr->qh + ib; device const uint8_t * sc = xr->scales + (ib/2); device const uint8_t * signs = xr->signs + 4 * ib; device const half * dh = &xr->d; for (short row = 0; row < nr0; row++) { const float db = dh[0]; const float d = db * (1 + 2*((sc[0] >> 4*(ib%2)) & 0xf)); float2 sum = {0}; for (short l = 0; l < 4; ++l) { const threadgroup uint32_t * table1 = qh[0] & kmask_iq2xs[2*l+0] ? svalues + 256 : svalues; const threadgroup uint32_t * table2 = qh[0] & kmask_iq2xs[2*l+1] ? 
svalues + 256 : svalues; const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(table1 + qs[2*l+0]); const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(table2 + qs[2*l+1]); for (short j = 0; j < 4; ++j) { sum[0] += yl[8*l + j + 0] * grid1[j] * select(1, -1, signs[l] & kmask_iq2xs[j+0]); sum[1] += yl[8*l + j + 4] * grid2[j] * select(1, -1, signs[l] & kmask_iq2xs[j+4]); } } sumf[row] += d * (sum[0] + sum[1]); dh += args.nb01/2; qs += args.nb01; qh += args.nb01; sc += args.nb01; signs += args.nb01; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_iq3_s_f32")]] kernel void kernel_mul_mv_iq3_s_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq3_s_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq2_s_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq2_s * x = (device const block_iq2_s *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); //threadgroup uint64_t * svalues = (threadgroup uint64_t *) shmem; //{ // int nval = 32; // int pos = (32*sgitg + tiisg)*nval; // for (int i = 0; i < nval; ++i) svalues[pos + i] = iq2s_grid[pos + i]; // threadgroup_barrier(mem_flags::mem_threadgroup); //} const short ix = tiisg; device const float * y4 = y + 32 * ix; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { for (short i = 0; i < 32; ++i) { yl[i] = y4[i]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq2_s * xr = x + ibl; device const uint8_t * qs = xr->qs + 4 * ib; device const uint8_t * qh = xr->qh + ib; device const uint8_t * sc = xr->scales + ib; device const uint8_t * signs = qs + QK_K/8; device const half * dh = &xr->d; for (short row = 0; row < nr0; row++) { const float db = dh[0]; const float d1 = db * (0.5f + (sc[0] & 0xf)); const float d2 = db * (0.5f + (sc[0] >> 4)); float2 sum = {0}; for (short l = 0; l < 2; ++l) { //const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(svalues + (qs[l+0] | ((qh[0] << (8-2*l)) & 0x300))); //const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(svalues + (qs[l+2] | ((qh[0] << (4-2*l)) & 0x300))); constant uint8_t * grid1 = (constant uint8_t *)(iq2s_grid + (qs[l+0] | ((qh[0] << (8-2*l)) & 0x300))); constant uint8_t * grid2 = (constant uint8_t *)(iq2s_grid + (qs[l+2] | ((qh[0] << (4-2*l)) & 0x300))); for (short j = 0; j < 8; ++j) { sum[0] += yl[8*l + j + 0] * 
grid1[j] * select(1, -1, signs[l+0] & kmask_iq2xs[j]); sum[1] += yl[8*l + j + 16] * grid2[j] * select(1, -1, signs[l+2] & kmask_iq2xs[j]); } } sumf[row] += d1 * sum[0] + d2 * sum[1]; dh += args.nb01/2; qs += args.nb01; qh += args.nb01; sc += args.nb01; signs += args.nb01; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all * 0.25f; } } } [[host_name("kernel_mul_mv_iq2_s_f32")]] kernel void kernel_mul_mv_iq2_s_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq2_s_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq1_s_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq1_s * x = (device const block_iq1_s *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); const short ix = tiisg; device const float * y4 = y + 32 * ix; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { float sumy = 0; for (short i = 0; i < 32; ++i) { yl[i] = y4[i]; sumy += yl[i]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq1_s * xr = x + ibl; device const uint8_t * qs = xr->qs + 4 * ib; device const uint16_t * qh = xr->qh + ib; device const half * dh = &xr->d; for (short row = 0; row < nr0; row++) { constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700))); constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 5) & 0x700))); constant uint8_t * grid3 = (constant uint8_t *)(iq1s_grid_gpu + (qs[2] | ((qh[0] << 2) & 0x700))); constant uint8_t * grid4 = (constant uint8_t *)(iq1s_grid_gpu + (qs[3] | ((qh[0] >> 1) & 0x700))); float sum = 0; for (short j = 0; j < 4; ++j) { sum += yl[j+ 0] * (grid1[j] & 0xf) + yl[j+ 4] * (grid1[j] >> 4) + yl[j+ 8] * (grid2[j] & 0xf) + yl[j+12] * (grid2[j] >> 4) + yl[j+16] * (grid3[j] & 0xf) + yl[j+20] * (grid3[j] >> 4) + yl[j+24] * (grid4[j] & 0xf) + yl[j+28] * (grid4[j] >> 4); } sumf[row] += (float)dh[0] * (sum + sumy * (qh[0] & 0x8000 ? 
-1 - IQ1S_DELTA : -1 + IQ1S_DELTA)) * (2*((qh[0] >> 12) & 7) + 1); dh += args.nb01/2; qs += args.nb01; qh += args.nb01/2; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_iq1_s_f32")]] kernel void kernel_mul_mv_iq1_s_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq1_s_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq1_m_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; const int nb = args.ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq1_m * x = (device const block_iq1_m *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); float yl[32]; float sumf[nr0]={0.f}; const int nb32 = nb * (QK_K / 32); const short ix = tiisg; device const float * y4 = y + 32 * ix; iq1m_scale_t scale; for (int ib32 = ix; ib32 < nb32; ib32 += 32) { float4 sumy = {0.f}; for (short i = 0; i < 8; ++i) { yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; yl[i+ 8] = y4[i+ 8]; sumy[1] += yl[i+ 8]; yl[i+16] = y4[i+16]; sumy[2] += yl[i+16]; yl[i+24] = y4[i+24]; sumy[3] += yl[i+24]; } const int ibl = ib32 / (QK_K / 32); const int ib = ib32 % (QK_K / 32); device const block_iq1_m * xr = x + ibl; device const uint8_t * qs = xr->qs + 4 * ib; device const uint8_t * qh = xr->qh + 2 * ib; device const uint16_t * sc = (device const uint16_t *)xr->scales; for (short row = 0; row < nr0; row++) { scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700))); constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 4) & 0x700))); constant uint8_t * grid3 = (constant uint8_t *)(iq1s_grid_gpu + (qs[2] | ((qh[1] << 8) & 0x700))); constant uint8_t * grid4 = (constant uint8_t *)(iq1s_grid_gpu + (qs[3] | ((qh[1] << 4) & 0x700))); float2 sum = {0.f}; for (short j = 0; j < 4; ++j) { sum[0] += yl[j+ 0] * (grid1[j] & 0xf) + yl[j+ 4] * (grid1[j] >> 4) + yl[j+ 8] * (grid2[j] & 0xf) + yl[j+12] * (grid2[j] >> 4); sum[1] += yl[j+16] * (grid3[j] & 0xf) + yl[j+20] * (grid3[j] >> 4) + yl[j+24] * (grid4[j] & 0xf) + yl[j+28] * (grid4[j] >> 4); } const float delta1 = sumy[0] * (qh[0] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA) + sumy[1] * (qh[0] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA); const float delta2 = sumy[2] * (qh[1] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA) + sumy[3] * (qh[1] & 0x80 ? 
-1 - IQ1M_DELTA : -1 + IQ1M_DELTA); sumf[row] += (float)scale.f16 * ((sum[0] + delta1) * (2*((sc[ib/2] >> (6*(ib%2)+0)) & 7) + 1) + (sum[1] + delta2) * (2*((sc[ib/2] >> (6*(ib%2)+3)) & 7) + 1)); sc += args.nb01/2; qs += args.nb01; qh += args.nb01; } y4 += 32 * 32; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_iq1_m_f32")]] kernel void kernel_mul_mv_iq1_m_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq1_m_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq4_nl_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; threadgroup float * shmem_f32 = (threadgroup float *) shmem; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * NR0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq4_nl * x = (device const block_iq4_nl *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); const int nb = args.ne00/QK4_NL; const int ns01 = args.nb01/args.nb00; const short ix = tiisg/2; // 0...15 const short it = tiisg%2; // 0 or 1 shmem_f32[tiisg] = kvalues_iq4nl_f[tiisg%16]; threadgroup_barrier(mem_flags::mem_threadgroup); float4 yl[4]; float sumf[NR0]={0.f}; device const float * yb = y + ix*QK4_NL + it*8; uint32_t aux32[2]; thread const uint8_t * q8 = (thread const uint8_t *)aux32; float4 qf1, qf2; // [TAG_MUL_MV_WEIRD] for (int ib = ix; ib < nb && ib < ns01; ib += 16) { device const float4 * y4 = (device const float4 *)yb; yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; for (short row = 0; row < NR0; row++) { device const block_iq4_nl & xb = x[row*ns01 + ib]; device const uint16_t * q4 = (device const uint16_t *)(xb.qs + 8*it); float4 acc1 = {0.f}, acc2 = {0.f}; aux32[0] = q4[0] | (q4[1] << 16); aux32[1] = (aux32[0] >> 4) & 0x0f0f0f0f; aux32[0] &= 0x0f0f0f0f; qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]}; qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]}; acc1 += yl[0] * qf1; acc2 += yl[1] * qf2; aux32[0] = q4[2] | (q4[3] << 16); aux32[1] = (aux32[0] >> 4) & 0x0f0f0f0f; aux32[0] &= 0x0f0f0f0f; qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]}; qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]}; acc1 += yl[2] * qf1; acc2 += yl[3] * qf2; acc1 += acc2; sumf[row] += (float)xb.d * (acc1[0] + acc1[1] + acc1[2] + acc1[3]); } yb += 16 * QK4_NL; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < NR0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } 
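// note: the IQ4_NL kernels above decode weights through a 16-entry non-linear
// codebook (kvalues_iq4nl_f). the codebook is first copied into threadgroup
// memory so that the 4-bit indices extracted with the 0x0f0f0f0f nibble masks
// become cheap table lookups, gathered 8 values at a time.
// minimal per-element sketch of the same decoding (illustrative only, not used
// by the kernels above; the helper name is ours):
static inline float iq4_nl_dequantize_one(device const block_iq4_nl & xb, short j) {
    const uint8_t b   = xb.qs[j % 16];                    // each byte packs two 4-bit codebook indices
    const short   idx = (j < 16) ? (b & 0x0F) : (b >> 4); // low nibbles -> elements 0..15, high nibbles -> 16..31
    return (float) xb.d * kvalues_iq4nl_f[idx];           // per-block scale times codebook value
}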
[[host_name("kernel_mul_mv_iq4_nl_f32")]] kernel void kernel_mul_mv_iq4_nl_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_iq4_nl_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_iq4_xs_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; threadgroup float * shmem_f32 = (threadgroup float *) shmem; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * NR0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_iq4_xs * x = (device const block_iq4_xs *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); const int nb = args.ne00/QK_K; const int ns01 = args.nb01/args.nb00; const short ix = tiisg/16; // 0 or 1 const short it = tiisg%16; // 0...15 const short ib = it/2; const short il = it%2; shmem_f32[tiisg] = kvalues_iq4nl_f[tiisg%16]; threadgroup_barrier(mem_flags::mem_threadgroup); float4 yl[4]; float sumf[NR0]={0.f}; device const float * yb = y + ix * QK_K + ib * 32 + il * 8; uint32_t aux32[2]; thread const uint8_t * q8 = (thread const uint8_t *)aux32; float4 qf1, qf2; // [TAG_MUL_MV_WEIRD] for (int ibl = ix; ibl < nb && ibl < ns01; ibl += 2) { device const float4 * y4 = (device const float4 *)yb; yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; for (short row = 0; row < NR0; ++row) { device const block_iq4_xs & xb = x[row*ns01 + ibl]; device const uint32_t * q4 = (device const uint32_t *)(xb.qs + 16*ib + 8*il); float4 acc1 = {0.f}, acc2 = {0.f}; aux32[0] = (q4[0] ) & 0x0f0f0f0f; aux32[1] = (q4[0] >> 4) & 0x0f0f0f0f; qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]}; qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]}; acc1 += yl[0] * qf1; acc2 += yl[1] * qf2; aux32[0] = (q4[1] ) & 0x0f0f0f0f; aux32[1] = (q4[1] >> 4) & 0x0f0f0f0f; qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]}; qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]}; acc1 += yl[2] * qf1; acc2 += yl[3] * qf2; acc1 += acc2; const int ls = (((xb.scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((xb.scales_h >> 2*ib) & 3) << 4)) - 32; sumf[row] += (float)xb.d * ls * (acc1[0] + acc1[1] + acc1[2] + acc1[3]); } yb += 2 * QK_K; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < NR0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_iq4_xs_f32")]] kernel void kernel_mul_mv_iq4_xs_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { 
kernel_mul_mv_iq4_xs_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template void kernel_mul_mv_mxfp4_f32_impl( args_t args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg) { const short NSG = FC_mul_mv_nsg; threadgroup float * shmem_f32 = (threadgroup float *) shmem; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * NSG + sgitg) * NR0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; device const block_mxfp4 * x = (device const block_mxfp4 *) (src0 + offset0); device const float * y = (device const float *) (src1 + offset1); const int nb = args.ne00/QK_MXFP4; const int ns01 = args.nb01/args.nb00; // this can be larger than nb for permuted src0 tensors const short ix = tiisg/2; // 0...15 const short it = tiisg%2; // 0 or 1 shmem_f32[tiisg] = kvalues_mxfp4_f[tiisg%16]; threadgroup_barrier(mem_flags::mem_threadgroup); float4 yl[4]; float sumf[NR0]={0.f}; device const float * yb = y + ix*QK_MXFP4 + it*8; // note: just the check `ib < nb` is enough, but adding the redundant `&& ib < ns01` check makes the kernel a bit faster // no idea why that is - needs some deeper investigation [TAG_MUL_MV_WEIRD] for (int ib = ix; ib < nb && ib < ns01; ib += 16) { device const float4 * y4 = (device const float4 *) yb; yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; FOR_UNROLL (short row = 0; row < NR0; row++) { device const block_mxfp4 & xb = x[row*ns01 + ib]; device const uint8_t * q2 = (device const uint8_t *)(xb.qs + 8*it); float4 acc1 = yl[0]*float4(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); float4 acc2 = yl[1]*float4(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); float4 acc3 = yl[2]*float4(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); float4 acc4 = yl[3]*float4(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); acc1 = (acc1 + acc3) + (acc2 + acc4); sumf[row] += e8m0_to_fp32(xb.e) * ((acc1[0] + acc1[1]) + (acc1[2] + acc1[3])); } yb += 16 * QK_MXFP4; } device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; for (int row = 0; row < NR0 && first_row + row < args.ne0; ++row) { float sum_all = simd_sum(sumf[row]); if (tiisg == 0) { dst_f32[first_row + row] = sum_all; } } } [[host_name("kernel_mul_mv_mxfp4_f32")]] kernel void kernel_mul_mv_mxfp4_f32( constant ggml_metal_kargs_mul_mv & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_mxfp4_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } template kernel void kernel_get_rows_q( constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device void * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort3 ntg [[threads_per_threadgroup]]) { const int32_t iw0 = tgpig.x/args.ne10; const int32_t i10 = tgpig.x%args.ne10; const int32_t i11 = tgpig.y; 
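    // i10/i11/i12 locate one entry of the row-id tensor src1; the id r read
    // below selects which row of src0 gets dequantized into dst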
const int32_t i12 = tgpig.z; const int32_t r = ((const device int32_t *) ((const device char *) src1 + i12*args.nb12 + i11*args.nb11 + i10*args.nb10))[0]; const int32_t i02 = i11; const int32_t i03 = i12; auto psrc = (device const block_q *) ((const device char *) src0 + i03*args.nb03 + i02*args.nb02 + r*args.nb01); auto pdst = (device float4x4 *) (( device char *) dst + i12*args.nb3 + i11*args.nb2 + i10*args.nb1); for (int ind = iw0*ntg.x + tiitg; ind < args.ne00t;) { float4x4 temp; dequantize_func(psrc + ind/nl, ind%nl, temp); pdst[ind] = temp; break; } } template kernel void kernel_get_rows_f( constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device void * dst, uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort3 ntg [[threads_per_threadgroup]]) { const int32_t iw0 = tgpig.x/args.ne10; const int32_t i10 = tgpig.x%args.ne10; const int32_t i11 = tgpig.y; const int32_t i12 = tgpig.z; const int32_t r = ((const device int32_t *) ((const device char *) src1 + i12*args.nb12 + i11*args.nb11 + i10*args.nb10))[0]; const int32_t i02 = i11; const int32_t i03 = i12; auto psrc = (const device T0 *) ((const device char *) src0 + i03*args.nb03 + i02*args.nb02 + r*args.nb01); auto pdst = ( device T *) (( device char *) dst + i12*args.nb3 + i11*args.nb2 + i10*args.nb1); for (int ind = iw0*ntg.x + tiitg; ind < args.ne00t;) { pdst[ind] = psrc[ind]; break; } } template kernel void kernel_set_rows_q32( constant ggml_metal_kargs_set_rows & args, device const void * src0, device const void * src1, device float * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int32_t i03 = tgpig.z; const int32_t i02 = tgpig.y; const int32_t i12 = i03%args.ne12; const int32_t i11 = i02%args.ne11; const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; if (i01 >= args.ne01) { return; } const int32_t i10 = i01; const TI i1 = ((const device TI *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; device block_q * dst_row = ( device block_q *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { quantize_func(src_row + 32*ind, dst_row[ind]); } } template kernel void kernel_set_rows_f( constant ggml_metal_kargs_set_rows & args, device const void * src0, device const void * src1, device float * dst, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int32_t i03 = tgpig.z; const int32_t i02 = tgpig.y; const int32_t i12 = i03%args.ne12; const int32_t i11 = i02%args.ne11; const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; if (i01 >= args.ne01) { return; } const int32_t i10 = i01; const TI i1 = ((const device TI *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; device T * dst_row = ( device T *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { dst_row[ind] = (T) src_row[ind]; } } constant bool FC_mul_mm_bc_inp [[function_constant(FC_MUL_MM + 0)]]; constant bool FC_mul_mm_bc_out 
[[function_constant(FC_MUL_MM + 1)]]; // each block_q contains 16*nl weights template kernel void kernel_mul_mm( constant ggml_metal_kargs_mul_mm & args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { threadgroup S0 * sa = (threadgroup S0 *)(shmem); threadgroup S1 * sb = (threadgroup S1 *)(shmem + 4096); threadgroup float * sc = (threadgroup float *)(shmem); constexpr int NR0 = 64; constexpr int NR1 = 32; constexpr int NK = 32; constexpr int NL0 = NK/16; constexpr int NL1 = NK/8; const int im = tgpig.z; const int r0 = tgpig.y*NR0; const int r1 = tgpig.x*NR1; // if this block is of 64x32 shape or smaller const short nr0 = (args.ne0 - r0 < NR0) ? (args.ne0 - r0) : NR0; const short nr1 = (args.ne1 - r1 < NR1) ? (args.ne1 - r1) : NR1; // a thread shouldn't load data outside of the matrix const short lr0 = ((short)tiitg/NL0) < nr0 ? ((short)tiitg/NL0) : nr0 - 1; // 0 .. 63 const short lr1 = ((short)tiitg/NL1) < nr1 ? ((short)tiitg/NL1) : nr1 - 1; // 0 .. 31 const short il0 = (tiitg % NL0); short il = il0; const int i12 = im%args.ne12; const int i13 = im/args.ne12; const uint64_t offset0 = (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; const short offset1 = il0/nl; device const block_q * x = (device const block_q *)(src0 + args.nb01*(r0 + lr0) + offset0) + offset1; const short iy = 8*(tiitg % NL1); device const T1 * y = (device const T1 *)(src1 + args.nb13*i13 + args.nb12*i12 + args.nb11*(r1 + lr1) + args.nb10*iy); #ifndef GGML_METAL_HAS_TENSOR S0_8x8 ma[4]; S1_8x8 mb[2]; simdgroup_float8x8 mc[8]; for (short i = 0; i < 8; i++){ mc[i] = make_filled_simdgroup_matrix(0.f); } #else auto tA = tensor, tensor_inline>(sa, dextents(NK, NR0)); auto tB = tensor, tensor_inline>(sb, dextents(NR1, NK )); mpp::tensor_ops::matmul2d< mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate), execution_simdgroups<4>> mm; auto cT = mm.get_destination_cooperative_tensor(); #endif for (int loop_k = 0; loop_k < args.ne00; loop_k += NK) { #ifndef GGML_METAL_HAS_TENSOR // load data and store to threadgroup memory if (is_same::value && FC_mul_mm_bc_inp) { threadgroup_barrier(mem_flags::mem_threadgroup); // no need for dequantization for (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; //const short lx = i%8; //const short ly = (tiitg/NL0)%8; const short lx = (tiitg/NL0)%8; const short ly = i%8; const short ib = 8*sx + sy; *(sa + 64*ib + 8*ly + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0; } } else { S0_4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); FOR_UNROLL (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; //const short lx = i%8; //const short ly = (tiitg/NL0)%8; const short lx = (tiitg/NL0)%8; const short ly = i%8; const short ib = 8*sx + sy; // NOTE: this is massively slower.. WTF? //sa[64*ib + 8*ly + lx] = temp_a[i/4][i%4]; *(sa + 64*ib + 8*ly + lx) = temp_a[i/4][i%4]; } } if (FC_mul_mm_bc_inp) { for (short i = 0; i < 8; ++i) { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; const short lx = i; const short ly = (tiitg/NL1)%8; //const short lx = (tiitg/NL1)%8; //const short ly = i; const short ib = 4*sx + sy; *(sb + 64*ib + 8*ly + lx) = loop_k + iy + i < args.ne00 ? 
(S1) *((device T1 *) y + i) : 0; } } else { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; const short dx = sx; const short dy = sy; const short ly = (tiitg/NL1)%8; const short ib = 4*sx + sy; *(threadgroup S1_2x4 *)(sb + 64*ib + 8*ly) = (S1_2x4)(*((device T1_2x4 *) y)); } #else // load data and store to threadgroup memory if (is_same::value && FC_mul_mm_bc_inp) { threadgroup_barrier(mem_flags::mem_threadgroup); // no need for dequantization for (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; const short lx = i%8; const short ly = (tiitg/NL0)%8; //const short lx = (tiitg/NL0)%8; //const short ly = i%8; *(sa + NK*(8*sy + ly) + 8*sx + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0; } } else { S0_4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); FOR_UNROLL (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; const short lx = i%8; const short ly = (tiitg/NL0)%8; //const short lx = (tiitg/NL0)%8; //const short ly = i%8; *(sa + NK*(8*sy + ly) + 8*sx + lx) = temp_a[i/4][i%4]; } } if (FC_mul_mm_bc_inp) { for (short i = 0; i < 8; ++i) { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; const short lx = i; const short ly = (tiitg/NL1)%8; //const short lx = (tiitg/NL1)%8; //const short ly = i; *(sb + NK*(8*sy + ly) + 8*sx + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0; } } else { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; //const short lx = i; const short ly = (tiitg/NL1)%8; //const short lx = (tiitg/NL1)%8; //const short ly = i; *(threadgroup S1_2x4 *)(sb + NK*(8*sy + ly) + 8*sx) = (S1_2x4)(*((device T1_2x4 *) y)); } #endif il = (il + 2 < nl) ? il + 2 : il % 2; x = (il < 2) ? 
x + (2 + nl - 1)/nl : x; y += NK; threadgroup_barrier(mem_flags::mem_threadgroup); #ifndef GGML_METAL_HAS_TENSOR // load matrices from threadgroup memory and conduct outer products threadgroup const S0 * lsma = (sa + 4*64*(sgitg%2)); threadgroup const S1 * lsmb = (sb + 2*64*(sgitg/2)); FOR_UNROLL (short ik = 0; ik < NK/8; ik++) { simdgroup_barrier(mem_flags::mem_none); FOR_UNROLL (short i = 0; i < 4; i++) { simdgroup_load(ma[i], lsma + 64*i, 8, 0, false); } simdgroup_barrier(mem_flags::mem_none); FOR_UNROLL (short i = 0; i < 2; i++) { simdgroup_load(mb[i], lsmb + 64*i, 8, 0, false); } simdgroup_barrier(mem_flags::mem_none); FOR_UNROLL (short i = 0; i < 8; i++){ simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]); } lsma += 8*64; lsmb += 4*64; } #else auto sA = tA.slice(0, 0); auto sB = tB.slice(0, 0); mm.run(sB, sA, cT); #endif } if (!FC_mul_mm_bc_out || (r0 + NR0 <= args.ne0 && r1 + NR1 <= args.ne1)) { // if no bounds checks on the output are needed, we can directly write to device memory #ifdef GGML_METAL_HAS_TENSOR device float * C = (device float *) dst + r0 + \ r1 * args.ne0 + im*args.ne1*args.ne0; auto tC = tensor, tensor_inline>(C, dextents(args.ne0, NR1)); cT.store(tC); #else device float * C = (device float *) dst + (r0 + 32*(sgitg & 1)) + \ (r1 + 16*(sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0; for (short i = 0; i < 8; i++) { simdgroup_store(mc[i], C + 8*(i%4) + 8*args.ne0*(i/4), args.ne0, 0, false); } #endif } else { // block is smaller than 64x32, we should avoid writing data outside of the matrix threadgroup_barrier(mem_flags::mem_threadgroup); threadgroup float * temp_str = ((threadgroup float *) shmem) + 32*(sgitg&1) + (16*(sgitg >> 1))*NR0; #ifdef GGML_METAL_HAS_TENSOR auto tC = tensor, tensor_inline>(sc, dextents(NR0, NR1)); cT.store(tC); #else for (short i = 0; i < 8; i++) { simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*NR0*(i/4), NR0, 0, false); } #endif threadgroup_barrier(mem_flags::mem_threadgroup); if (sgitg == 0) { for (int j = tiitg; j < nr1; j += NR1) { device float * D = (device float *) dst + r0 + (r1 + j)*args.ne0 + im*args.ne1*args.ne0; device float4 * D4 = (device float4 *) D; threadgroup float * C = temp_str + (j*NR0); threadgroup float4 * C4 = (threadgroup float4 *) C; int i = 0; for (; i < nr0/4; i++) { *(D4 + i) = *(C4 + i); } i *= 4; for (; i < nr0; i++) { *(D + i) = *(C + i); } } } } } template // n_expert_used kernel void kernel_mul_mm_id_map0( constant ggml_metal_kargs_mul_mm_id_map0 & args, device const char * src2, device char * htpe, device char * hids, threadgroup char * shmem [[threadgroup(0)]], ushort tpitg[[thread_position_in_threadgroup]], ushort ntg[[threads_per_threadgroup]]) { const short ide = tpitg; // expert id uint32_t n_all = 0; device int32_t * ids_i32 = (device int32_t *) hids + ide*args.ne21; for (int i21 = 0; i21 < args.ne21; i21 += ntg) { // n_tokens if (i21 + tpitg < args.ne21) { device const int32_t * src2_i32 = (device const int32_t *) (src2 + (i21 + tpitg)*args.nb21); threadgroup uint16_t * sids = (threadgroup uint16_t *) shmem + tpitg*ne20; #pragma unroll(ne20) for (short i20 = 0; i20 < ne20; i20++) { sids[i20] = src2_i32[i20]; } } threadgroup_barrier(mem_flags::mem_threadgroup); for (short t = 0; t < ntg; t++) { if (i21 + t >= args.ne21) { break; } threadgroup const uint16_t * sids = (threadgroup const uint16_t *) shmem + t*ne20; short sel = 0; #pragma unroll(ne20) for (short i20 = 0; i20 < ne20; i20++) { sel += (sids[i20] == ide)*(i20 + 1); } ids_i32[n_all] = (i21 + t)*ne20 + sel - 1; n_all += sel > 0; } 
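        // make sure all threads have finished scanning this chunk of expert ids
        // before the next iteration overwrites the threadgroup buffer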
threadgroup_barrier(mem_flags::mem_threadgroup); } device uint32_t * tpe_u32 = (device uint32_t *) (htpe); tpe_u32[ide] = n_all; } typedef decltype(kernel_mul_mm_id_map0<1>) kernel_mul_mm_id_map0_t; template [[host_name("kernel_mul_mm_id_map0_ne20_1" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<1>; template [[host_name("kernel_mul_mm_id_map0_ne20_2" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<2>; template [[host_name("kernel_mul_mm_id_map0_ne20_4" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<4>; template [[host_name("kernel_mul_mm_id_map0_ne20_6" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<6>; template [[host_name("kernel_mul_mm_id_map0_ne20_8" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<8>; template [[host_name("kernel_mul_mm_id_map0_ne20_10")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<10>; template [[host_name("kernel_mul_mm_id_map0_ne20_16")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<16>; template kernel void kernel_mul_mm_id( constant ggml_metal_kargs_mul_mm_id & args, device const char * src0, device const char * src1, device const char * htpe, device const char * hids, device char * dst, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { threadgroup S0 * sa = (threadgroup S0 *)(shmem); threadgroup S1 * sb = (threadgroup S1 *)(shmem + 4096); threadgroup float * sc = (threadgroup float *)(shmem); constexpr int NR0 = 64; constexpr int NR1 = 32; constexpr int NK = 32; constexpr int NL0 = NK/16; constexpr int NL1 = NK/8; const int im = tgpig.z; // expert const int r0 = tgpig.y*NR0; const int r1 = tgpig.x*NR1; device const uint32_t * tpe_u32 = (device const uint32_t *) (htpe); device const int32_t * ids_i32 = (device const int32_t *) (hids); const int32_t neh1 = tpe_u32[im]; if (r1 >= neh1) { return; } // if this block is of 64x32 shape or smaller const short nr0 = (args.ne0 - r0 < NR0) ? (args.ne0 - r0) : NR0; const short nr1 = ( neh1 - r1 < NR1) ? ( neh1 - r1) : NR1; // a thread shouldn't load data outside of the matrix const short lr0 = ((short)tiitg/NL0) < nr0 ? ((short)tiitg/NL0) : nr0 - 1; // 0 .. 63 const short lr1 = ((short)tiitg/NL1) < nr1 ? ((short)tiitg/NL1) : nr1 - 1; // 0 .. 
31 const short il0 = (tiitg % NL0); short il = il0; const int id = ids_i32[im*args.ne21 + r1 + lr1]; const short i11 = (id % args.ne20) % args.ne11; const short i12 = (id / args.ne20); const short i13 = 0; const uint64_t offset0 = im*args.nb02 + i13*args.nb03; const short offset1 = il0/nl; device const block_q * x = (device const block_q *)(src0 + args.nb01*(r0 + lr0) + offset0) + offset1; const short iy = 8*(tiitg % NL1); device const T1 * y = (device const T1 *)(src1 + args.nb13*i13 + args.nb12*i12 + args.nb11*i11 + args.nb10*iy); #ifndef GGML_METAL_HAS_TENSOR S0_8x8 ma[4]; S1_8x8 mb[2]; simdgroup_float8x8 mc[8]; for (short i = 0; i < 8; i++){ mc[i] = make_filled_simdgroup_matrix(0.f); } #else auto tA = tensor, tensor_inline>(sa, dextents(NK, NR0)); auto tB = tensor, tensor_inline>(sb, dextents(NR1, NK )); mpp::tensor_ops::matmul2d< mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate), execution_simdgroups<4>> mm; auto cT = mm.get_destination_cooperative_tensor(); #endif for (int loop_k = 0; loop_k < args.ne00; loop_k += NK) { #ifndef GGML_METAL_HAS_TENSOR // load data and store to threadgroup memory if (is_same::value && FC_mul_mm_bc_inp) { threadgroup_barrier(mem_flags::mem_threadgroup); // no need for dequantization for (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; //const short lx = i%8; //const short ly = (tiitg/NL0)%8; const short lx = (tiitg/NL0)%8; const short ly = i%8; const short ib = 8*sx + sy; *(sa + 64*ib + 8*ly + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0; } } else { S0_4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); FOR_UNROLL (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; //const short lx = i%8; //const short ly = (tiitg/NL0)%8; const short lx = (tiitg/NL0)%8; const short ly = i%8; const short ib = 8*sx + sy; // NOTE: this is massively slower.. WTF? //sa[64*ib + 8*ly + lx] = temp_a[i/4][i%4]; *(sa + 64*ib + 8*ly + lx) = temp_a[i/4][i%4]; } } if (FC_mul_mm_bc_inp) { for (short i = 0; i < 8; ++i) { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; const short lx = i; const short ly = (tiitg/NL1)%8; //const short lx = (tiitg/NL1)%8; //const short ly = i; const short ib = 4*sx + sy; *(sb + 64*ib + 8*ly + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0; } } else { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; const short dx = sx; const short dy = sy; const short ly = (tiitg/NL1)%8; const short ib = 4*sx + sy; *(threadgroup S1_2x4 *)(sb + 64*ib + 8*ly) = (S1_2x4)(*((device T1_2x4 *) y)); } #else // load data and store to threadgroup memory if (is_same::value && FC_mul_mm_bc_inp) { threadgroup_barrier(mem_flags::mem_threadgroup); // no need for dequantization for (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; const short lx = i%8; const short ly = (tiitg/NL0)%8; //const short lx = (tiitg/NL0)%8; //const short ly = i%8; *(sa + NK*(8*sy + ly) + 8*sx + lx) = loop_k + 16*il + i < args.ne00 ? 
*((device T0 *) x + i) : 0; } } else { S0_4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); FOR_UNROLL (short i = 0; i < 16; i++) { const short sx = 2*il0 + i/8; const short sy = (tiitg/NL0)/8; const short lx = i%8; const short ly = (tiitg/NL0)%8; //const short lx = (tiitg/NL0)%8; //const short ly = i%8; *(sa + NK*(8*sy + ly) + 8*sx + lx) = temp_a[i/4][i%4]; } } if (FC_mul_mm_bc_inp) { for (short i = 0; i < 8; ++i) { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; const short lx = i; const short ly = (tiitg/NL1)%8; //const short lx = (tiitg/NL1)%8; //const short ly = i; *(sb + NK*(8*sy + ly) + 8*sx + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0; } } else { const short sx = (tiitg%NL1); const short sy = (tiitg/NL1)/8; //const short lx = i; const short ly = (tiitg/NL1)%8; //const short lx = (tiitg/NL1)%8; //const short ly = i; *(threadgroup S1_2x4 *)(sb + NK*(8*sy + ly) + 8*sx) = (S1_2x4)(*((device T1_2x4 *) y)); } #endif il = (il + 2 < nl) ? il + 2 : il % 2; x = (il < 2) ? x + (2 + nl - 1)/nl : x; y += NK; threadgroup_barrier(mem_flags::mem_threadgroup); #ifndef GGML_METAL_HAS_TENSOR // load matrices from threadgroup memory and conduct outer products threadgroup const S0 * lsma = (sa + 4*64*(sgitg%2)); threadgroup const S1 * lsmb = (sb + 2*64*(sgitg/2)); FOR_UNROLL (short ik = 0; ik < NK/8; ik++) { simdgroup_barrier(mem_flags::mem_none); FOR_UNROLL (short i = 0; i < 4; i++) { simdgroup_load(ma[i], lsma + 64*i, 8, 0, false); } simdgroup_barrier(mem_flags::mem_none); FOR_UNROLL (short i = 0; i < 2; i++) { simdgroup_load(mb[i], lsmb + 64*i, 8, 0, false); } simdgroup_barrier(mem_flags::mem_none); FOR_UNROLL (short i = 0; i < 8; i++){ simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]); } lsma += 8*64; lsmb += 4*64; } #else auto sA = tA.slice(0, 0); auto sB = tB.slice(0, 0); mm.run(sB, sA, cT); #endif } // block is smaller than 64x32, we should avoid writing data outside of the matrix threadgroup_barrier(mem_flags::mem_threadgroup); #ifdef GGML_METAL_HAS_TENSOR auto tC = tensor, tensor_inline>(sc, dextents(NR0, NR1)); cT.store(tC); #else threadgroup float * temp_str = ((threadgroup float *) shmem) + 32*(sgitg&1) + (16*(sgitg >> 1))*NR0; for (short i = 0; i < 8; i++) { simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*NR0*(i/4), NR0, 0, false); } #endif threadgroup_barrier(mem_flags::mem_threadgroup); for (short j = sgitg; j < nr1; j += 4) { const int id = ids_i32[im*args.ne21 + r1 + j]; const short ide = id % args.ne20; const short idt = id / args.ne20; device float * D = (device float *) dst + r0 + ide*args.ne0 + idt*args.ne1*args.ne0; device float4 * D4 = (device float4 *) D; threadgroup float * C = (threadgroup float *) shmem + j*NR0; threadgroup float4 * C4 = (threadgroup float4 *) C; int i = tiisg; for (; i < nr0/4; i += 32) { *(D4 + i) = *(C4 + i); } i = (4*(nr0/4)) + tiisg; for (; i < nr0; i += 32) { *(D + i) = *(C + i); } } } #define QK_NL 16 // // get rows // typedef decltype(kernel_get_rows_f) get_rows_f_t; template [[host_name("kernel_get_rows_f32")]] kernel get_rows_f_t kernel_get_rows_f; template [[host_name("kernel_get_rows_f16")]] kernel get_rows_f_t kernel_get_rows_f; template [[host_name("kernel_get_rows_i32")]] kernel get_rows_f_t kernel_get_rows_f; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_get_rows_bf16")]] kernel get_rows_f_t kernel_get_rows_f; #endif typedef decltype(kernel_get_rows_q) get_rows_q_t; template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_q_t 
kernel_get_rows_q; template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_mxfp4")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq2_xxs")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq2_xs")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq3_xxs")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq3_s")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq2_s")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq1_s")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq1_m")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq4_nl")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_q_t kernel_get_rows_q; // // set rows // typedef decltype(kernel_set_rows_f) set_rows_f_t; template [[host_name("kernel_set_rows_f32_i64")]] kernel set_rows_f_t kernel_set_rows_f; template [[host_name("kernel_set_rows_f32_i32")]] kernel set_rows_f_t kernel_set_rows_f; template [[host_name("kernel_set_rows_f16_i64")]] kernel set_rows_f_t kernel_set_rows_f; template [[host_name("kernel_set_rows_f16_i32")]] kernel set_rows_f_t kernel_set_rows_f; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_set_rows_bf16_i64")]] kernel set_rows_f_t kernel_set_rows_f; template [[host_name("kernel_set_rows_bf16_i32")]] kernel set_rows_f_t kernel_set_rows_f; #endif typedef decltype(kernel_set_rows_q32) set_rows_q32_t; template [[host_name("kernel_set_rows_q8_0_i64")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q8_0_i32")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q4_0_i64")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q4_0_i32")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q4_1_i64")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q4_1_i32")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q5_0_i64")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q5_0_i32")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q5_1_i64")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_q5_1_i32")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_iq4_nl_i64")]] kernel set_rows_q32_t kernel_set_rows_q32; template [[host_name("kernel_set_rows_iq4_nl_i32")]] kernel set_rows_q32_t kernel_set_rows_q32; // // matrix-matrix multiplication // typedef 
decltype(kernel_mul_mm) mul_mm_t; template [[host_name("kernel_mul_mm_f32_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_f16_f32")]] kernel mul_mm_t kernel_mul_mm; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_mul_mm_bf16_f32")]] kernel mul_mm_t kernel_mul_mm; #endif template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq2_xxs_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq2_xs_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq3_xxs_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq3_s_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq2_s_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq1_s_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq1_m_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq4_nl_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_f32_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_f16_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_0_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_1_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_0_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_1_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q8_0_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_mxfp4_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q2_K_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q3_K_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_K_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_K_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q6_K_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq2_xxs_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq2_xs_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq3_xxs_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq3_s_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq2_s_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq1_s_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq1_m_f16")]] kernel mul_mm_t kernel_mul_mm; template 
[[host_name("kernel_mul_mm_iq4_nl_f16")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq4_xs_f16")]] kernel mul_mm_t kernel_mul_mm; // // indirect matrix-matrix multiplication // typedef decltype(kernel_mul_mm_id) mul_mm_id; template [[host_name("kernel_mul_mm_id_f32_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_f16_f32")]] kernel mul_mm_id kernel_mul_mm_id; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_mul_mm_id_bf16_f32")]] kernel mul_mm_id kernel_mul_mm_id; #endif template [[host_name("kernel_mul_mm_id_q4_0_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_1_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_0_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_1_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q8_0_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_mxfp4_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q2_K_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q3_K_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_K_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_K_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q6_K_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq2_xxs_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq2_xs_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq3_xxs_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq3_s_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq2_s_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq1_s_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq1_m_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq4_nl_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq4_xs_f32")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_f32_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_f16_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_1_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_1_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q8_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q2_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q3_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q6_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq2_xxs_f16")]] kernel mul_mm_id kernel_mul_mm_id; template 
[[host_name("kernel_mul_mm_id_iq2_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq3_xxs_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq3_s_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq2_s_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq1_s_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq1_m_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq4_nl_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq4_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id; // // matrix-vector multiplication // typedef void (kernel_mul_mv_disp_t)( ggml_metal_kargs_mul_mv args, device const char * src0, device const char * src1, device char * dst, uint3 tgpig, ushort tiisg); typedef void (kernel_mul_mv2_disp_t)( ggml_metal_kargs_mul_mv args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiisg, ushort sgitg); template void mmv_fn( ggml_metal_kargs_mul_mv args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiitg, ushort tiisg, ushort sgitg) { disp_fn(args, src0, src1, dst, tgpig, tiisg); } template void mmv_fn( ggml_metal_kargs_mul_mv args, device const char * src0, device const char * src1, device char * dst, threadgroup char * shmem, uint3 tgpig, ushort tiitg, ushort tiisg, ushort sgitg) { disp_fn(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } typedef decltype(mmv_fn>) mul_mv_disp_fn_t; template kernel void kernel_mul_mv_id( constant ggml_metal_kargs_mul_mv_id & args, device const char * src0s, device const char * src1, device char * dst, device const char * ids, threadgroup char * shmem [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort tiitg[[thread_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]]) { const int iid1 = tgpig.z/args.nei0; const int idx = tgpig.z%args.nei0; tgpig.z = 0; const int32_t i02 = ((device const int32_t *) (ids + iid1*args.nbi1))[idx]; const int64_t i11 = idx % args.ne11; const int64_t i12 = iid1; const int64_t i1 = idx; const int64_t i2 = i12; device const char * src0_cur = src0s + i02*args.nb02; device const char * src1_cur = src1 + i11*args.nb11 + i12*args.nb12; device char * dst_cur = dst + (i1*args.ne0 + i2*args.ne1*args.ne0)*sizeof(float); ggml_metal_kargs_mul_mv args0 = { /*.ne00 =*/ args.ne00, /*.ne01 =*/ args.ne01, /*.ne02 =*/ 1, // args.ne02, /*.nb00 =*/ args.nb00, /*.nb01 =*/ args.nb01, /*.nb02 =*/ args.nb02, /*.nb03 =*/ args.nb02, // args.ne02 == 1 /*.ne10 =*/ args.ne10, /*.ne11 =*/ 1, // args.ne11, /*.ne12 =*/ 1, // args.ne12, /*.nb10 =*/ args.nb10, /*.nb11 =*/ args.nb11, /*.nb12 =*/ args.nb12, /*.nb13 =*/ args.nb12, // ne12 == 1 /*.ne0 =*/ args.ne0, /*.ne1 =*/ 1, // args.ne1, /*.nr0 =*/ args.nr0, /*.r2 =*/ 1, /*.r3 =*/ 1, }; disp_fn( args0, /* src0 */ src0_cur, /* src1 */ src1_cur, /* dst */ dst_cur, shmem, tgpig, tiitg, tiisg, sgitg); } typedef decltype(kernel_mul_mv_id>>) kernel_mul_mv_id_t; typedef decltype(kernel_mul_mv_id>>) kernel_mul_mv_id_4_t; template [[host_name("kernel_mul_mv_id_f32_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_f16_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; #if defined(GGML_METAL_HAS_BF16) template 
[[host_name("kernel_mul_mv_id_bf16_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; #endif template [[host_name("kernel_mul_mv_id_f32_f32_4")]] kernel kernel_mul_mv_id_4_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_f16_f32_4")]] kernel kernel_mul_mv_id_4_t kernel_mul_mv_id>>; #if defined(GGML_METAL_HAS_BF16) template [[host_name("kernel_mul_mv_id_bf16_f32_4")]] kernel kernel_mul_mv_id_4_t kernel_mul_mv_id>>; #endif template [[host_name("kernel_mul_mv_id_q8_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q4_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q5_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q6_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq1_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq1_m_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq2_xxs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq2_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq3_xxs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; kernel void kernel_pool_2d_max_f32( constant ggml_metal_kargs_pool_2d & args, device const float * src0, device float * dst, uint gid[[thread_position_in_grid]]) { if (gid >= args.np) { return; } const int idx = gid; const int I_HW = args.IH * args.IW; const int O_HW = args.OH * args.OW; const int nc = idx / O_HW; const int cur_oh = idx % O_HW / args.OW; const int cur_ow = idx % O_HW % args.OW; device const float * i_ptr = src0 + nc * I_HW; device float * o_ptr = dst + nc * O_HW; const int start_h = cur_oh * args.s1 - args.p1; const int bh = MAX(0, start_h); const int eh = MIN(args.IH, start_h + args.k1); const int start_w = cur_ow * args.s0 - args.p0; const int bw = MAX(0, start_w); const int ew = MIN(args.IW, start_w + args.k0); float res = -INFINITY; for (int i = bh; i < eh; i += 1) { for (int j = bw; j < ew; j += 1) { res = MAX(res, i_ptr[i * args.IW + j]); } } o_ptr[cur_oh * args.OW + cur_ow] = res; } kernel void kernel_pool_2d_avg_f32( constant ggml_metal_kargs_pool_2d & args, device const float * src0, device float * dst, uint gid[[thread_position_in_grid]]) { if (gid >= args.np) { return; } const int idx = gid; 
const int I_HW = args.IH * args.IW; const int O_HW = args.OH * args.OW; const int nc = idx / O_HW; const int cur_oh = idx % O_HW / args.OW; const int cur_ow = idx % O_HW % args.OW; device const float * i_ptr = src0 + nc * I_HW; device float * o_ptr = dst + nc * O_HW; const int start_h = cur_oh * args.s1 - args.p1; const int bh = MAX(0, start_h); const int eh = MIN(args.IH, start_h + args.k1); const int start_w = cur_ow * args.s0 - args.p0; const int bw = MAX(0, start_w); const int ew = MIN(args.IW, start_w + args.k0); // const float scale = 1. / ((eh - bh) * (ew - bw)); const float scale = 1. / (args.k0 * args.k1); float res = 0; for (int i = bh; i < eh; i += 1) { for (int j = bw; j < ew; j += 1) { float cur = i_ptr[i * args.IW + j]; res += cur * scale; } } o_ptr[cur_oh * args.OW + cur_ow] = res; } kernel void kernel_opt_step_adamw_f32( constant ggml_metal_kargs_opt_step_adamw & args, device float * x, device const float * g, device float * g_m, device float * g_v, device const float * pars, uint gid[[thread_position_in_grid]]) { if (gid >= args.np) { return; } const float alpha = pars[0]; const float beta1 = pars[1]; const float beta2 = pars[2]; const float eps = pars[3]; const float wd = pars[4]; const float beta1h = pars[5]; const float beta2h = pars[6]; const float gi = g[gid]; const float gmi = g_m[gid] * beta1 + gi * (1.0f - beta1); const float gvi = g_v[gid] * beta2 + gi * gi * (1.0f - beta2); g_m[gid] = gmi; g_v[gid] = gvi; const float mh = gmi * beta1h; const float vh = sqrt(gvi * beta2h) + eps; x[gid] = x[gid] * (1.0f - alpha * wd) - alpha * mh / vh; } kernel void kernel_opt_step_sgd_f32( constant ggml_metal_kargs_opt_step_sgd & args, device float * x, device const float * g, device const float * pars, uint gid[[thread_position_in_grid]]) { if (gid >= args.np) { return; } x[gid] = x[gid] * (1.0f - pars[0] * pars[1]) - pars[0] * g[gid]; } template kernel void kernel_memset( constant ggml_metal_kargs_fill & args, device T * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = args.val; } typedef decltype(kernel_memset) kernel_memset_t; template [[host_name("kernel_memset_i64")]] kernel kernel_memset_t kernel_memset; constant short FC_count_equal_nsg [[function_constant(FC_COUNT_EQUAL + 0)]]; template kernel void kernel_count_equal( constant ggml_metal_kargs_count_equal & args, device const char * src0, device const char * src1, device atomic_int * dst, threadgroup int32_t * shmem_i32 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], ushort3 tpitg[[thread_position_in_threadgroup]], ushort sgitg[[simdgroup_index_in_threadgroup]], ushort tiisg[[thread_index_in_simdgroup]], ushort3 ntg[[threads_per_threadgroup]]) { const short NSG = FC_count_equal_nsg; const int i3 = tgpig.z; const int i2 = tgpig.y; const int i1 = tgpig.x; if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) { return; } int sum = 0; device const char * base0 = src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03; device const char * base1 = src1 + i1*args.nb11 + i2*args.nb12 + i3*args.nb13; for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) { const T v0 = *(device const T *)(base0 + i0*args.nb00); const T v1 = *(device const T *)(base1 + i0*args.nb10); sum += (v0 == v1); } sum = simd_sum(sum); if (tiisg == 0) { shmem_i32[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); if (sgitg == 0) { float v = 0.0f; if (tpitg.x < NSG) { v = shmem_i32[tpitg.x]; } float total = simd_sum(v); if (tpitg.x == 0) { atomic_fetch_add_explicit(dst, (int32_t) total, memory_order_relaxed); } } } 
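// Annotation (added note, not part of the original kernel source): kernel_count_equal above
// performs a two-level reduction. Each thread counts matches over a strided slice of the
// innermost dimension (i0 starts at tpitg.x and advances by ntg.x), simd_sum() combines the
// per-thread counts within a simdgroup, and lane 0 of each simdgroup writes its partial sum
// into the threadgroup buffer shmem_i32. After the threadgroup barrier, the first simdgroup
// reads those partials (as floats), reduces them with a second simd_sum(), and the thread with
// tpitg.x == 0 atomically adds the total into the int32 destination. FC_count_equal_nsg is a
// Metal function constant, so the host side is expected to specialize the pipeline with the
// number of simdgroups per threadgroup (NSG) before dispatching this kernel.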
typedef decltype(kernel_count_equal) kernel_count_equal_t; template [[host_name("kernel_count_equal_i32")]] kernel kernel_count_equal_t kernel_count_equal; ggml-org-ggml-3678254/src/ggml-musa/000077500000000000000000000000001512524704700170445ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-musa/CMakeLists.txt000066400000000000000000000103621512524704700216060ustar00rootroot00000000000000if (NOT EXISTS $ENV{MUSA_PATH}) if (NOT EXISTS /opt/musa) set(MUSA_PATH /usr/local/musa) else() set(MUSA_PATH /opt/musa) endif() else() set(MUSA_PATH $ENV{MUSA_PATH}) endif() set(CMAKE_C_COMPILER "${MUSA_PATH}/bin/clang") set(CMAKE_C_EXTENSIONS OFF) set(CMAKE_CXX_COMPILER "${MUSA_PATH}/bin/clang++") set(CMAKE_CXX_EXTENSIONS OFF) list(APPEND CMAKE_MODULE_PATH "${MUSA_PATH}/cmake") find_package(MUSAToolkit) if (MUSAToolkit_FOUND) message(STATUS "MUSA Toolkit found") if (NOT DEFINED MUSA_ARCHITECTURES) set(MUSA_ARCHITECTURES "21;22;31") endif() message(STATUS "Using MUSA architectures: ${MUSA_ARCHITECTURES}") file(GLOB GGML_HEADERS_MUSA "../ggml-cuda/*.cuh") list(APPEND GGML_HEADERS_MUSA "../../include/ggml-cuda.h") list(APPEND GGML_HEADERS_MUSA "../ggml-musa/mudnn.cuh") file(GLOB GGML_SOURCES_MUSA "../ggml-cuda/*.cu") file(GLOB SRCS "../ggml-cuda/template-instances/fattn-tile*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-mma*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/mmq*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) if (GGML_MUSA_MUDNN_COPY) file(GLOB SRCS "../ggml-musa/*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) add_compile_definitions(GGML_MUSA_MUDNN_COPY) endif() if (GGML_CUDA_FA_ALL_QUANTS) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) else() file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*f16-f16.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) endif() set_source_files_properties(${GGML_SOURCES_MUSA} PROPERTIES LANGUAGE CXX) foreach(SOURCE ${GGML_SOURCES_MUSA}) set(COMPILE_FLAGS "-Od3 -fno-strict-aliasing -ffast-math -fsigned-char -x musa -mtgpu -fmusa-flush-denormals-to-zero") foreach(ARCH ${MUSA_ARCHITECTURES}) set(COMPILE_FLAGS "${COMPILE_FLAGS} --cuda-gpu-arch=mp_${ARCH}") endforeach() set_property(SOURCE ${SOURCE} PROPERTY COMPILE_FLAGS ${COMPILE_FLAGS}) endforeach() ggml_add_backend_library(ggml-musa ${GGML_HEADERS_MUSA} ${GGML_SOURCES_MUSA} ) # TODO: do not use CUDA definitions for MUSA if (NOT GGML_BACKEND_DL) target_compile_definitions(ggml PUBLIC GGML_USE_CUDA) endif() add_compile_definitions(GGML_USE_MUSA) add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE}) if (GGML_MUSA_GRAPHS) add_compile_definitions(GGML_MUSA_GRAPHS) endif() if (GGML_CUDA_FORCE_MMQ) add_compile_definitions(GGML_CUDA_FORCE_MMQ) endif() if (GGML_CUDA_FORCE_CUBLAS) add_compile_definitions(GGML_CUDA_FORCE_CUBLAS) endif() if (GGML_CUDA_NO_VMM) add_compile_definitions(GGML_CUDA_NO_VMM) endif() if (NOT GGML_CUDA_FA) add_compile_definitions(GGML_CUDA_NO_FA) endif() if (GGML_CUDA_NO_PEER_COPY) add_compile_definitions(GGML_CUDA_NO_PEER_COPY) endif() if (GGML_STATIC) target_link_libraries(ggml-musa PRIVATE MUSA::musart_static MUSA::mublas_static) # TODO: 
mudnn has not provided static libraries yet # if (GGML_MUSA_MUDNN_COPY) # target_link_libraries(ggml-musa PRIVATE mudnn_static) # endif() else() target_link_libraries(ggml-musa PRIVATE MUSA::musart MUSA::mublas) if (GGML_MUSA_MUDNN_COPY) target_link_libraries(ggml-musa PRIVATE mudnn) endif() endif() if (GGML_CUDA_NO_VMM) # No VMM requested, no need to link directly with the musa driver lib (libmusa.so) else() target_link_libraries(ggml-musa PRIVATE MUSA::musa_driver) endif() else() message(FATAL_ERROR "MUSA Toolkit not found") endif() ggml-org-ggml-3678254/src/ggml-musa/mudnn.cu000066400000000000000000000073221512524704700205220ustar00rootroot00000000000000#include #include #include "mudnn.cuh" namespace mudnn = musa::dnn; // Returns a human-readable error string for mudnn::Status const char* mudnnGetErrorString(mudnn::Status err) { switch (err) { case mudnn::Status::SUCCESS: return "Success"; case mudnn::Status::INVALID_PARAMETER: return "Invalid parameter"; case mudnn::Status::NOT_INITIALIZED: return "Not initialized"; case mudnn::Status::ALLOC_FAILED: return "Allocation failed"; case mudnn::Status::NOT_SUPPORTED: return "Not supported"; case mudnn::Status::INTERNAL_ERROR: return "Internal error"; case mudnn::Status::ARCH_MISMATCH: return "Architecture mismatch"; case mudnn::Status::EXECUTION_FAILED: return "Execution failed"; default: return "Unknown mudnn status"; } } // Error checking macro for MUDNN calls #define MUDNN_CHECK(err) CUDA_CHECK_GEN(err, mudnn::Status::SUCCESS, mudnnGetErrorString) namespace { // Thread-safe cache for mudnn::Handle objects per device std::unordered_map> handle_cache; std::mutex handle_cache_mutex; mudnn::Handle* get_cached_handle(int device_id) { std::lock_guard lock(handle_cache_mutex); auto it = handle_cache.find(device_id); if (it != handle_cache.end()) { return it->second.get(); } auto handle = std::make_unique(device_id); mudnn::Handle* handle_ptr = handle.get(); handle_cache[device_id] = std::move(handle); return handle_ptr; } } // Extracts dimensions and strides from a ggml_tensor int get_ggml_dims_and_strides(const ggml_tensor* tensor, std::vector& dims, std::vector& strides) { const int ndims = ggml_n_dims(tensor); const size_t element_size = ggml_element_size(tensor); dims.resize(ndims); strides.resize(ndims); for (int i = 0; i < ndims; ++i) { dims[i] = tensor->ne[i]; strides[i] = tensor->nb[i] / static_cast(element_size); } return ndims; } // Converts ggml_type to mudnn::Tensor::Type mudnn::Tensor::Type ggml_type_to_mudnn_type(ggml_type type) { switch (type) { case GGML_TYPE_F32: return mudnn::Tensor::Type::FLOAT; case GGML_TYPE_F16: return mudnn::Tensor::Type::HALF; // TODO: Add support for other types default: MUDNN_CHECK(mudnn::Status::NOT_SUPPORTED); } return mudnn::Tensor::Type::FLOAT; // Default fallback } // Asynchronous memory copy using mudnn::Unary::IDENTITY musaError_t mudnnMemcpyAsync(ggml_backend_cuda_context& ctx, const ggml_tensor* dst, const ggml_tensor* src) { mudnn::Tensor tensor_dst, tensor_src; MUDNN_CHECK(tensor_dst.SetType(ggml_type_to_mudnn_type(dst->type))); MUDNN_CHECK(tensor_src.SetType(ggml_type_to_mudnn_type(src->type))); std::vector dims, strides; const int ndims = get_ggml_dims_and_strides(src, dims, strides); MUDNN_CHECK(tensor_dst.SetNdInfo(ndims, dims.data(), strides.data())); MUDNN_CHECK(tensor_src.SetNdInfo(ndims, dims.data(), strides.data())); MUDNN_CHECK(tensor_dst.SetAddr(dst->data)); MUDNN_CHECK(tensor_src.SetAddr(src->data)); mudnn::Unary op; MUDNN_CHECK(op.SetMode(mudnn::Unary::Mode::IDENTITY)); 
MUDNN_CHECK(op.SetAlpha(0.0f)); MUDNN_CHECK(op.SetBeta(0.0f)); mudnn::Handle* handle = get_cached_handle(ctx.device); MUDNN_CHECK(handle->SetStream(ctx.stream())); MUDNN_CHECK(op.Run(*handle, tensor_dst, tensor_src)); return musaSuccess; } ggml-org-ggml-3678254/src/ggml-musa/mudnn.cuh000066400000000000000000000005151512524704700206670ustar00rootroot00000000000000#pragma once #include "ggml-cuda/common.cuh" #include "ggml.h" // Asynchronously copies data from src tensor to dst tensor using the provided context. // Returns a musaError_t indicating success or failure. musaError_t mudnnMemcpyAsync( ggml_backend_cuda_context &ctx, const ggml_tensor *dst, const ggml_tensor *src ); ggml-org-ggml-3678254/src/ggml-opencl/000077500000000000000000000000001512524704700173575ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-opencl/CMakeLists.txt000066400000000000000000000064751512524704700221330ustar00rootroot00000000000000find_package(OpenCL REQUIRED) find_package(Python3 REQUIRED) set(TARGET_NAME ggml-opencl) ggml_add_backend_library(${TARGET_NAME} ggml-opencl.cpp ../../include/ggml-opencl.h) target_link_libraries(${TARGET_NAME} PRIVATE ${OpenCL_LIBRARIES}) target_include_directories(${TARGET_NAME} PRIVATE ${OpenCL_INCLUDE_DIRS}) if (GGML_OPENCL_PROFILING) message(STATUS "OpenCL profiling enabled (increases CPU overhead)") add_compile_definitions(GGML_OPENCL_PROFILING) endif () add_compile_definitions(GGML_OPENCL_SOA_Q) add_compile_definitions(GGML_OPENCL_TARGET_VERSION=${GGML_OPENCL_TARGET_VERSION}) if (GGML_OPENCL_USE_ADRENO_KERNELS) message(STATUS "OpenCL will use matmul kernels optimized for Adreno") add_compile_definitions(GGML_OPENCL_USE_ADRENO_KERNELS) endif () if (GGML_OPENCL_EMBED_KERNELS) add_compile_definitions(GGML_OPENCL_EMBED_KERNELS) set(EMBED_KERNEL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/kernels/embed_kernel.py") file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/autogenerated") target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/autogenerated") endif () function(ggml_opencl_add_kernel KNAME) set(KERN_HDR ${CMAKE_CURRENT_BINARY_DIR}/autogenerated/${KNAME}.cl.h) set(KERN_SRC ${CMAKE_CURRENT_SOURCE_DIR}/kernels/${KNAME}.cl) if (GGML_OPENCL_EMBED_KERNELS) message(STATUS "opencl: embedding kernel ${KNAME}") # Python must be accessible from command line add_custom_command( OUTPUT ${KERN_HDR} COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} ${KERN_SRC} ${KERN_HDR} DEPENDS ${KERN_SRC} ${EMBED_KERNEL_SCRIPT} COMMENT "Generate ${KERN_HDR}" ) target_sources(${TARGET_NAME} PRIVATE ${KERN_HDR}) else () message(STATUS "opencl: adding kernel ${KNAME}") configure_file(${KERN_SRC} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${KNAME}.cl COPYONLY) endif () endfunction() set(GGML_OPENCL_KERNELS add add_id argsort clamp cpy cvt diag_mask_inf div gelu gemv_noshuffle_general gemv_noshuffle get_rows glu group_norm im2col_f32 im2col_f16 mean mul_mat_Ab_Bi_8x4 mul_mv_f16_f16 mul_mv_f16_f32_1row mul_mv_f16_f32_l4 mul_mv_f16_f32 mul_mv_f32_f32 mul_mv_q4_0_f32 mul_mv_q4_0_f32_v mul_mv_q4_0_f32_8x_flat mul_mv_q4_0_f32_1d_8x_flat mul_mv_q4_0_f32_1d_16x_flat mul_mv_q6_k mul_mv_q8_0_f32 mul_mv_q8_0_f32_flat mul_mv_mxfp4_f32 mul_mv_mxfp4_f32_flat mul_mv_id_q4_0_f32_8x_flat mul_mv_id_q8_0_f32 mul_mv_id_q8_0_f32_flat mul_mv_id_mxfp4_f32 mul_mv_id_mxfp4_f32_flat gemm_moe_mxfp4_f32 gemv_moe_mxfp4_f32 mul_mm_f32_f32_l4_lm mul_mm_f16_f32_l4_lm mul_mm_q8_0_f32_l4_lm mul norm relu rms_norm rope scale set_rows sigmoid silu softmax_4_f32 softmax_4_f16 softmax_f32 softmax_f16 sqr sqrt ssm_conv sub 
sum_rows transpose concat tsembd upscale tanh pad repeat mul_mat_f16_f32 mul_mm_f16_f32_kq_kqv conv2d conv2d_f16_f32 flash_attn_f32_f16 flash_attn_f16 flash_attn_f32 ) foreach (K ${GGML_OPENCL_KERNELS}) ggml_opencl_add_kernel(${K}) endforeach() ggml-org-ggml-3678254/src/ggml-opencl/ggml-opencl.cpp000066400000000000000000014667121512524704700223100ustar00rootroot00000000000000#define CL_TARGET_OPENCL_VERSION GGML_OPENCL_TARGET_VERSION #define CL_USE_DEPRECATED_OPENCL_1_2_APIS // suppress warnings in CL headers for GCC and Clang #pragma GCC diagnostic ignored "-Woverlength-strings" #ifdef __clang__ #pragma GCC diagnostic ignored "-Wgnu-anonymous-struct" #endif #include "ggml-opencl.h" #include "ggml-backend.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #undef MIN #undef MAX #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) #define CEIL_DIV(M, N) (((M) + (N)-1) / (N)) #define UNUSED(x) (void)(x) #define CL_CHECK(err) \ do { \ cl_int err_ = (err); \ if (err_ != CL_SUCCESS) { \ GGML_LOG_ERROR("ggml_opencl: %s error %d at %s:%d\n", \ #err, err_, __FILE__, __LINE__); \ GGML_ASSERT(0); \ } \ } while (0) //------------------------------------------------------------------------------ // OpenCL //------------------------------------------------------------------------------ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor); // See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1. // Precompute mp (m' in the paper) and L such that division // can be computed using a multiply (high 32b of 64b result) // and a shift: // // n/d = (mulhi(n, mp) + n) >> L; struct fastdiv_vals { uint32_t mp; uint32_t L; uint32_t d; uint32_t pad; }; static_assert(sizeof(fastdiv_vals) == 16, "fastdiv_vals size incorrect"); static fastdiv_vals init_fastdiv_values(uint64_t d_64) { GGML_ASSERT(d_64 != 0); GGML_ASSERT(d_64 <= std::numeric_limits::max()); uint32_t d = (uint32_t)d_64; // compute L = ceil(log2(d)); uint32_t L = 0; while (L < 32 && (uint32_t{ 1 } << L) < d) { L++; } uint32_t mp = (uint32_t) ((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d) / d + 1); // pack divisor as well to reduce error surface return { mp, L, d, 0 }; } enum GPU_FAMILY { ADRENO, INTEL, UNKNOWN, }; enum ADRENO_GPU_GEN { ADRENO_UNKNOWN, A7X, A8X, X1E, }; enum ADRENO_CL_COMPILER_TYPE { E031, DX, }; struct ggml_cl_version { cl_uint major = 0; cl_uint minor = 0; }; struct ggml_cl_compiler_version { ADRENO_CL_COMPILER_TYPE type; int major = -1; int minor = -1; int patch = -1; bool same(ADRENO_CL_COMPILER_TYPE t, int x, int y, int z) const { return major == x && minor == y && patch == z && type == t; } bool newer_than(ADRENO_CL_COMPILER_TYPE t, int x, int y, int z) const { return major*10000 + minor*100 + patch > x*10000 + y*100 + z && type == t; } bool newer_than_or_same(ADRENO_CL_COMPILER_TYPE t, int x, int y, int z) const { return same(t, x, y, z) || newer_than(t, x, y, z); } }; static size_t align_to(size_t value, size_t to_alignment) { GGML_ASSERT(to_alignment && "Invalid alignment (must be non-zero)"); GGML_ASSERT((to_alignment & (to_alignment - 1)) == 0 && "to_alignment must be power-of-two"); return ((value + to_alignment - 1) / to_alignment) * to_alignment; } // Parses a version string of form "XX.YY ". On an error returns ggml_cl_version with all zeroes. 
static ggml_cl_version parse_cl_version(std::string_view str) { size_t major_str_begin = 0; size_t major_str_end = str.find(".", major_str_begin); if (major_str_end == std::string::npos) { return {}; } size_t minor_str_begin = major_str_end + 1; size_t minor_str_end = str.find(" ", minor_str_begin); if (minor_str_end == std::string::npos) { return {}; } cl_uint version_major; if (std::from_chars(str.data() + major_str_begin, str.data() + major_str_end, version_major).ec != std::errc{}) { return {}; } cl_uint version_minor; if (std::from_chars(str.data() + minor_str_begin, str.data() + minor_str_end, version_minor).ec != std::errc{}) { return {}; } return { version_major, version_minor }; } // Returns OpenCL platform's version. On an error returns ggml_cl_version with all zeroes. static ggml_cl_version get_opencl_platform_version(cl_platform_id platform) { size_t param_size; CL_CHECK(clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, nullptr, ¶m_size)); std::unique_ptr param_storage(new char[param_size]); CL_CHECK(clGetPlatformInfo(platform, CL_PLATFORM_VERSION, param_size, param_storage.get(), nullptr)); auto param_value = std::string_view(param_storage.get(), param_size); const std::string version_prefix = "OpenCL "; // Suffix: "XX.YY " if (param_value.find(version_prefix) != 0) { return {}; } param_value.remove_prefix(version_prefix.length()); return parse_cl_version(param_value); } // Return a version to use in OpenCL C compilation. On an error returns ggml_cl_version with all zeroes. static ggml_cl_version get_opencl_c_version(ggml_cl_version platform_version, cl_device_id device) { size_t param_size; #if CL_TARGET_OPENCL_VERSION >= 300 if (platform_version.major >= 3) { CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_ALL_VERSIONS, 0, nullptr, ¶m_size)); if (!param_size) { return {}; } std::unique_ptr versions(new cl_name_version[param_size]); CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_ALL_VERSIONS, param_size, versions.get(), nullptr)); unsigned versions_count = param_size / sizeof(cl_name_version); cl_version version_max = 0; for (unsigned i = 0; i < versions_count; i++) { version_max = std::max(versions[i].version, version_max); } return { CL_VERSION_MAJOR(version_max), CL_VERSION_MINOR(version_max) }; } #else GGML_UNUSED(platform_version); #endif // CL_TARGET_OPENCL_VERSION >= 300 CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, 0, nullptr, ¶m_size)); if (!param_size) { return {}; } std::unique_ptr param_storage(new char[param_size]); CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_OPENCL_C_VERSION, param_size, param_storage.get(), nullptr)); auto param_value = std::string_view(param_storage.get(), param_size); const std::string version_prefix = "OpenCL C "; // Suffix: "XX.YY " if (param_value.find(version_prefix) != 0) { return {}; } param_value.remove_prefix(version_prefix.length()); return parse_cl_version(param_value); } static ADRENO_GPU_GEN get_adreno_gpu_gen(const char *device_name) { if (strstr(device_name, "730") || strstr(device_name, "740") || strstr(device_name, "750")) { return ADRENO_GPU_GEN::A7X; } if (strstr(device_name, "830")) { return ADRENO_GPU_GEN::A8X; } if (strstr(device_name, "X1")) { return ADRENO_GPU_GEN::X1E; } return ADRENO_GPU_GEN::ADRENO_UNKNOWN; } static ggml_cl_compiler_version get_adreno_cl_compiler_version(const char *driver_version) { std::string driver_ver_str(driver_version); ADRENO_CL_COMPILER_TYPE type = ADRENO_CL_COMPILER_TYPE::E031; size_t compiler_ver_pos = driver_ver_str.find("E031"); size_t compiler_ver_len = 13; 
size_t compiler_major_offset = 5; size_t compiler_minor_offset = 8; size_t compiler_patch_offset = 11; if (compiler_ver_pos == std::string::npos) { compiler_ver_pos = driver_ver_str.find("DX"); if (compiler_ver_pos == std::string::npos) { return {}; } type = ADRENO_CL_COMPILER_TYPE::DX; compiler_ver_len = 11; compiler_major_offset = 3; } std::string compiler_ver_str = driver_ver_str.substr(compiler_ver_pos, compiler_ver_len); int major = std::atoi(compiler_ver_str.substr(compiler_major_offset, 2).c_str()); int minor = std::atoi(compiler_ver_str.substr(compiler_minor_offset, 2).c_str()); int patch = std::atoi(compiler_ver_str.substr(compiler_patch_offset, 2).c_str()); return { type, major, minor, patch }; } // cl buffer wrapper struct ggml_cl_buffer { cl_mem buffer; size_t size; ggml_cl_buffer() : buffer(nullptr), size(0) {} ~ggml_cl_buffer() { if (buffer) { CL_CHECK(clReleaseMemObject(buffer)); } } void allocate(cl_context context, size_t new_size) { if (new_size > size) { size = new_size; if (buffer) { CL_CHECK(clReleaseMemObject(buffer)); } cl_int err; CL_CHECK((buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err), err)); } } }; // Profiling struct ProfilingInfo { std::string op_name; std::string kernel_name; cl_kernel kernel; cl_event evt; cl_ulong cmd_queued; cl_ulong cmd_submit; cl_ulong cmd_start; cl_ulong cmd_end; cl_ulong overhead_start; cl_ulong overhead_end; // For the times below, see spec for clGetEventProfilingInfo // The time kernel spent in cmd queue - SUBMIT - QUEUED cl_ulong cmd_queued_duration_ns; // The time kernel spent for submission - START - SUBMIT cl_ulong cmd_submit_duration_ns; // Kernel execution time in nanoseconds - END - START cl_ulong cmd_duration_ns; // The time for the kernel to complete - COMPLETE - END cl_ulong cmd_complete_duration_ns; // Total time to finish the kernel - COMPELTE - QUEUED cl_ulong cmd_total_duration_ns; // Global and local work sizes. size_t global_size[3]; size_t local_size[3]; // Op output size. size_t output_size[4]; }; static void populateProfilingInfo( ProfilingInfo& info, cl_event evt, cl_kernel kernel, cl_uint work_dim, size_t global_size[3], size_t local_size[3], const ggml_tensor * tensor) { info.op_name = tensor->name; info.kernel = kernel; info.evt = evt; // 0 means not specified, e.g., 2D workgroup, or NULL for driver to choose info.local_size[0] = 0; info.local_size[1] = 0; info.local_size[2] = 0; info.global_size[0] = 0; info.global_size[1] = 0; info.global_size[2] = 0; if (local_size) { for (cl_uint i = 0; i < work_dim; ++i) { info.local_size[i] = local_size[i]; } } for (cl_uint i = 0; i < work_dim; ++i) { info.global_size[i] = global_size[i]; } info.output_size[0] = tensor->ne[0]; info.output_size[1] = tensor->ne[1]; info.output_size[2] = tensor->ne[2]; info.output_size[3] = tensor->ne[3]; } struct ggml_backend_opencl_context; // backend device context struct ggml_backend_opencl_device_context { cl_platform_id platform; std::string platform_name; cl_device_id device; std::string device_name; cl_device_type device_type; std::string device_version; // Initialized by ggml_cl2_init(). 
ggml_backend_opencl_context * backend_ctx = nullptr; // Initialized by ggml_backend_opencl_device_get_buffer_type() ggml_backend_buffer_type buffer_type; cl_context context = nullptr; }; // backend context struct ggml_backend_opencl_context { int ref_count; cl_device_id device; std::string device_name; std::string driver_version; GPU_FAMILY gpu_family; ADRENO_GPU_GEN adreno_gen; cl_int alignment; size_t max_alloc_size; size_t max_workgroup_size; bool fp16_support; bool has_vector_subgroup_broadcast; bool disable_fusion; ggml_cl_compiler_version adreno_cl_compiler_version; int adreno_wave_size; cl_bool non_uniform_workgroups; cl_context context; cl_command_queue queue; // prealloc buffers for transposing weights and activations ggml_cl_buffer prealloc_quant_trans; ggml_cl_buffer prealloc_scales_trans; ggml_cl_buffer prealloc_act_trans; cl_program program_add; cl_program program_add_id; cl_program program_clamp; cl_program program_cpy; cl_program program_cvt; cl_program program_diag_mask_inf; cl_program program_gelu; cl_program program_gemv_noshuffle_general; cl_program program_gemv_noshuffle; cl_program program_get_rows; cl_program program_set_rows; cl_program program_glu; cl_program program_im2col_f16; cl_program program_im2col_f32; cl_program program_mul_mat_Ab_Bi_8x4; cl_program program_mul_mv_q4_0_f32; cl_program program_mul_mv_q4_0_f32_v; cl_program program_mul_mv_q4_0_f32_8x_flat; cl_program program_mul_mv_q4_0_f32_1d_8x_flat; cl_program program_mul_mv_q4_0_f32_1d_16x_flat; cl_program program_mul_mv_q6_K; cl_program program_mul_mv_q8_0_f32, program_mul_mv_q8_0_f32_flat; cl_program program_mul_mv_mxfp4_f32; cl_program program_mul_mv_mxfp4_f32_flat; cl_program program_mul_mv_f16_f16; cl_program program_mul_mv_f16_f32_1row; cl_program program_mul_mv_f16_f32_l4; cl_program program_mul_mv_f16_f32; cl_program program_mul_mv_f32_f32; cl_program program_mul; cl_program program_mul_mat_f16_f32_tiled; cl_program program_mul_mm_f16_f32_kqv; cl_program program_mul_mm_f16_f32_kq; cl_program program_div; cl_program program_sub; cl_program program_norm; cl_program program_relu; cl_program program_rms_norm; cl_program program_group_norm; cl_program program_rope; cl_program program_scale; cl_program program_silu; cl_program program_sigmoid; cl_program program_softmax_f32; cl_program program_softmax_f16; cl_program program_softmax_4_f32; cl_program program_softmax_4_f16; cl_program program_argsort_f32_i32; cl_program program_sum_rows_f32; cl_program program_repeat; cl_program program_pad; cl_program program_tanh; cl_program program_upscale; cl_program program_concat; cl_program program_conv_2d_f16; cl_program program_conv_2d_f32; cl_program program_conv_2d_f16_f32; cl_program program_tsembd; cl_program program_gemv_moe_mxfp4_f32, program_gemm_moe_mxfp4_f32; cl_program program_mul_mv_id_q4_0_f32_8x_flat; cl_program program_mul_mv_id_q8_0_f32, program_mul_mv_id_q8_0_f32_flat; cl_program program_mul_mv_id_mxfp4_f32; cl_program program_mul_mv_id_mxfp4_f32_flat; cl_program program_mul_mm_f32_f32_l4_lm; cl_program program_mul_mm_f16_f32_l4_lm; cl_program program_mul_mm_q8_0_f32_l4_lm; cl_kernel kernel_add, kernel_add_row, kernel_add_f16, kernel_add_row_f16; cl_kernel kernel_mul, kernel_mul_row, kernel_mul_f16, kernel_mul_row_f16; cl_kernel kernel_div, kernel_div_row, kernel_div_f16, kernel_div_row_f16; cl_kernel kernel_sub, kernel_sub_row, kernel_sub_f16, kernel_sub_row_f16; cl_kernel kernel_add_id; cl_kernel kernel_scale; cl_kernel kernel_sqr_cont_f32, kernel_sqr_cont_f32_4, kernel_sqr_cont_f16, 
kernel_sqr_cont_f16_4; cl_kernel kernel_sqrt_cont_f32, kernel_sqrt_cont_f32_4, kernel_sqrt_cont_f16, kernel_sqrt_cont_f16_4; cl_kernel kernel_mean_f32; cl_kernel kernel_silu, kernel_silu_4; cl_kernel kernel_gelu, kernel_gelu_4; cl_kernel kernel_gelu_erf, kernel_gelu_erf_4; cl_kernel kernel_gelu_quick, kernel_gelu_quick_4; cl_kernel kernel_relu; cl_kernel kernel_sigmoid_f32, kernel_sigmoid_f16; cl_kernel kernel_clamp; cl_kernel kernel_geglu, kernel_reglu, kernel_swiglu, kernel_swiglu_oai, kernel_geglu_erf, kernel_geglu_quick, kernel_geglu_f16, kernel_reglu_f16, kernel_swiglu_f16, kernel_geglu_erf_f16, kernel_geglu_quick_f16; cl_kernel kernel_norm, kernel_norm_mul_add; cl_kernel kernel_rms_norm, kernel_rms_norm_mul; cl_kernel kernel_group_norm, kernel_group_norm_mul_add; cl_kernel kernel_diag_mask_inf, kernel_diag_mask_inf_8; cl_kernel kernel_soft_max, kernel_soft_max_4; cl_kernel kernel_soft_max_f16, kernel_soft_max_4_f16; std::map, cl_kernel> kernels_flash_attn_f16; std::map, cl_kernel> kernels_flash_attn_f16_q1; std::map, cl_kernel> kernels_flash_attn_f32; std::map, cl_kernel> kernels_flash_attn_f32_q1; std::map, cl_kernel> kernels_flash_attn_f32_f16; std::map, cl_kernel> kernels_flash_attn_f32_f16_q1; std::map, int> kernels_flash_attn_bm; std::map, int> kernels_flash_attn_bn; cl_kernel kernel_get_rows_f32, kernel_get_rows_f16, kernel_get_rows_q4_0; cl_kernel kernel_set_rows_f32_i64, kernel_set_rows_f32_i32, kernel_set_rows_f16_i64, kernel_set_rows_f16_i32; cl_kernel kernel_rope_norm_f32, kernel_rope_norm_f16, kernel_rope_neox_f32, kernel_rope_neox_f16; cl_kernel kernel_rope_multi_f32, kernel_rope_multi_f16, kernel_rope_vision_f32, kernel_rope_vision_f16; cl_kernel kernel_cpy_f16_f16, kernel_cpy_f16_f32, kernel_cpy_f32_f16, kernel_cpy_f32_f32; cl_kernel kernel_mul_mat_f32_f32; cl_kernel kernel_mul_mat_f16_f16; cl_kernel kernel_mul_mat_f16_f32_1row; cl_kernel kernel_mul_mat_f16_f32; cl_kernel kernel_mul_mat_f16_f32_l4; cl_kernel kernel_mul_mat_f16_f32_tiled; cl_kernel kernel_mul_mm_f16_f32_kqv; cl_kernel kernel_mul_mm_f16_f32_kq; cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v; cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0; cl_kernel kernel_convert_block_mxfp4, kernel_convert_block_mxfp4_trans, kernel_restore_block_mxfp4, kernel_restore_block_mxfp4_trans; cl_kernel kernel_convert_block_q8_0, kernel_restore_block_q8_0; cl_kernel kernel_mul_mat_q4_0_f32_8x_flat; cl_kernel kernel_convert_block_q4_0_noshuffle; cl_kernel kernel_restore_block_q4_0_noshuffle; cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat; cl_kernel kernel_mul_mv_q6_K_f32; cl_kernel kernel_mul_mv_mxfp4_f32, kernel_mul_mv_mxfp4_f32_flat; cl_kernel kernel_mul_mv_q8_0_f32, kernel_mul_mv_q8_0_f32_flat; cl_kernel kernel_im2col_f32, kernel_im2col_f16; cl_kernel kernel_argsort_f32_i32; cl_kernel kernel_sum_rows_f32; cl_kernel kernel_repeat; cl_kernel kernel_pad; cl_kernel kernel_tanh_f32_nd; cl_kernel kernel_tanh_f16_nd; cl_kernel kernel_upscale; cl_kernel kernel_upscale_bilinear; cl_kernel kernel_concat_f32_contiguous; cl_kernel kernel_concat_f32_non_contiguous; cl_kernel kernel_conv_2d_f16; cl_kernel kernel_conv_2d_f32; cl_kernel kernel_conv_2d_f16_f32; cl_kernel kernel_ssm_conv_f32_f32, kernel_ssm_conv_f32_f32_4; cl_kernel kernel_timestep_embedding; cl_kernel kernel_gemv_moe_mxfp4_f32, kernel_gemm_moe_mxfp4_f32; cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat; cl_kernel kernel_mul_mv_id_q8_0_f32, kernel_mul_mv_id_q8_0_f32_flat; cl_kernel kernel_mul_mv_id_mxfp4_f32; 
cl_kernel kernel_mul_mv_id_mxfp4_f32_flat; cl_kernel kernel_mul_mm_f32_f32_l4_lm; cl_kernel kernel_mul_mm_f16_f32_l4_lm; cl_kernel kernel_mul_mm_q8_0_f32_l4_lm; std::vector profiling_info; void write_profiling_info() { FILE * fperf = fopen("cl_profiling.csv", "w"); if (!fperf) { GGML_LOG_ERROR("Failed to open cl_profiling.csv\n"); return; } // Populate profiling info for (ProfilingInfo & info : profiling_info) { cl_ulong cmd_queued; cl_ulong cmd_submit; cl_ulong cmd_start; cl_ulong cmd_end; cl_ulong cmd_complete; CL_CHECK(clWaitForEvents(1, &info.evt)); CL_CHECK(clGetEventProfilingInfo( info.evt, CL_PROFILING_COMMAND_QUEUED, sizeof(cl_ulong), &cmd_queued, NULL)); CL_CHECK(clGetEventProfilingInfo( info.evt, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &cmd_submit, NULL)); CL_CHECK(clGetEventProfilingInfo( info.evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &cmd_start, NULL)); CL_CHECK(clGetEventProfilingInfo( info.evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &cmd_end, NULL)); CL_CHECK(clGetEventProfilingInfo( info.evt, CL_PROFILING_COMMAND_COMPLETE, sizeof(cl_ulong), &cmd_complete, NULL)); CL_CHECK(clReleaseEvent(info.evt)); char kernel_name[512]; CL_CHECK(clGetKernelInfo(info.kernel, CL_KERNEL_FUNCTION_NAME, sizeof(kernel_name), kernel_name, NULL)); info.kernel_name = kernel_name; info.cmd_queued = cmd_queued; info.cmd_submit = cmd_submit; info.cmd_start = cmd_start; info.cmd_end = cmd_end; info.cmd_queued_duration_ns = cmd_submit - cmd_queued; info.cmd_submit_duration_ns = cmd_start - cmd_submit; info.cmd_duration_ns = cmd_end - cmd_start; info.cmd_complete_duration_ns = cmd_complete - cmd_end; info.cmd_total_duration_ns = cmd_complete - cmd_queued; } // Dump a csv fprintf(fperf, "op name, kernel name, exec duration (ms), global size, local size, output size\n"); for (const ProfilingInfo & info : profiling_info) { fprintf(fperf, "%s,%s,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", info.op_name.c_str(), info.kernel_name.c_str(), info.cmd_duration_ns/1.e6f, info.global_size[0], info.global_size[1], info.global_size[2], info.local_size[0], info.local_size[1], info.local_size[2], info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]); } fclose(fperf); // Dump a simple chrome trace FILE* ftrace = fopen("cl_trace.json", "w"); if (!ftrace) { GGML_LOG_ERROR("Failed to open cl_trace.json\n"); return; } fprintf(ftrace, "[\n"); for (const ProfilingInfo & info : profiling_info) { fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Host\"},\n", info.kernel_name.c_str(), info.cmd_queued/1000); fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Host\"},\n", info.kernel_name.c_str(), info.cmd_submit/1000); fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Device\"},\n", info.kernel_name.c_str(), info.cmd_start/1000); fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Device\"},\n", info.kernel_name.c_str(), info.cmd_end/1000); } fclose(ftrace); } size_t get_kernel_workgroup_size(cl_kernel kernel) const { size_t workgroup_size = 0; size_t ret_size = 0; CL_CHECK( clGetKernelWorkGroupInfo(kernel, device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &workgroup_size, &ret_size)); GGML_ASSERT(sizeof(size_t) == ret_size); return workgroup_size; } void enqueue_ndrange_kernel(cl_kernel kernel, cl_uint 
work_dim, size_t *global_work_size, size_t *local_work_size, const ggml_tensor * tensor) { #ifdef GGML_OPENCL_PROFILING cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, work_dim, NULL, global_work_size, local_work_size, 0, NULL, &evt)); profiling_info.emplace_back(); populateProfilingInfo(profiling_info.back(), evt, kernel, work_dim, global_work_size, local_work_size, tensor); #else GGML_UNUSED(tensor); CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, work_dim, NULL, global_work_size, local_work_size, 0, NULL, NULL)); #endif } #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // Transpose kernels cl_program program_transpose; cl_kernel kernel_transpose_32; cl_kernel kernel_transpose_32_16; cl_kernel kernel_transpose_16; cl_kernel kernel_transpose_16_buf; cl_kernel kernel_transpose_16_4x1; // Gemm and Gemv related programs, kernels, etc cl_program program_CL_gemm; cl_program program_CL_gemv_general; cl_program program_CL_gemv_4096_1_11008; cl_program program_CL_gemv_4096_1_4096; cl_program program_CL_gemv_11008_1_4096; cl_program program_CL_gemv_32000_1_4096; cl_kernel CL_mul_mat_Ab_Bi_8x4; cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general; cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008; cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096; cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096; cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096; #endif // GGML_OPENCL_USE_ADRENO_KERNELS void free() { ref_count--; if (ref_count == 0) { #ifdef GGML_OPENCL_PROFILING write_profiling_info(); profiling_info.clear(); #endif } } }; // All registered devices with a default device in the front. static std::vector g_ggml_backend_opencl_devices; inline std::string read_file(const std::string &path) { std::ifstream ifs(path); if (!ifs) { return ""; } std::string text; ifs.seekg(0, std::ios::end); text.resize(ifs.tellg()); ifs.seekg(0, std::ios::beg); ifs.read(&text[0], text.size()); return text; } static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, const char* program_buffer, const std::string &compile_opts) { cl_program p; char *program_log; size_t program_size; size_t log_size; int err; program_size = strlen(program_buffer); p = clCreateProgramWithSource(ctx, 1, (const char**)&program_buffer, &program_size, &err); if(err < 0) { GGML_LOG_ERROR("OpenCL error creating program"); exit(1); } err = clBuildProgram(p, 0, NULL, compile_opts.c_str(), NULL, NULL); if(err < 0) { clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size); program_log = (char*) malloc(log_size + 1); program_log[log_size] = '\0'; clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL); GGML_LOG_ERROR("ggml_opencl: kernel compile error:\n\n%s\n", program_log); free(program_log); exit(1); } return p; } static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_version opencl_c_version) { cl_int err; // compiler options for general kernels auto opencl_c_std = std::string("CL") + std::to_string(opencl_c_version.major) + "." 
+ std::to_string(opencl_c_version.minor); std::string compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable -cl-unsafe-math-optimizations" " -cl-finite-math-only -cl-fast-relaxed-math"; GGML_LOG_INFO("ggml_opencl: loading OpenCL kernels"); // add { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "add.cl.h" }; #else const std::string kernel_src = read_file("add.cl"); #endif backend_ctx->program_add = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_add = clCreateKernel(backend_ctx->program_add, "kernel_add", &err), err)); CL_CHECK((backend_ctx->kernel_add_row = clCreateKernel(backend_ctx->program_add, "kernel_add_row", &err), err)); CL_CHECK((backend_ctx->kernel_add_f16 = clCreateKernel(backend_ctx->program_add, "kernel_add_f16", &err), err)); CL_CHECK((backend_ctx->kernel_add_row_f16 = clCreateKernel(backend_ctx->program_add, "kernel_add_row_f16", &err), err)); GGML_LOG_CONT("."); } // add_id { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "add_id.cl.h" }; #else const std::string kernel_src = read_file("add_id.cl"); #endif backend_ctx->program_add_id = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_add_id = clCreateKernel(backend_ctx->program_add_id, "kernel_add_id", &err), err)); GGML_LOG_CONT("."); } // clamp { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "clamp.cl.h" }; #else const std::string kernel_src = read_file("clamp.cl"); #endif backend_ctx->program_clamp = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_clamp = clCreateKernel(backend_ctx->program_clamp, "kernel_clamp", &err), err)); GGML_LOG_CONT("."); } // cpy { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "cpy.cl.h" }; #else const std::string kernel_src = read_file("cpy.cl"); #endif backend_ctx->program_cpy = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_cpy_f16_f16 = clCreateKernel(backend_ctx->program_cpy, "kernel_cpy_f16_f16", &err), err)); CL_CHECK((backend_ctx->kernel_cpy_f16_f32 = clCreateKernel(backend_ctx->program_cpy, "kernel_cpy_f16_f32", &err), err)); CL_CHECK((backend_ctx->kernel_cpy_f32_f16 = clCreateKernel(backend_ctx->program_cpy, "kernel_cpy_f32_f16", &err), err)); CL_CHECK((backend_ctx->kernel_cpy_f32_f32 = clCreateKernel(backend_ctx->program_cpy, "kernel_cpy_f32_f32", &err), err)); GGML_LOG_CONT("."); } // cvt { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "cvt.cl.h" }; #else const std::string kernel_src = read_file("cvt.cl"); #endif backend_ctx->program_cvt = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_convert_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0_noshuffle", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0_noshuffle", &err), err)); CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0", 
&err), err)); CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err)); CL_CHECK((backend_ctx->kernel_convert_block_mxfp4_trans = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4_trans", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_mxfp4_trans = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4_trans", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4", &err), err)); CL_CHECK((backend_ctx->kernel_convert_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q8_0", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q8_0", &err), err)); GGML_LOG_CONT("."); } // diag_mask_inf { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "diag_mask_inf.cl.h" }; #else const std::string kernel_src = read_file("diag_mask_inf.cl"); #endif backend_ctx->program_diag_mask_inf = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_diag_mask_inf_8 = clCreateKernel(backend_ctx->program_diag_mask_inf, "kernel_diag_mask_inf_8", &err), err)); CL_CHECK((backend_ctx->kernel_diag_mask_inf = clCreateKernel(backend_ctx->program_diag_mask_inf, "kernel_diag_mask_inf", &err), err)); GGML_LOG_CONT("."); } // gelu { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "gelu.cl.h" }; #else const std::string kernel_src = read_file("gelu.cl"); #endif backend_ctx->program_gelu = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_gelu = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu", &err), err)); CL_CHECK((backend_ctx->kernel_gelu_4 = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_4", &err), err)); CL_CHECK((backend_ctx->kernel_gelu_erf = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_erf", &err), err)); CL_CHECK((backend_ctx->kernel_gelu_erf_4 = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_erf_4", &err), err)); CL_CHECK((backend_ctx->kernel_gelu_quick = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_quick", &err), err)); CL_CHECK((backend_ctx->kernel_gelu_quick_4 = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_quick_4", &err), err)); GGML_LOG_CONT("."); } // glu { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "glu.cl.h" }; #else const std::string kernel_src = read_file("glu.cl"); #endif backend_ctx->program_glu = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_geglu = clCreateKernel(backend_ctx->program_glu, "kernel_geglu", &err), err)); CL_CHECK((backend_ctx->kernel_reglu = clCreateKernel(backend_ctx->program_glu, "kernel_reglu", &err), err)); CL_CHECK((backend_ctx->kernel_swiglu = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu", &err), err)); CL_CHECK((backend_ctx->kernel_swiglu_oai = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu_oai", &err), err)); CL_CHECK((backend_ctx->kernel_geglu_erf = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_erf", &err), err)); CL_CHECK((backend_ctx->kernel_geglu_quick = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_quick", &err), err)); CL_CHECK((backend_ctx->kernel_geglu_f16 = 
clCreateKernel(backend_ctx->program_glu, "kernel_geglu_f16", &err), err)); CL_CHECK((backend_ctx->kernel_reglu_f16 = clCreateKernel(backend_ctx->program_glu, "kernel_reglu_f16", &err), err)); CL_CHECK((backend_ctx->kernel_swiglu_f16 = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu_f16", &err), err)); CL_CHECK((backend_ctx->kernel_geglu_erf_f16 = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_erf_f16", &err), err)); CL_CHECK((backend_ctx->kernel_geglu_quick_f16 = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_quick_f16", &err), err)); GGML_LOG_CONT("."); } // get_rows { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "get_rows.cl.h" }; #else const std::string kernel_src = read_file("get_rows.cl"); #endif backend_ctx->program_get_rows = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_get_rows_f32 = clCreateKernel(backend_ctx->program_get_rows, "kernel_get_rows_f32", &err), err)); CL_CHECK((backend_ctx->kernel_get_rows_f16 = clCreateKernel(backend_ctx->program_get_rows, "kernel_get_rows_f16", &err), err)); CL_CHECK((backend_ctx->kernel_get_rows_q4_0 = clCreateKernel(backend_ctx->program_get_rows, "kernel_get_rows_q4_0", &err), err)); GGML_LOG_CONT("."); } // im2col_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "im2col_f32.cl.h" }; #else const std::string kernel_src = read_file("im2col_f32.cl"); #endif backend_ctx->program_im2col_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_im2col_f32 = clCreateKernel(backend_ctx->program_im2col_f32, "kernel_im2col_f32", &err), err)); GGML_LOG_CONT("."); } // im2col_f16 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "im2col_f16.cl.h" }; #else const std::string kernel_src = read_file("im2col_f16.cl"); #endif backend_ctx->program_im2col_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_im2col_f16 = clCreateKernel(backend_ctx->program_im2col_f16, "kernel_im2col_f16", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q4_0_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q4_0_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q4_0_f32.cl"); #endif backend_ctx->program_mul_mv_q4_0_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_q4_0_f32, "kernel_mul_mat_q4_0_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q4_0_f32_v { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q4_0_f32_v.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q4_0_f32_v.cl"); #endif backend_ctx->program_mul_mv_q4_0_f32_v = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_v = clCreateKernel(backend_ctx->program_mul_mv_q4_0_f32_v, "kernel_mul_mat_q4_0_f32_v", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q4_0_f32_8x_flat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q4_0_f32_8x_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q4_0_f32_8x_flat.cl"); #endif backend_ctx->program_mul_mv_q4_0_f32_8x_flat = 
build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_8x_flat = clCreateKernel(backend_ctx->program_mul_mv_q4_0_f32_8x_flat, "kernel_mul_mat_q4_0_f32_8x_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q4_0_f32_1d_8x_flat // This kernel does not compiler on Adreno cl compiler 38.01. Skip it for // those compiler versions since it is anyway not used for Adreno. if (backend_ctx->gpu_family != ADRENO || backend_ctx->adreno_cl_compiler_version.newer_than_or_same(E031, 38, 11, 0) || backend_ctx->adreno_cl_compiler_version.type == DX) { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q4_0_f32_1d_8x_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q4_0_f32_1d_8x_flat.cl"); #endif backend_ctx->program_mul_mv_q4_0_f32_1d_8x_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_1d_8x_flat = clCreateKernel(backend_ctx->program_mul_mv_q4_0_f32_1d_8x_flat, "kernel_mul_mat_q4_0_f32_1d_8x_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q4_0_f32_1d_16x_flat // This kernel does not compiler on Adreno cl compiler 38.01. Skip it for // those compiler versions since it is anyway not used for Adreno. if (backend_ctx->gpu_family != ADRENO || backend_ctx->adreno_cl_compiler_version.newer_than_or_same(E031, 38, 11, 0) || backend_ctx->adreno_cl_compiler_version.type == DX) { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q4_0_f32_1d_16x_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q4_0_f32_1d_16x_flat.cl"); #endif backend_ctx->program_mul_mv_q4_0_f32_1d_16x_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_1d_16x_flat = clCreateKernel(backend_ctx->program_mul_mv_q4_0_f32_1d_16x_flat, "kernel_mul_mat_q4_0_f32_1d_16x_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q6_k { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q6_k.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q6_k.cl"); #endif backend_ctx->program_mul_mv_q6_K = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_q6_K_f32 = clCreateKernel(backend_ctx->program_mul_mv_q6_K, "kernel_mul_mv_q6_K_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q8_0_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q8_0_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q8_0_f32.cl"); #endif backend_ctx->program_mul_mv_q8_0_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_q8_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_q8_0_f32, "kernel_mul_mv_q8_0_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_q8_0_f32_flat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_q8_0_f32_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_q8_0_f32_flat.cl"); #endif backend_ctx->program_mul_mv_q8_0_f32_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_q8_0_f32_flat, 
"kernel_mul_mv_q8_0_f32_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_mxfp4_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_mxfp4_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_mxfp4_f32.cl"); #endif backend_ctx->program_mul_mv_mxfp4_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_mxfp4_f32 = clCreateKernel(backend_ctx->program_mul_mv_mxfp4_f32, "kernel_mul_mv_mxfp4_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_mxfp4_f32_flat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_mxfp4_f32_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_mxfp4_f32_flat.cl"); #endif backend_ctx->program_mul_mv_mxfp4_f32_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_mxfp4_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_mxfp4_f32_flat, "kernel_mul_mv_mxfp4_f32_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_f16_f16 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_f16_f16.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_f16_f16.cl"); #endif backend_ctx->program_mul_mv_f16_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_f16_f16 = clCreateKernel(backend_ctx->program_mul_mv_f16_f16, "kernel_mul_mat_f16_f16", &err), err)); GGML_LOG_CONT("."); } // mul_mv_f16_f32_1row { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_f16_f32_1row.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_f16_f32_1row.cl"); #endif backend_ctx->program_mul_mv_f16_f32_1row = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32_1row = clCreateKernel(backend_ctx->program_mul_mv_f16_f32_1row, "kernel_mul_mat_f16_f32_1row", &err), err)); GGML_LOG_CONT("."); } // mul_mv_f16_f32_l4 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_f16_f32_l4.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_f16_f32_l4.cl"); #endif backend_ctx->program_mul_mv_f16_f32_l4 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32_l4 = clCreateKernel(backend_ctx->program_mul_mv_f16_f32_l4, "kernel_mul_mat_f16_f32_l4", &err), err)); GGML_LOG_CONT("."); } // mul_mv_f16_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_f16_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_f16_f32.cl"); #endif backend_ctx->program_mul_mv_f16_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32 = clCreateKernel(backend_ctx->program_mul_mv_f16_f32, "kernel_mul_mat_f16_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_f32_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_f32_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_f32_f32.cl"); #endif backend_ctx->program_mul_mv_f32_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_f32_f32 = 
clCreateKernel(backend_ctx->program_mul_mv_f32_f32, "kernel_mul_mat_f32_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mat_f16_f32_tiled { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mat_f16_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mat_f16_f32.cl"); #endif backend_ctx->program_mul_mat_f16_f32_tiled = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32_tiled = clCreateKernel(backend_ctx->program_mul_mat_f16_f32_tiled, "mul_mat_f16_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mm_f32_f32_l4_lm { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mm_f32_f32_l4_lm.cl.h" }; #else const std::string kernel_src = read_file("mul_mm_f32_f32_l4_lm.cl"); #endif backend_ctx->program_mul_mm_f32_f32_l4_lm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mm_f32_f32_l4_lm = clCreateKernel(backend_ctx->program_mul_mm_f32_f32_l4_lm, "kernel_mul_mm_f32_f32_l4_lm", &err), err)); GGML_LOG_CONT("."); } // mul_mm_f16_f32_l4_lm { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mm_f16_f32_l4_lm.cl.h" }; #else const std::string kernel_src = read_file("mul_mm_f16_f32_l4_lm.cl"); #endif backend_ctx->program_mul_mm_f16_f32_l4_lm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mm_f16_f32_l4_lm = clCreateKernel(backend_ctx->program_mul_mm_f16_f32_l4_lm, "kernel_mul_mm_f16_f32_l4_lm", &err), err)); GGML_LOG_CONT("."); } // mul_mm_q8_0_f32_l4_lm { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mm_q8_0_f32_l4_lm.cl.h" }; #else const std::string kernel_src = read_file("mul_mm_q8_0_f32_l4_lm.cl"); #endif backend_ctx->program_mul_mm_q8_0_f32_l4_lm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mm_q8_0_f32_l4_lm = clCreateKernel(backend_ctx->program_mul_mm_q8_0_f32_l4_lm, "kernel_mul_mm_q8_0_f32_l4_lm", &err), err)); GGML_LOG_CONT("."); } // mul_mm_f16_f32_kq_kqv { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mm_f16_f32_kq_kqv.cl.h" }; #else const std::string kernel_src = read_file("mul_mm_f16_f32_kq_kqv.cl"); #endif backend_ctx->program_mul_mm_f16_f32_kqv = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts+" -DKQV "); backend_ctx->program_mul_mm_f16_f32_kq = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mm_f16_f32_kqv = clCreateKernel(backend_ctx->program_mul_mm_f16_f32_kqv, "mul_mm_f16_f32_kqv", &err), err)); CL_CHECK((backend_ctx->kernel_mul_mm_f16_f32_kq = clCreateKernel(backend_ctx->program_mul_mm_f16_f32_kq, "mul_mm_f16_f32_kq", &err), err)); GGML_LOG_CONT("."); } // mul { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul.cl.h" }; #else const std::string kernel_src = read_file("mul.cl"); #endif backend_ctx->program_mul = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul = clCreateKernel(backend_ctx->program_mul, "kernel_mul", &err), err)); CL_CHECK((backend_ctx->kernel_mul_row = clCreateKernel(backend_ctx->program_mul, 
"kernel_mul_row", &err), err)); CL_CHECK((backend_ctx->kernel_mul_f16 = clCreateKernel(backend_ctx->program_mul, "kernel_mul_f16", &err), err)); CL_CHECK((backend_ctx->kernel_mul_row_f16 = clCreateKernel(backend_ctx->program_mul, "kernel_mul_row_f16", &err), err)); GGML_LOG_CONT("."); } // norm { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "norm.cl.h" }; #else const std::string kernel_src = read_file("norm.cl"); #endif backend_ctx->program_norm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_norm = clCreateKernel(backend_ctx->program_norm, "kernel_norm", &err), err)); CL_CHECK((backend_ctx->kernel_norm_mul_add = clCreateKernel(backend_ctx->program_norm, "kernel_norm_mul_add", &err), err)); GGML_LOG_CONT("."); } // relu { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "relu.cl.h" }; #else const std::string kernel_src = read_file("relu.cl"); #endif backend_ctx->program_relu = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_relu = clCreateKernel(backend_ctx->program_relu, "kernel_relu", &err), err)); GGML_LOG_CONT("."); } // rms_norm { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "rms_norm.cl.h" }; #else const std::string kernel_src = read_file("rms_norm.cl"); #endif backend_ctx->program_rms_norm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_rms_norm = clCreateKernel(backend_ctx->program_rms_norm, "kernel_rms_norm", &err), err)); CL_CHECK((backend_ctx->kernel_rms_norm_mul = clCreateKernel(backend_ctx->program_rms_norm, "kernel_rms_norm_mul", &err), err)); GGML_LOG_CONT("."); } // rope { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "rope.cl.h" }; #else const std::string kernel_src = read_file("rope.cl"); #endif backend_ctx->program_rope = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_rope_norm_f32 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_norm_f32", &err), err)); CL_CHECK((backend_ctx->kernel_rope_norm_f16 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_norm_f16", &err), err)); CL_CHECK((backend_ctx->kernel_rope_neox_f32 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_neox_f32", &err), err)); CL_CHECK((backend_ctx->kernel_rope_neox_f16 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_neox_f16", &err), err)); CL_CHECK((backend_ctx->kernel_rope_multi_f32 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_multi_f32", &err), err)); CL_CHECK((backend_ctx->kernel_rope_multi_f16 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_multi_f16", &err), err)); CL_CHECK((backend_ctx->kernel_rope_vision_f32 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_vision_f32", &err), err)); CL_CHECK((backend_ctx->kernel_rope_vision_f16 = clCreateKernel(backend_ctx->program_rope, "kernel_rope_vision_f16", &err), err)); GGML_LOG_CONT("."); } // scale { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "scale.cl.h" }; #else const std::string kernel_src = read_file("scale.cl"); #endif backend_ctx->program_scale = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_scale = 
clCreateKernel(backend_ctx->program_scale, "kernel_scale", &err), err)); GGML_LOG_CONT("."); } // silu { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "silu.cl.h" }; #else const std::string kernel_src = read_file("silu.cl"); #endif backend_ctx->program_silu = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_silu = clCreateKernel(backend_ctx->program_silu, "kernel_silu", &err), err)); CL_CHECK((backend_ctx->kernel_silu_4 = clCreateKernel(backend_ctx->program_silu, "kernel_silu_4", &err), err)); GGML_LOG_CONT("."); } // softmax_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "softmax_f32.cl.h" }; #else const std::string kernel_src = read_file("softmax_f32.cl"); #endif backend_ctx->program_softmax_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_soft_max = clCreateKernel(backend_ctx->program_softmax_f32, "kernel_soft_max", &err), err)); GGML_LOG_CONT("."); } // softmax_f16 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "softmax_f16.cl.h" }; #else const std::string kernel_src = read_file("softmax_f16.cl"); #endif backend_ctx->program_softmax_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_soft_max_f16 = clCreateKernel(backend_ctx->program_softmax_f16, "kernel_soft_max_f16", &err), err)); GGML_LOG_CONT("."); } // softmax_4_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "softmax_4_f32.cl.h" }; #else const std::string kernel_src = read_file("softmax_4_f32.cl"); #endif backend_ctx->program_softmax_4_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_soft_max_4 = clCreateKernel(backend_ctx->program_softmax_4_f32, "kernel_soft_max_4", &err), err)); GGML_LOG_CONT("."); } // softmax_4_f16 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "softmax_4_f16.cl.h" }; #else const std::string kernel_src = read_file("softmax_4_f16.cl"); #endif backend_ctx->program_softmax_4_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_soft_max_4_f16 = clCreateKernel(backend_ctx->program_softmax_4_f16, "kernel_soft_max_4_f16", &err), err)); GGML_LOG_CONT("."); } // flash_attn { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src_f16 { #include "flash_attn_f16.cl.h" }; const std::string kernel_src_f32 { #include "flash_attn_f32.cl.h" }; const std::string kernel_src_f32_f16 { #include "flash_attn_f32_f16.cl.h" }; #else const std::string kernel_src_f16 = read_file("flash_attn_f16.cl"); const std::string kernel_src_f32 = read_file("flash_attn_f32.cl"); const std::string kernel_src_f32_f16 = read_file("flash_attn_f32_f16.cl"); #endif if (!kernel_src_f16.empty() && !kernel_src_f32.empty() && !kernel_src_f32_f16.empty()) { const struct { int dk; int dv; int bm; int bn; } fa_dims[] = { { 40, 40, 32, 32}, { 64, 64, 64, 64}, { 80, 80, 64, 32}, { 96, 96, 64, 32}, {112, 112, 32, 32}, {128, 128, 32, 32}, {192, 128, 16, 16}, {192, 192, 16, 16}, {256, 256, 16, 16}, }; for (size_t i = 0; i < sizeof(fa_dims)/sizeof(fa_dims[0]); ++i) { const int dk = fa_dims[i].dk; const int dv = fa_dims[i].dv; const int bm = fa_dims[i].bm; const int bn = 
fa_dims[i].bn; std::string OPTS = compile_opts + " -D DK=" + std::to_string(dk) + " -D DV=" + std::to_string(dv) + " -D BLOCK_M=" + std::to_string(bm) + " -D BLOCK_N=" + std::to_string(bn); cl_program prog_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src_f16.c_str(), OPTS); cl_kernel k_f16, k_f16_q1; CL_CHECK((k_f16 = clCreateKernel(prog_f16, "flash_attn_f16", &err), err)); CL_CHECK((k_f16_q1 = clCreateKernel(prog_f16, "flash_attn_f16_q1", &err), err)); backend_ctx->kernels_flash_attn_f16[{dk, dv}] = k_f16; backend_ctx->kernels_flash_attn_f16_q1[{dk, dv}] = k_f16_q1; CL_CHECK(clReleaseProgram(prog_f16)); cl_program prog_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src_f32.c_str(), OPTS); cl_kernel k_f32, k_f32_q1; CL_CHECK((k_f32 = clCreateKernel(prog_f32, "flash_attn_f32", &err), err)); CL_CHECK((k_f32_q1 = clCreateKernel(prog_f32, "flash_attn_f32_q1", &err), err)); backend_ctx->kernels_flash_attn_f32[{dk, dv}] = k_f32; backend_ctx->kernels_flash_attn_f32_q1[{dk, dv}] = k_f32_q1; CL_CHECK(clReleaseProgram(prog_f32)); cl_program prog_f32_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src_f32_f16.c_str(), OPTS); cl_kernel k_f32_f16, k_f32_f16_q1; CL_CHECK((k_f32_f16 = clCreateKernel(prog_f32_f16, "flash_attn_f32_f16", &err), err)); CL_CHECK((k_f32_f16_q1 = clCreateKernel(prog_f32_f16, "flash_attn_f32_f16_q1", &err), err)); backend_ctx->kernels_flash_attn_f32_f16[{dk, dv}] = k_f32_f16; backend_ctx->kernels_flash_attn_f32_f16_q1[{dk, dv}] = k_f32_f16_q1; CL_CHECK(clReleaseProgram(prog_f32_f16)); backend_ctx->kernels_flash_attn_bm[{dk, dv}] = bm; backend_ctx->kernels_flash_attn_bn[{dk, dv}] = bn; } GGML_LOG_CONT("."); } } // argsort { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "argsort.cl.h" }; #else const std::string kernel_src = read_file("argsort.cl"); #endif backend_ctx->program_argsort_f32_i32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_argsort_f32_i32 = clCreateKernel(backend_ctx->program_argsort_f32_i32, "kernel_argsort_f32_i32", &err), err)); GGML_LOG_CONT("."); } // div { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "div.cl.h" }; #else const std::string kernel_src = read_file("div.cl"); #endif std::string compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable -cl-finite-math-only "; backend_ctx->program_div = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_div = clCreateKernel(backend_ctx->program_div, "kernel_div", &err), err)); CL_CHECK((backend_ctx->kernel_div_row = clCreateKernel(backend_ctx->program_div, "kernel_div_row", &err), err)); CL_CHECK((backend_ctx->kernel_div_f16 = clCreateKernel(backend_ctx->program_div, "kernel_div_f16", &err), err)); CL_CHECK((backend_ctx->kernel_div_row_f16 = clCreateKernel(backend_ctx->program_div, "kernel_div_row_f16", &err), err)); GGML_LOG_CONT("."); } // sqr { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "sqr.cl.h" }; #else const std::string kernel_src = read_file("sqr.cl"); #endif cl_program prog = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_sqr_cont_f32 = clCreateKernel(prog, "kernel_sqr_cont_f32", &err), err)); CL_CHECK((backend_ctx->kernel_sqr_cont_f32_4 = 
clCreateKernel(prog, "kernel_sqr_cont_f32_4", &err), err)); CL_CHECK((backend_ctx->kernel_sqr_cont_f16 = clCreateKernel(prog, "kernel_sqr_cont_f16", &err), err)); CL_CHECK((backend_ctx->kernel_sqr_cont_f16_4 = clCreateKernel(prog, "kernel_sqr_cont_f16_4", &err), err)); CL_CHECK(clReleaseProgram(prog)); GGML_LOG_CONT("."); } // sqrt { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "sqrt.cl.h" }; #else const std::string kernel_src = read_file("sqrt.cl"); #endif cl_program prog = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_sqrt_cont_f32 = clCreateKernel(prog, "kernel_sqrt_cont_f32", &err), err)); CL_CHECK((backend_ctx->kernel_sqrt_cont_f32_4 = clCreateKernel(prog, "kernel_sqrt_cont_f32_4", &err), err)); CL_CHECK((backend_ctx->kernel_sqrt_cont_f16 = clCreateKernel(prog, "kernel_sqrt_cont_f16", &err), err)); CL_CHECK((backend_ctx->kernel_sqrt_cont_f16_4 = clCreateKernel(prog, "kernel_sqrt_cont_f16_4", &err), err)); CL_CHECK(clReleaseProgram(prog)); GGML_LOG_CONT("."); } // mean { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mean.cl.h" }; #else const std::string kernel_src = read_file("mean.cl"); #endif cl_program prog = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mean_f32 = clCreateKernel(prog, "kernel_mean_f32", &err), err)); CL_CHECK(clReleaseProgram(prog)); GGML_LOG_CONT("."); } // sub { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "sub.cl.h" }; #else const std::string kernel_src = read_file("sub.cl"); #endif backend_ctx->program_sub = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_sub = clCreateKernel(backend_ctx->program_sub, "kernel_sub", &err), err)); CL_CHECK((backend_ctx->kernel_sub_row = clCreateKernel(backend_ctx->program_sub, "kernel_sub_row", &err), err)); CL_CHECK((backend_ctx->kernel_sub_f16 = clCreateKernel(backend_ctx->program_sub, "kernel_sub_f16", &err), err)); CL_CHECK((backend_ctx->kernel_sub_row_f16 = clCreateKernel(backend_ctx->program_sub, "kernel_sub_row_f16", &err), err)); GGML_LOG_CONT("."); } // sum_rows { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "sum_rows.cl.h" }; #else const std::string kernel_src = read_file("sum_rows.cl"); #endif backend_ctx->program_sum_rows_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_sum_rows_f32 = clCreateKernel(backend_ctx->program_sum_rows_f32, "kernel_sum_rows_f32", &err), err)); GGML_LOG_CONT("."); } // sigmoid { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "sigmoid.cl.h" }; #else const std::string kernel_src = read_file("sigmoid.cl"); #endif backend_ctx->program_sigmoid = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_sigmoid_f32 = clCreateKernel(backend_ctx->program_sigmoid, "kernel_sigmoid_f32", &err), err)); CL_CHECK((backend_ctx->kernel_sigmoid_f16 = clCreateKernel(backend_ctx->program_sigmoid, "kernel_sigmoid_f16", &err), err)); GGML_LOG_CONT("."); } // group_norm { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "group_norm.cl.h" }; #else const std::string kernel_src = read_file("group_norm.cl"); #endif 
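// Note (descriptive): as with the ops above, the group_norm source is either embedded at build time
// (GGML_OPENCL_EMBED_KERNELS) or read from disk; the compiled program provides both the plain
// kernel_group_norm and the fused kernel_group_norm_mul_add variant extracted just below.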
backend_ctx->program_group_norm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_group_norm = clCreateKernel(backend_ctx->program_group_norm, "kernel_group_norm", &err), err)); CL_CHECK((backend_ctx->kernel_group_norm_mul_add = clCreateKernel(backend_ctx->program_group_norm, "kernel_group_norm_mul_add", &err), err)); GGML_LOG_CONT("."); } // repeat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "repeat.cl.h" }; #else const std::string kernel_src = read_file("repeat.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_repeat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_repeat = clCreateKernel(backend_ctx->program_repeat, "kernel_repeat", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: repeat kernel source not found or empty. Repeat operations will not be available.\n"); backend_ctx->program_repeat = nullptr; backend_ctx->kernel_repeat = nullptr; } } // pad { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "pad.cl.h" }; #else const std::string kernel_src = read_file("pad.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_pad = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_pad = clCreateKernel(backend_ctx->program_pad, "kernel_pad", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: pad kernel source not found or empty. Pad operations will not be available.\n"); backend_ctx->program_pad = nullptr; backend_ctx->kernel_pad = nullptr; } } // tanh { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "tanh.cl.h" }; #else const std::string kernel_src = read_file("tanh.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_tanh = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_tanh_f32_nd = clCreateKernel(backend_ctx->program_tanh, "kernel_tanh_f32_nd", &err), err)); CL_CHECK((backend_ctx->kernel_tanh_f16_nd = clCreateKernel(backend_ctx->program_tanh, "kernel_tanh_f16_nd", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: tanh kernel source not found or empty. Tanh operation will not be available.\n"); backend_ctx->program_tanh = nullptr; backend_ctx->kernel_tanh_f32_nd = nullptr; backend_ctx->kernel_tanh_f16_nd = nullptr; } } // upscale { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "upscale.cl.h" }; #else const std::string kernel_src = read_file("upscale.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_upscale = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_upscale = clCreateKernel(backend_ctx->program_upscale, "kernel_upscale", &err), err)); if (backend_ctx->program_upscale) { cl_int err_bilinear; backend_ctx->kernel_upscale_bilinear = clCreateKernel(backend_ctx->program_upscale, "kernel_upscale_bilinear", &err_bilinear); if (err_bilinear != CL_SUCCESS) { GGML_LOG_WARN("ggml_opencl: kernel_upscale_bilinear not found in upscale.cl. Bilinear upscale will not be available. 
Error: %d\n", err_bilinear); backend_ctx->kernel_upscale_bilinear = nullptr; } } else { backend_ctx->kernel_upscale_bilinear = nullptr; } GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: upscale kernel source not found or empty. Upscale operations will not be available.\n"); backend_ctx->program_upscale = nullptr; backend_ctx->kernel_upscale = nullptr; backend_ctx->kernel_upscale_bilinear = nullptr; } } // concat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "concat.cl.h" }; #else const std::string kernel_src = read_file("concat.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_concat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_concat_f32_contiguous = clCreateKernel(backend_ctx->program_concat, "kernel_concat_f32_contiguous", &err), err)); CL_CHECK((backend_ctx->kernel_concat_f32_non_contiguous = clCreateKernel(backend_ctx->program_concat, "kernel_concat_f32_non_contiguous", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: concat kernel source not found or empty. Concat operations will not be available.\n"); backend_ctx->program_concat = nullptr; backend_ctx->kernel_concat_f32_contiguous = nullptr; backend_ctx->kernel_concat_f32_non_contiguous = nullptr; } } // timestep_embedding { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "tsembd.cl.h" }; #else const std::string kernel_src = read_file("tsembd.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_tsembd = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_timestep_embedding = clCreateKernel(backend_ctx->program_tsembd, "kernel_timestep_embedding", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: timestep_embedding kernel source not found or empty. 
This op will not be available.\n"); backend_ctx->program_tsembd = nullptr; backend_ctx->kernel_timestep_embedding = nullptr; } } // set_rows { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "set_rows.cl.h" }; #else const std::string kernel_src = read_file("set_rows.cl"); #endif backend_ctx->program_set_rows = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_set_rows_f32_i64 = clCreateKernel(backend_ctx->program_set_rows, "kernel_set_rows_f32_i64", &err), err)); CL_CHECK((backend_ctx->kernel_set_rows_f32_i32 = clCreateKernel(backend_ctx->program_set_rows, "kernel_set_rows_f32_i32", &err), err)); CL_CHECK((backend_ctx->kernel_set_rows_f16_i64 = clCreateKernel(backend_ctx->program_set_rows, "kernel_set_rows_f16_i64", &err), err)); CL_CHECK((backend_ctx->kernel_set_rows_f16_i32 = clCreateKernel(backend_ctx->program_set_rows, "kernel_set_rows_f16_i32", &err), err)); GGML_LOG_CONT("."); } // conv2d { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "conv2d.cl.h" }; const std::string kernel_src_f16_f32 { #include "conv2d_f16_f32.cl.h" }; #else const std::string kernel_src = read_file("conv2d.cl"); const std::string kernel_src_f16_f32 = read_file("conv2d_f16_f32.cl"); #endif if (!kernel_src.empty()) { backend_ctx->program_conv_2d_f16 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), (std::string(compile_opts) + " -DUSE_FP16=1").c_str()); CL_CHECK((backend_ctx->kernel_conv_2d_f16 = clCreateKernel(backend_ctx->program_conv_2d_f16, "kernel_conv_2d", &err), err)); GGML_LOG_CONT("."); backend_ctx->program_conv_2d_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_conv_2d_f32 = clCreateKernel(backend_ctx->program_conv_2d_f32, "kernel_conv_2d", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: conv2d kernel source not found or empty. This op will not be available.\n"); backend_ctx->program_conv_2d_f16 = nullptr; backend_ctx->kernel_conv_2d_f16 = nullptr; backend_ctx->program_conv_2d_f32 = nullptr; backend_ctx->kernel_conv_2d_f32 = nullptr; } if (!kernel_src_f16_f32.empty()) { backend_ctx->program_conv_2d_f16_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src_f16_f32.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_conv_2d_f16_f32 = clCreateKernel(backend_ctx->program_conv_2d_f16_f32, "kernel_conv_2d", &err), err)); GGML_LOG_CONT("."); } else { GGML_LOG_WARN("ggml_opencl: conv2d_f16_f32 kernel source not found or empty. 
This op will not be available.\n"); backend_ctx->program_conv_2d_f16_f32 = nullptr; backend_ctx->kernel_conv_2d_f16_f32 = nullptr; } } // ssm_conv { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "ssm_conv.cl.h" }; #else const std::string kernel_src = read_file("ssm_conv.cl"); #endif cl_program prog = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_ssm_conv_f32_f32 = clCreateKernel(prog, "kernel_ssm_conv_f32_f32", &err), err)); CL_CHECK((backend_ctx->kernel_ssm_conv_f32_f32_4 = clCreateKernel(prog, "kernel_ssm_conv_f32_f32_4", &err), err)); CL_CHECK(clReleaseProgram(prog)); GGML_LOG_CONT("."); } // mul_mv_id_q4_0_f32_8x_flat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_id_q4_0_f32_8x_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_id_q4_0_f32_8x_flat.cl"); #endif backend_ctx->program_mul_mv_id_q4_0_f32_8x_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_id_q4_0_f32_8x_flat = clCreateKernel(backend_ctx->program_mul_mv_id_q4_0_f32_8x_flat, "kernel_mul_mv_id_q4_0_f32_8x_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_id_q8_0_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_id_q8_0_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_id_q8_0_f32.cl"); #endif backend_ctx->program_mul_mv_id_q8_0_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32, "kernel_mul_mv_id_q8_0_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_id_q8_0_f32_flat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_id_q8_0_f32_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_id_q8_0_f32_flat.cl"); #endif backend_ctx->program_mul_mv_id_q8_0_f32_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32_flat, "kernel_mul_mv_id_q8_0_f32_flat", &err), err)); GGML_LOG_CONT("."); } // mul_mv_id_mxfp4_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_id_mxfp4_f32.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_id_mxfp4_f32.cl"); #endif backend_ctx->program_mul_mv_id_mxfp4_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_id_mxfp4_f32 = clCreateKernel(backend_ctx->program_mul_mv_id_mxfp4_f32, "kernel_mul_mv_id_mxfp4_f32", &err), err)); GGML_LOG_CONT("."); } // mul_mv_id_mxfp4_f32_flat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "mul_mv_id_mxfp4_f32_flat.cl.h" }; #else const std::string kernel_src = read_file("mul_mv_id_mxfp4_f32_flat.cl"); #endif backend_ctx->program_mul_mv_id_mxfp4_f32_flat = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_id_mxfp4_f32_flat, "kernel_mul_mv_id_mxfp4_f32_flat", &err), err)); GGML_LOG_CONT("."); } // Adreno kernels #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // transpose { #ifdef 
GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "transpose.cl.h" }; #else const std::string kernel_src = read_file("transpose.cl"); #endif backend_ctx->program_transpose = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); CL_CHECK((backend_ctx->kernel_transpose_32_16 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_32_16", &err), err)); CL_CHECK((backend_ctx->kernel_transpose_32 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_32", &err), err)); CL_CHECK((backend_ctx->kernel_transpose_16 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_16", &err), err)); CL_CHECK((backend_ctx->kernel_transpose_16_buf = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_16_buf", &err), err)); CL_CHECK((backend_ctx->kernel_transpose_16_4x1 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_16_4x1", &err), err)); GGML_LOG_CONT("."); } // gemv_noshuffle_general { std::string CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable " " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); if (backend_ctx->has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src_CL_gemv_general { #include "gemv_noshuffle_general.cl.h" }; #else const std::string kernel_src_CL_gemv_general = read_file("gemv_noshuffle_general.cl"); #endif backend_ctx->program_CL_gemv_general = build_program_from_source( backend_ctx->context, backend_ctx->device, kernel_src_CL_gemv_general.c_str(), CL_gemv_compile_opts); CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general = clCreateKernel(backend_ctx->program_CL_gemv_general, "kernel_gemv_noshuffle", &err), err)); GGML_LOG_CONT("."); } // gemv_noshuffle { // Gemv 2048, 16384 std::string CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable " " -DLINE_STRIDE_A=2048 " " -DBLOCK_STRIDE_A=16384 " " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); if (backend_ctx->has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src_CL_gemv { #include "gemv_noshuffle.cl.h" }; #else const std::string kernel_src_CL_gemv = read_file("gemv_noshuffle.cl"); #endif backend_ctx->program_CL_gemv_4096_1_4096 = build_program_from_source( backend_ctx->context, backend_ctx->device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts); CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_4096_1_4096, "kernel_gemv_noshuffle", &err), err)); GGML_LOG_CONT("."); // Gemv 2048, 16384 CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable " " -DLINE_STRIDE_A=2048 " " -DBLOCK_STRIDE_A=16384 " " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); if (backend_ctx->has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } backend_ctx->program_CL_gemv_4096_1_11008 = build_program_from_source( backend_ctx->context, backend_ctx->device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts); CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008 = clCreateKernel(backend_ctx->program_CL_gemv_4096_1_11008, "kernel_gemv_noshuffle", &err), err)); GGML_LOG_CONT("."); // Gemv 5504, 44032 CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + " 
-cl-mad-enable " " -DLINE_STRIDE_A=5504 " " -DBLOCK_STRIDE_A=44032 " " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); if (backend_ctx->has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } backend_ctx->program_CL_gemv_11008_1_4096 = build_program_from_source( backend_ctx->context, backend_ctx->device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts); CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_11008_1_4096, "kernel_gemv_noshuffle", &err), err)); GGML_LOG_CONT("."); // Gemv 16000, 128000 CL_gemv_compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable " " -DLINE_STRIDE_A=16000 " " -DBLOCK_STRIDE_A=128000 " " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); if (backend_ctx->has_vector_subgroup_broadcast) { CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; } backend_ctx->program_CL_gemv_32000_1_4096 = build_program_from_source( backend_ctx->context, backend_ctx->device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts); CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_32000_1_4096, "kernel_gemv_noshuffle", &err), err)); GGML_LOG_CONT("."); } // mul_mat_Ab_Bi_8x4 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src_CL_gemm { #include "mul_mat_Ab_Bi_8x4.cl.h" }; #else const std::string kernel_src_CL_gemm = read_file("mul_mat_Ab_Bi_8x4.cl"); #endif backend_ctx->program_CL_gemm = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src_CL_gemm.c_str(), compile_opts); CL_CHECK((backend_ctx->CL_mul_mat_Ab_Bi_8x4 = clCreateKernel(backend_ctx->program_CL_gemm, "kernel_mul_mat_Ab_Bi_8x4", &err), err)); GGML_LOG_CONT("."); } std::string CL_moe_compile_opts = std::string("-cl-std=") + opencl_c_std + " -cl-mad-enable " " -cl-fast-relaxed-math"; // gemv_moe_mxfp4_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "gemv_moe_mxfp4_f32.cl.h" }; #else const std::string kernel_src = read_file("gemv_moe_mxfp4_f32.cl"); #endif backend_ctx->program_gemv_moe_mxfp4_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_moe_compile_opts); CL_CHECK((backend_ctx->kernel_gemv_moe_mxfp4_f32 = clCreateKernel(backend_ctx->program_gemv_moe_mxfp4_f32, "kernel_gemv_moe_mxfp4_f32", &err), err)); GGML_LOG_CONT("."); } // gemm_moe_mxfp4_f32 { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { #include "gemm_moe_mxfp4_f32.cl.h" }; #else const std::string kernel_src = read_file("gemm_moe_mxfp4_f32.cl"); #endif backend_ctx->program_gemm_moe_mxfp4_f32 = build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_moe_compile_opts); CL_CHECK((backend_ctx->kernel_gemm_moe_mxfp4_f32 = clCreateKernel(backend_ctx->program_gemm_moe_mxfp4_f32, "kernel_gemm_moe_mxfp4_f32", &err), err)); GGML_LOG_CONT("."); } #endif // GGML_OPENCL_USE_ADRENO_KERNELS GGML_LOG_CONT("\n"); } // XXX static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { // XXX static bool initialized = false; // XXX static ggml_backend_opencl_context *backend_ctx = nullptr; static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev); namespace /* anonymous */ { extern struct ggml_backend_device_i ggml_backend_opencl_device_i; } // Look for available and suitable devices. 
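// Device probing notes (descriptive summary of the logic below):
// - All OpenCL platforms and devices are enumerated; the first GPU found becomes the default device.
// - GGML_OPENCL_PLATFORM and GGML_OPENCL_DEVICE can override the selection. Each accepts either a
//   numeric index or a name substring, e.g. (illustrative invocation, the binary name is only an example):
//       GGML_OPENCL_PLATFORM=Qualcomm GGML_OPENCL_DEVICE=0 ./my-app
// - A single cl_context is created with clCreateContext and shared by all selected devices.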
static std::vector<ggml_backend_device> ggml_opencl_probe_devices(ggml_backend_reg * reg) { std::vector<ggml_backend_device> found_devices; #ifdef GGML_OPENCL_PROFILING GGML_LOG_INFO("ggml_opencl: OpenCL profiling enabled\n"); #endif struct cl_device; struct cl_platform { cl_platform_id id; unsigned number; char name[128]; char vendor[128]; struct cl_device * devices; unsigned n_devices; struct cl_device * default_device; }; struct cl_device { struct cl_platform * platform; cl_device_id id; unsigned number; cl_device_type type; char name[128]; char version[128]; }; enum { NPLAT = 16, NDEV = 16 }; struct cl_platform platforms[NPLAT]; unsigned n_platforms = 0; struct cl_device devices[NDEV]; unsigned n_devices = 0; struct cl_device * default_device = NULL; unsigned default_platform_number = 0; cl_platform_id platform_ids[NPLAT]; if (clGetPlatformIDs(NPLAT, platform_ids, &n_platforms) != CL_SUCCESS) { GGML_LOG_ERROR("ggml_opencl: platform IDs not available.\n"); return found_devices; } for (unsigned i = 0; i < n_platforms; i++) { struct cl_platform * p = &platforms[i]; p->number = i; p->id = platform_ids[i]; CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_NAME, sizeof(p->name), &p->name, NULL)); CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_VENDOR, sizeof(p->vendor), &p->vendor, NULL)); cl_device_id device_ids[NDEV]; cl_int clGetDeviceIDsError = clGetDeviceIDs(p->id, CL_DEVICE_TYPE_ALL, NDEV, device_ids, &p->n_devices); if (clGetDeviceIDsError == CL_DEVICE_NOT_FOUND) { p->n_devices = 0; } else { CL_CHECK(clGetDeviceIDsError); } p->devices = p->n_devices > 0 ? &devices[n_devices] : NULL; p->default_device = NULL; for (unsigned j = 0; j < p->n_devices; j++) { struct cl_device * d = &devices[n_devices]; d->number = n_devices++; d->id = device_ids[j]; d->platform = p; CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_NAME, sizeof(d->name), &d->name, NULL)); CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_TYPE, sizeof(d->type), &d->type, NULL)); CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_VERSION, sizeof(d->version), &d->version, NULL)); if (p->default_device == NULL && d->type == CL_DEVICE_TYPE_GPU) { p->default_device = d; } } if (default_device == NULL && p->default_device != NULL) { default_device = p->default_device; default_platform_number = i; } } if (n_devices == 0) { GGML_LOG_ERROR("ggml_opencl: could not find any OpenCL devices.\n"); return found_devices; } char * user_platform_string = getenv("GGML_OPENCL_PLATFORM"); char * user_device_string = getenv("GGML_OPENCL_DEVICE"); int user_platform_number = -1; int user_device_number = -1; cl_device * candidate_devices = nullptr; unsigned n_candidate_devices = 0; unsigned n; if (user_platform_string != NULL && sscanf(user_platform_string, " %u", &n) == 1 && n < n_platforms) { user_platform_number = (int)n; } if (user_device_string != NULL && sscanf(user_device_string, " %u", &n) == 1 && n < n_devices) { user_device_number = (int)n; } if (user_platform_number != -1 && user_device_number != -1) { cl_platform* platform = &platforms[user_platform_number]; if ((unsigned)user_device_number >= platform->n_devices) { GGML_LOG_ERROR("ggml_opencl: invalid device number %d\n", user_device_number); exit(1); } default_device = &platform->devices[user_device_number]; candidate_devices = platform->devices; n_candidate_devices = platform->n_devices; } else { // Choose a platform by matching a substring.
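// The substring from GGML_OPENCL_PLATFORM is compared against both the platform name and the
// platform vendor; if nothing matches, initialization aborts with exit(1). A device substring from
// GGML_OPENCL_DEVICE is handled the same way against device names further below.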
if (user_platform_number == -1 && user_platform_string != NULL && user_platform_string[0] != 0) { for (unsigned i = 0; i < n_platforms; i++) { struct cl_platform * p = &platforms[i]; if (strstr(p->name, user_platform_string) != NULL || strstr(p->vendor, user_platform_string) != NULL) { user_platform_number = (int)i; break; } } if (user_platform_number == -1) { GGML_LOG_ERROR("ggml_opencl: no platform matching '%s' was found.\n", user_platform_string); exit(1); } } int platform_idx = user_platform_number != -1 ? user_platform_number : default_platform_number; struct cl_platform * p = &platforms[platform_idx]; candidate_devices = p->devices; n_candidate_devices = p->n_devices; default_device = p->default_device; if (n_candidate_devices == 0) { GGML_LOG_ERROR("ggml_opencl: selected platform '%s' does not have any devices.\n", p->name); exit(1); } if (user_device_number == -1 && user_device_string != NULL && user_device_string[0] != 0) { for (unsigned i = 0; i < n_candidate_devices; i++) { struct cl_device * d = &candidate_devices[i]; if (strstr(d->name, user_device_string) != NULL) { user_device_number = d->number; break; } } if (user_device_number == -1) { GGML_LOG_ERROR("ggml_opencl: no device matching '%s' was found.\n", user_device_string); exit(1); } } if (user_device_number != -1) { candidate_devices = &devices[user_device_number]; n_candidate_devices = 1; default_device = &candidate_devices[0]; } GGML_ASSERT(n_candidate_devices > 0); if (default_device == NULL) { default_device = &candidate_devices[0]; } } GGML_ASSERT(n_candidate_devices != 0 && candidate_devices); // Put the default device in front. for (unsigned i = 1; i < n_candidate_devices; i++) { if (&candidate_devices[i] == default_device) { std::swap(candidate_devices[0], candidate_devices[i]); default_device = &candidate_devices[0]; break; } } GGML_LOG_INFO("ggml_opencl: selected platform: '%s'\n", default_device->platform->name); std::vector<cl_device_id> device_ids; for (auto dev = candidate_devices, dev_end = candidate_devices + n_candidate_devices; dev != dev_end; dev++) { device_ids.push_back(dev->id); } cl_int err; cl_context shared_context; cl_context_properties properties[] = { (intptr_t) CL_CONTEXT_PLATFORM, (intptr_t) default_device->platform->id, 0 }; CL_CHECK( (shared_context = clCreateContext(properties, device_ids.size(), device_ids.data(), NULL, NULL, &err), err)); for (auto dev = candidate_devices, dev_end = candidate_devices + n_candidate_devices; dev != dev_end; dev++) { GGML_LOG_INFO("\nggml_opencl: device: '%s (%s)'\n", dev->name, dev->version); auto dev_ctx = std::unique_ptr<ggml_backend_opencl_device_context>(new ggml_backend_opencl_device_context{ /*.platform =*/dev->platform->id, /*.platform_nane =*/dev->platform->name, /*.device =*/dev->id, /*.device_name =*/dev->name, /*.device_type =*/dev->type, /*.device_version =*/dev->version, /*.backend_ctx =*/nullptr, /*.buffer_type =*/{}, /*.context =*/shared_context, }); found_devices.push_back(ggml_backend_device{ /* .iface = */ ggml_backend_opencl_device_i, /* .reg = */ reg, /* .context = */ dev_ctx.get(), }); if (!ggml_cl2_init(&found_devices.back())) { found_devices.pop_back(); GGML_LOG_INFO("ggml_opencl: drop unsupported device.\n"); continue; } dev_ctx.release(); } if (found_devices.size()) { auto * dev_ctx = static_cast<ggml_backend_opencl_device_context *>(found_devices.front().context); GGML_LOG_INFO("ggml_opencl: default device: '%s (%s)'\n", dev_ctx->device_name.c_str(), dev_ctx->device_version.c_str()); if (dev_ctx->device_type != CL_DEVICE_TYPE_GPU) { GGML_LOG_WARN("ggml_opencl: warning, the default device is not a GPU: '%s'.\n",
dev_ctx->device_name.c_str()); } } return found_devices; } // Initialize device if it is supported (returns nullptr if it is not). static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { GGML_ASSERT(dev); GGML_ASSERT(dev->context); ggml_backend_opencl_device_context * dev_ctx = (ggml_backend_opencl_device_context *) dev->context; GGML_ASSERT(dev_ctx->platform); GGML_ASSERT(dev_ctx->device); if (dev_ctx->backend_ctx) { return dev_ctx->backend_ctx; } auto backend_ctx = std::make_unique<ggml_backend_opencl_context>(); backend_ctx->device = dev_ctx->device; backend_ctx->gpu_family = GPU_FAMILY::UNKNOWN; // ref_count gets increased in ggml_backend_opencl_device_init // This function is also used to retrieve backend context, so we don't want // to increase ref_count for each call. We only want to increase ref_count // when the associated device is initialized backend_ctx->ref_count = 0; if (strstr(dev_ctx->device_name.c_str(), "Adreno") || strstr(dev_ctx->device_name.c_str(), "Qualcomm") || strstr(dev_ctx->device_version.c_str(), "Adreno")) { backend_ctx->gpu_family = GPU_FAMILY::ADRENO; // Usually device version contains the detailed device name backend_ctx->adreno_gen = get_adreno_gpu_gen(dev_ctx->device_version.c_str()); if (backend_ctx->adreno_gen == ADRENO_GPU_GEN::ADRENO_UNKNOWN) { backend_ctx->adreno_gen = get_adreno_gpu_gen(dev_ctx->device_name.c_str()); } // Use wave size of 64 for all Adreno GPUs. backend_ctx->adreno_wave_size = 64; } else if (strstr(dev_ctx->device_name.c_str(), "Intel")) { backend_ctx->gpu_family = GPU_FAMILY::INTEL; } else { GGML_LOG_ERROR("Unsupported GPU: %s\n", dev_ctx->device_name.c_str()); backend_ctx->gpu_family = GPU_FAMILY::UNKNOWN; return nullptr; } #ifdef GGML_OPENCL_USE_ADRENO_KERNELS if (backend_ctx->gpu_family != GPU_FAMILY::ADRENO) { GGML_LOG_ERROR("ggml_opencl: Adreno-specific kernels should not be enabled for non-Adreno GPUs; " "run on an Adreno GPU or recompile with CMake option `-DGGML_OPENCL_USE_ADRENO_KERNELS=OFF`\n"); return nullptr; } #endif // Populate backend device name backend_ctx->device_name = dev_ctx->device_name; // A local ref of cl_device_id for convenience cl_device_id device = backend_ctx->device; ggml_cl_version platform_version = get_opencl_platform_version(dev_ctx->platform); // Check device OpenCL version, OpenCL 2.0 or above is required ggml_cl_version opencl_c_version = get_opencl_c_version(platform_version, device); if (opencl_c_version.major < 2) { GGML_LOG_ERROR("ggml_opencl: OpenCL 2.0 or above is required\n"); return nullptr; } // Check driver version size_t driver_version_str_size; clGetDeviceInfo(device, CL_DRIVER_VERSION, 0, NULL, &driver_version_str_size); char *driver_version = (char *)alloca(driver_version_str_size + 1); clGetDeviceInfo(device, CL_DRIVER_VERSION, driver_version_str_size, driver_version, NULL); driver_version[driver_version_str_size] = '\0'; GGML_LOG_INFO("ggml_opencl: OpenCL driver: %s\n", driver_version); backend_ctx->driver_version = driver_version; backend_ctx->adreno_cl_compiler_version = get_adreno_cl_compiler_version(driver_version); backend_ctx->has_vector_subgroup_broadcast = (backend_ctx->adreno_cl_compiler_version.type == E031 && backend_ctx->adreno_cl_compiler_version.major >= 47) || (backend_ctx->adreno_cl_compiler_version.type == DX && backend_ctx->adreno_cl_compiler_version.major >= 17); GGML_LOG_INFO("ggml_opencl: vector subgroup broadcast support: %s\n", backend_ctx->has_vector_subgroup_broadcast ?
"true" : "false"); size_t ext_str_size; clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, 0, NULL, &ext_str_size); char *ext_buffer = (char *)alloca(ext_str_size + 1); clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, ext_str_size, ext_buffer, NULL); ext_buffer[ext_str_size] = '\0'; // ensure it is null terminated // Check if ext_buffer contains cl_khr_fp16 backend_ctx->fp16_support = strstr(ext_buffer, "cl_khr_fp16") != NULL; GGML_LOG_INFO("ggml_opencl: device FP16 support: %s\n", backend_ctx->fp16_support ? "true" : "false"); // fp16 is required if (!backend_ctx->fp16_support) { GGML_LOG_ERROR("ggml_opencl: device does not support FP16\n"); return nullptr; } // If OpenCL 3.0 is supported, then check for cl_khr_subgroups, which becomes // optional in OpenCL 3.0 (cl_khr_subgroup is mandatory in OpenCL 2.x) if (opencl_c_version.major == 3 && strstr(ext_buffer, "cl_khr_subgroups") == NULL && strstr(ext_buffer, "cl_intel_subgroups") == NULL) { GGML_LOG_ERROR("ggml_opencl: device does not support subgroups (cl_khr_subgroups or cl_intel_subgroups) " "(note that subgroups is an optional feature in OpenCL 3.0)\n"); return nullptr; } cl_uint base_align_in_bits; CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &base_align_in_bits, NULL)); GGML_ASSERT(base_align_in_bits % 8u == 0); backend_ctx->alignment = base_align_in_bits / 8u; GGML_LOG_INFO("ggml_opencl: mem base addr align: %u\n", backend_ctx->alignment); clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(size_t), &backend_ctx->max_alloc_size, NULL); GGML_LOG_INFO("ggml_opencl: max mem alloc size: %zu MB\n", backend_ctx->max_alloc_size/1024/1024); clGetDeviceInfo(device, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), &backend_ctx->max_workgroup_size, NULL); GGML_LOG_INFO("ggml_opencl: device max workgroup size: %lu\n", backend_ctx->max_workgroup_size); // Check SVM. cl_device_svm_capabilities svm_caps; CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_SVM_CAPABILITIES, sizeof(cl_device_svm_capabilities), &svm_caps, 0)); GGML_LOG_INFO("ggml_opencl: SVM coarse grain buffer support: %s\n", svm_caps & CL_DEVICE_SVM_COARSE_GRAIN_BUFFER ? "true" : "false"); GGML_LOG_INFO("ggml_opencl: SVM fine grain buffer support: %s\n", svm_caps & CL_DEVICE_SVM_FINE_GRAIN_BUFFER ? "true" : "false"); GGML_LOG_INFO("ggml_opencl: SVM fine grain system support: %s\n", svm_caps & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM ? "true" : "false"); GGML_LOG_INFO("ggml_opencl: SVM atomics support: %s\n", svm_caps & CL_DEVICE_SVM_ATOMICS ? "true" : "false"); if (opencl_c_version.major >= 3) { // Assume it is not available for 3.0, since it is optional in 3.0. // If compiling against 3.0, then we can query. backend_ctx->non_uniform_workgroups = false; #if CL_TARGET_OPENCL_VERSION >= 300 CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_NON_UNIFORM_WORK_GROUP_SUPPORT, sizeof(cl_bool), &backend_ctx->non_uniform_workgroups, 0)); #endif } else { GGML_ASSERT(opencl_c_version.major == 2); // Non-uniform workgroup sizes is mandatory feature in v2.x. 
backend_ctx->non_uniform_workgroups = true; } // Print out configurations #ifdef GGML_OPENCL_SOA_Q GGML_LOG_INFO("ggml_opencl: flattening quantized weights representation as struct of arrays (GGML_OPENCL_SOA_Q)\n"); #endif // GGML_OPENCL_SOA_Q #ifdef GGML_OPENCL_USE_ADRENO_KERNELS GGML_LOG_INFO("ggml_opencl: using kernels optimized for Adreno (GGML_OPENCL_USE_ADRENO_KERNELS)\n"); #endif // GGML_OPENCL_USE_ADRENO_KERNELS cl_int err; // A local ref of cl_context for convenience cl_context context = backend_ctx->context = dev_ctx->context; //CL_CHECK((queue = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &err), // (err != CL_INVALID_QUEUE_PROPERTIES && err != CL_INVALID_VALUE ? err : // (queue = clCreateCommandQueue(context, device, 0, &err), err) //))); cl_command_queue_properties command_queue_props = 0; #ifdef GGML_OPENCL_PROFILING command_queue_props |= CL_QUEUE_PROFILING_ENABLE; #endif CL_CHECK((backend_ctx->queue = clCreateCommandQueue(context, device, command_queue_props, &err), err)); // Load kernels load_cl_kernels(backend_ctx.get(), opencl_c_version); #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // Allocate intermediate buffers and images size_t required_A_q_d_bytes = 311164928; size_t required_A_s_d_bytes = 38895616; size_t required_B_d_bytes = 45088768; // Ensure buffer sizes do not exceed the maximum allocation size size_t max_A_q_d_bytes = MIN(required_A_q_d_bytes, backend_ctx->max_alloc_size); size_t max_A_s_d_bytes = MIN(required_A_s_d_bytes, backend_ctx->max_alloc_size); size_t max_B_d_bytes = MIN(required_B_d_bytes, backend_ctx->max_alloc_size); if (required_A_q_d_bytes > backend_ctx->max_alloc_size) { GGML_LOG_WARN("ggml_opencl: A_q_d buffer size reduced from %zu to %zu due to device limitations.\n", required_A_q_d_bytes, max_A_q_d_bytes); } if (required_A_s_d_bytes > backend_ctx->max_alloc_size) { GGML_LOG_WARN("ggml_opencl: A_s_d buffer size reduced from %zu to %zu due to device limitations.\n", required_A_s_d_bytes, max_A_s_d_bytes); } if (required_B_d_bytes > backend_ctx->max_alloc_size) { GGML_LOG_WARN("ggml_opencl: B_d buffer size reduced from %zu to %zu due to device limitations.\n", required_B_d_bytes, max_B_d_bytes); } backend_ctx->prealloc_quant_trans.allocate(context, max_A_q_d_bytes); backend_ctx->prealloc_scales_trans.allocate(context, max_A_s_d_bytes); backend_ctx->prealloc_act_trans.allocate(context, max_B_d_bytes); #endif // GGML_OPENCL_USE_ADRENO_KERNELS backend_ctx->disable_fusion = getenv("GGML_OPENCL_DISABLE_FUSION") != nullptr; dev_ctx->backend_ctx = backend_ctx.release(); return dev_ctx->backend_ctx; } static void ggml_cl2_free(ggml_backend_t backend) { ggml_backend_opencl_context * ctx = (ggml_backend_opencl_context *) backend->context; ctx->free(); // The CL context is shared by all backends, release it if all backends have been released bool should_release_opencl = true; for (auto device : g_ggml_backend_opencl_devices) { ggml_backend_opencl_device_context * ctx_dev = (ggml_backend_opencl_device_context *) device.context; if (ctx_dev->backend_ctx->ref_count > 0) { should_release_opencl = false; } } if (should_release_opencl) { CL_CHECK(clReleaseContext(ctx->context)); } } //------------------------------------------------------------------------------ // Tensor extra management //------------------------------------------------------------------------------ struct ggml_tensor_extra_cl { // The buffer object that holds the data. cl_mem data_device; // The offset into the buffer object. 
// This is primarily for scratch buffer // and view operation. // NB: this offset no longer includes view offset (view_offs). Whenever this // offset is used, view_offs should be considered. cl_ulong offset; // The actual size of the cl_mem object. This is needed when returning the // block to the pool. size_t actual_size; void reset() { data_device = nullptr; offset = 0; actual_size = 0; } }; // Additional tensor extra structs for quantized tensors. // These tensors are loaded from files and should not be allocated in scratch -- // they should always be allocated from the pool. Hence, they do not have an // `offset`, which indicates their location in the scratch buffer. struct ggml_tensor_extra_cl_q4_0 { // Quantized values. cl_mem q = nullptr; // Quantized values in image1d_buffer_t. cl_mem q_img = nullptr; // Scales. cl_mem d = nullptr; // Scales in image1d_buffer_t. cl_mem d_img = nullptr; // Size of quantized values. size_t size_q = 0; // Size of scales. size_t size_d = 0; ~ggml_tensor_extra_cl_q4_0() { reset(); } void reset() { // q and d are subbuffers into the bigger buffer allocated in ggml_backend_buffer. // They must be properly released so that the original buffer can be // properly released to avoid memory leak. if (q != nullptr) { CL_CHECK(clReleaseMemObject(q)); q = nullptr; } if (d != nullptr) { CL_CHECK(clReleaseMemObject(d)); d = nullptr; } // Currently, q_img and d_img are only initialized when SMALL_ALLOC is // enabled. They point to the images in ggml_backend_opencl_buffer_context. // So, there is no need to release them here. // TODO: initialize them for non SMALL_PATH path, or remove them. q_img = nullptr; d_img = nullptr; size_q = 0; size_d = 0; } }; struct ggml_tensor_extra_cl_mxfp4 { // Quantized values. cl_mem q = nullptr; // Quantized values in image1d_buffer_t. cl_mem q_img = nullptr; // Scales in E8M0. cl_mem e = nullptr; // Scales in image1d_buffer_t. cl_mem e_img = nullptr; // Size of quantized values. size_t size_q = 0; // Size of scales. size_t size_e = 0; ~ggml_tensor_extra_cl_mxfp4() { reset(); } void reset() { // q and e are subbuffers into the bigger buffer allocated in ggml_backend_buffer. // They must be properly released so that the original buffer can be // properly released to avoid memory leak. if (q != nullptr) { CL_CHECK(clReleaseMemObject(q)); q = nullptr; } if (e != nullptr) { CL_CHECK(clReleaseMemObject(e)); e = nullptr; } if (q_img != nullptr) { CL_CHECK(clReleaseMemObject(q_img)); q_img = nullptr; } // Currently, q_img and e_img are not used. They can be image1d_buffer_t // that wraps around q and e to utilize image access path. q_img = nullptr; e_img = nullptr; size_q = 0; size_e = 0; } }; struct ggml_tensor_extra_cl_q8_0 { cl_mem q = nullptr; cl_mem q_img = nullptr; cl_mem d = nullptr; cl_mem d_img = nullptr; size_t size_q = 0; size_t size_d = 0; ~ggml_tensor_extra_cl_q8_0() { reset(); } void reset() { // q and d are subbuffers into the bigger buffer allocated in ggml_backend_buffer. // They must be properly released so that the original buffer can be // properly released to avoid memory leak. if (q != nullptr) { CL_CHECK(clReleaseMemObject(q)); q = nullptr; } if (d != nullptr) { CL_CHECK(clReleaseMemObject(d)); d = nullptr; } // Currently, q_img and d_img are not used. They can be image1d_buffer_t // that wraps around q and d to utilize image access path.
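// Since they are unused, they are only reset to nullptr here, without a clReleaseMemObject call.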
q_img = nullptr; d_img = nullptr; size_q = 0; size_d = 0; } }; //------------------------------------------------------------------------------ // Backend API //------------------------------------------------------------------------------ // // backend // static const char * ggml_backend_opencl_name(ggml_backend_t backend) { return "OpenCL"; UNUSED(backend); } static void ggml_backend_opencl_free(ggml_backend_t backend) { ggml_cl2_free(backend); } static void ggml_backend_opencl_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_UNUSED(backend); GGML_UNUSED(tensor); GGML_UNUSED(data); GGML_UNUSED(offset); GGML_UNUSED(size); } static void ggml_backend_opencl_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_UNUSED(backend); GGML_UNUSED(tensor); GGML_UNUSED(data); GGML_UNUSED(offset); GGML_UNUSED(size); } static bool ggml_backend_opencl_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) { GGML_UNUSED(backend); GGML_UNUSED(src); GGML_UNUSED(dst); return false; } static void ggml_backend_opencl_synchronize(ggml_backend_t backend) { auto * backend_ctx = static_cast<ggml_backend_opencl_context *>(backend->context); cl_event evt; CL_CHECK(clEnqueueBarrierWithWaitList(backend_ctx->queue, 0, nullptr, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clReleaseEvent(evt)); } // Synchronizes the 'backend_ctx's device with others so that commands // enqueued to it won't start until commands in the other devices have // completed. static void sync_with_other_backends(ggml_backend_opencl_context * backend_ctx) { if (g_ggml_backend_opencl_devices.size() < 2) return; // No other devices to synchronize with. std::vector<cl_event> events; events.reserve(g_ggml_backend_opencl_devices.size()); for (ggml_backend_device & backend_dev : g_ggml_backend_opencl_devices) { auto * other_backend_ctx = ggml_cl2_init(&backend_dev); if (backend_ctx != other_backend_ctx) { cl_event ev; CL_CHECK(clEnqueueMarkerWithWaitList(other_backend_ctx->queue, 0, nullptr, &ev)); CL_CHECK(clFlush(other_backend_ctx->queue)); events.push_back(ev); } } CL_CHECK(clEnqueueBarrierWithWaitList(backend_ctx->queue, events.size(), events.data(), nullptr)); for (auto ev : events) { CL_CHECK(clReleaseEvent(ev)); } } static void sync_with_other_backends(ggml_backend_t backend) { auto * backend_ctx = static_cast<ggml_backend_opencl_context *>(backend->context); sync_with_other_backends(backend_ctx); } static bool ggml_opencl_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops) { if (!ggml_can_fuse(cgraph, node_idx, ops)) { return false; } if (ops.size() == 2 && ops.begin()[0] == GGML_OP_RMS_NORM && ops.begin()[1] == GGML_OP_MUL) { const ggml_tensor *rms_norm = cgraph->nodes[node_idx]; const ggml_tensor *mul = cgraph->nodes[node_idx+1]; GGML_ASSERT(rms_norm->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(rms_norm->type == GGML_TYPE_F32); // rms_norm only supports f32 if (mul->src[0]->type != GGML_TYPE_F32 || mul->src[1]->type != GGML_TYPE_F32 || mul->type != GGML_TYPE_F32) { return false; } // if rms_norm is the B operand, then we don't handle broadcast if (rms_norm == mul->src[1] && !ggml_are_same_shape(mul->src[0], rms_norm)) { return false; } // rms_norm assumes contiguous rows if (!ggml_is_contiguous_rows(mul->src[0]) || !ggml_is_contiguous_rows(mul->src[1])) { return false; } } else if (ops.size() == 3 && ops.begin()[0] == GGML_OP_NORM && ops.begin()[1] == GGML_OP_MUL && ops.begin()[2] == GGML_OP_ADD) { const ggml_tensor
*norm = cgraph->nodes[node_idx]; const ggml_tensor *mul = cgraph->nodes[node_idx+1]; const ggml_tensor *add = cgraph->nodes[node_idx+2]; const ggml_tensor *w = mul->src[0] == norm ? mul->src[1] : mul->src[0]; const ggml_tensor *b = add->src[0] == mul ? add->src[1] : add->src[0]; // norm fusion only supports F32 if (norm->src[0]->type != GGML_TYPE_F32 || w->type != GGML_TYPE_F32 || b->type != GGML_TYPE_F32) { return false; } if (norm->src[0]->ne[0] % 4 != 0) { return false; } if (!ggml_is_contiguous(norm->src[0]) || !ggml_is_contiguous(w) || !ggml_is_contiguous(b)) { return false; } } else if (ops.size() == 3 && ops.begin()[0] == GGML_OP_GROUP_NORM && ops.begin()[1] == GGML_OP_MUL && ops.begin()[2] == GGML_OP_ADD) { const ggml_tensor *gn = cgraph->nodes[node_idx]; const ggml_tensor *mul = cgraph->nodes[node_idx+1]; const ggml_tensor *add = cgraph->nodes[node_idx+2]; const ggml_tensor *w = mul->src[0] == gn ? mul->src[1] : mul->src[0]; const ggml_tensor *b = add->src[0] == mul ? add->src[1] : add->src[0]; if (gn->src[0]->type != GGML_TYPE_F32 || w->type != GGML_TYPE_F32 || b->type != GGML_TYPE_F32) { return false; } if (!ggml_is_contiguous(gn->src[0]) || !ggml_is_contiguous(w) || !ggml_is_contiguous(b)) { return false; } } return true; } static void ggml_opencl_op_rms_norm_fused(ggml_backend_t backend, ggml_tensor * rms_norm_tensor, ggml_tensor * mul_tensor); static void ggml_opencl_op_norm_fused(ggml_backend_t backend, ggml_tensor * norm_tensor, ggml_tensor * mul_tensor, ggml_tensor * add_tensor); static void ggml_opencl_op_group_norm_fused(ggml_backend_t backend, ggml_tensor * gn_tensor, ggml_tensor * mul_tensor, ggml_tensor * add_tensor); static ggml_status ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; // NOTE: this may oversynchronize by synchronizing with // backends/devices which don't compute 'cgraph's // dependencies. 
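        // Concretely (based on sync_with_other_backends above): for every other OpenCL
        // device a marker is enqueued on that device's queue and the queue is flushed,
        // and a barrier on this backend's queue waits on those markers, so this node
        // cannot start before previously submitted work on the other devices completes.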
sync_with_other_backends(backend); if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { continue; } if (!backend_ctx->disable_fusion && ggml_opencl_can_fuse(cgraph, i, { GGML_OP_NORM, GGML_OP_MUL, GGML_OP_ADD })) { ggml_opencl_op_norm_fused(backend, node, cgraph->nodes[i+1], cgraph->nodes[i+2]); i += 2; continue; } if (!backend_ctx->disable_fusion && ggml_opencl_can_fuse(cgraph, i, { GGML_OP_GROUP_NORM, GGML_OP_MUL, GGML_OP_ADD })) { ggml_opencl_op_group_norm_fused(backend, node, cgraph->nodes[i+1], cgraph->nodes[i+2]); i += 2; continue; } if (!backend_ctx->disable_fusion && ggml_opencl_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) { ggml_opencl_op_rms_norm_fused(backend, node, cgraph->nodes[i+1]); i++; continue; } bool ok = ggml_cl_compute_forward(backend, node); if (!ok) { GGML_LOG_ERROR("%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); } GGML_ASSERT(ok); } return GGML_STATUS_SUCCESS; } static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { ggml_backend_opencl_device_context * dev_ctx = (ggml_backend_opencl_device_context *)dev->context; ggml_backend_opencl_context * backend_ctx = dev_ctx->backend_ctx; switch (op->op) { case GGML_OP_NONE: return true; case GGML_OP_GET_ROWS: switch (op->src[0]->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: return true; case GGML_TYPE_Q4_0: #ifdef GGML_OPENCL_SOA_Q // We do not support flattened Q4_0 (and possibly other Q's) return false; #else // GGML_OPENCL_SOA_Q return true; #endif // GGML_OPENCL_SOA_Q default: return false; } case GGML_OP_SET_ROWS: { // TODO: add support // ref: https://github.com/ggml-org/llama.cpp/pull/14274 #pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)") if (op->src[0]->type != GGML_TYPE_F32) { return false; } switch (op->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: return (op->src[1]->type == GGML_TYPE_I64 || op->src[1]->type == GGML_TYPE_I32); default: return false; } } case GGML_OP_CPY: case GGML_OP_DUP: case GGML_OP_CONT: switch (op->src[0]->type) { case GGML_TYPE_F32: switch (op->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: return true; default: return false; } case GGML_TYPE_F16: switch (op->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: return true; default: return false; } default: return false; } case GGML_OP_SCALE: return op->src[0]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]); case GGML_OP_ADD: if (op->type == GGML_TYPE_F16) { const bool src0_ok = op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32; const bool src1_ok = op->src[1]->type == GGML_TYPE_F16 || op->src[1]->type == GGML_TYPE_F32; if (src0_ok && src1_ok) { return true; } } case GGML_OP_MUL: case GGML_OP_DIV: case GGML_OP_SUB: return (op->src[0]->type == op->src[1]->type) && (op->src[0]->type == op->type) && (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16); case GGML_OP_ADD_ID: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SQR: case GGML_OP_SQRT: return (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) && ggml_is_contiguous(op->src[0]); case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: return ggml_is_contiguous(op->src[0]) && 
op->src[0]->type == GGML_TYPE_F32; case GGML_UNARY_OP_SIGMOID: return ggml_is_contiguous(op->src[0]); case GGML_UNARY_OP_TANH: return (op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32) || (op->src[0]->type == GGML_TYPE_F16 && op->type == GGML_TYPE_F16); default: return false; } case GGML_OP_GLU: switch (ggml_get_glu_op(op)) { case GGML_GLU_OP_GEGLU: case GGML_GLU_OP_REGLU: case GGML_GLU_OP_SWIGLU: case GGML_GLU_OP_SWIGLU_OAI: case GGML_GLU_OP_GEGLU_ERF: case GGML_GLU_OP_GEGLU_QUICK: return ggml_is_contiguous_1(op->src[0]) && (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16); default: return false; } case GGML_OP_CLAMP: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SOFT_MAX: case GGML_OP_NORM: return true; case GGML_OP_RMS_NORM: return op->ne[0] % 4 == 0 && ggml_is_contiguous_rows(op->src[0]); case GGML_OP_REPEAT: return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; // Assuming F32 for now, can be expanded case GGML_OP_PAD: // TODO: add circular padding support for opencl, see https://github.com/ggml-org/llama.cpp/pull/16985 if (ggml_get_op_params_i32(op, 8) != 0) { return false; } return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; case GGML_OP_UPSCALE: { ggml_scale_mode mode = (ggml_scale_mode)(ggml_get_op_params_i32(op, 0) & 0xFF); const bool antialias = (ggml_scale_mode)(ggml_get_op_params_i32(op, 0) & GGML_SCALE_FLAG_ANTIALIAS); return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32 && (mode == GGML_SCALE_MODE_NEAREST || mode == GGML_SCALE_MODE_BILINEAR) && !antialias; } case GGML_OP_CONV_2D: return (op->src[0]->type == GGML_TYPE_F16 && op->src[1]->type == GGML_TYPE_F16 && op->type == GGML_TYPE_F16) || (op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32) || (op->src[0]->type == GGML_TYPE_F16 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32); case GGML_OP_SSM_CONV: return (op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32); case GGML_OP_CONCAT: return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; case GGML_OP_TIMESTEP_EMBEDDING: return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; case GGML_OP_GROUP_NORM: return ggml_is_contiguous(op->src[0]); case GGML_OP_MUL_MAT: if (op->src[0]->type == GGML_TYPE_F16) { return true; } else if (op->src[0]->type == GGML_TYPE_F32) { return op->src[1]->type == GGML_TYPE_F32; } else if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_MXFP4 || op->src[0]->type == GGML_TYPE_Q6_K) { return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); } else if (op->src[0]->type == GGML_TYPE_Q8_0) { return op->src[1]->type == GGML_TYPE_F32; } return false; case GGML_OP_MUL_MAT_ID: if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_Q8_0 || op->src[0]->type == GGML_TYPE_MXFP4) { if (op->src[1]->type == GGML_TYPE_F32) { return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); } } return false; case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: return true; case GGML_OP_DIAG_MASK_INF: return op->ne[3] == 1; case GGML_OP_ROPE: { const int mode = ((const int32_t *) op->op_params)[2]; const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; const bool is_vision = mode == GGML_ROPE_TYPE_VISION; if (is_mrope && !is_vision) { if (op->src[0]->type == GGML_TYPE_F32 || 
op->src[0]->type == GGML_TYPE_F16) { return true; } return false; } if (is_vision) { if (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) { return true; } return false; } return true; } case GGML_OP_IM2COL: return true; case GGML_OP_ARGSORT: { cl_kernel kernel = backend_ctx->kernel_argsort_f32_i32; int max_workgroup_size = backend_ctx->get_kernel_workgroup_size(kernel); int cols = 1; while (cols < op->ne[0]) { cols *= 2; } return cols <= max_workgroup_size && op->src[0]->type == GGML_TYPE_F32; } case GGML_OP_SUM_ROWS: case GGML_OP_MEAN: return op->src[0]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]); case GGML_OP_FLASH_ATTN_EXT: { const ggml_tensor * q = op->src[0]; const ggml_tensor * k = op->src[1]; const ggml_tensor * v = op->src[2]; const int dk = q->ne[0]; const int dv = v->ne[0]; const struct { int dk; int dv; } supported_dims[] = { { 40, 40}, { 64, 64}, { 80, 80}, { 96, 96}, {112, 112}, {128, 128}, {192, 128}, {192, 192}, {256, 256}, }; bool dims_supported = false; for (size_t i = 0; i < sizeof(supported_dims)/sizeof(supported_dims[0]); ++i) { if (supported_dims[i].dk == dk && supported_dims[i].dv == dv) { dims_supported = true; break; } } if (!dims_supported) { return false; } const bool is_f32_f32 = q->type == GGML_TYPE_F32 && k->type == GGML_TYPE_F32 && v->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; const bool is_f16_f16 = q->type == GGML_TYPE_F16 && k->type == GGML_TYPE_F16 && v->type == GGML_TYPE_F16 && op->type == GGML_TYPE_F16; const bool is_f32_f16 = q->type == GGML_TYPE_F32 && k->type == GGML_TYPE_F16 && v->type == GGML_TYPE_F16 && op->type == GGML_TYPE_F32; return is_f32_f32 || is_f16_f16 || is_f32_f16; } default: return false; } } // Forward declaration - implementation appears later in the file. static const char * ggml_backend_opencl_buffer_type_get_name(ggml_backend_buffer_type_t buffer_type); static ggml_guid_t ggml_backend_opencl_guid() { static ggml_guid guid = { 0xde, 0xe0, 0x70, 0xa2, 0x73, 0x4e, 0x4d, 0xbc, 0xb0, 0xc7, 0x4f, 0xd4, 0x6d, 0x4e, 0x90, 0xfe }; return &guid; } static ggml_backend_i ggml_backend_opencl_i = { /* .get_name = */ ggml_backend_opencl_name, /* .free = */ ggml_backend_opencl_free, /* .set_tensor_async = */ NULL, /* ggml_backend_opencl_set_tensor_async */ /* .get_tensor_async = */ NULL, /* ggml_backend_opencl_get_tensor_async */ /* .cpy_tensor_async = */ NULL, /* ggml_backend_opencl_cpy_tensor_async */ /* .synchronize = */ ggml_backend_opencl_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_opencl_graph_compute, /* .event_record = */ NULL, /* .event_wait = */ NULL, /* .graph_optimize = */ NULL, }; ggml_backend_t ggml_backend_opencl_init(void) { ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_opencl_reg(), 0); ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(dev); ggml_backend_t backend = new ggml_backend { /* .guid = */ ggml_backend_opencl_guid(), /* .iface = */ ggml_backend_opencl_i, /* .device = */ dev, /* .context = */ backend_ctx }; return backend; } bool ggml_backend_is_opencl(ggml_backend_t backend) { return backend && backend->iface.get_name == ggml_backend_opencl_name; } // // buffer // struct ggml_backend_opencl_buffer_context { // A buffer context can hold multiple cl_mem objects. This is for flattening // quantized weights and should be used with GGML_OPENCL_SMALL_ALLOC where // each tensor is allocated a separate buffer. 
When flattening is enabled // with small allocation, each tensor is backed by two cl_mem objects (for // quants and scales) packed into a backend_opencl_buffer. ggml_backend_opencl_buffer_context(cl_mem buf) : name("OpenCL") { buffer.push_back(buf); } ~ggml_backend_opencl_buffer_context() { for (cl_mem buf : buffer) { CL_CHECK(clReleaseMemObject(buf)); } for (cl_mem im : img) { CL_CHECK(clReleaseMemObject(im)); } // Delete all extras to trigger their destructors for (ggml_tensor_extra_cl * e : temp_tensor_extras) { delete e; } for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) { delete e; } for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0) { delete e; } for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0_in_use) { delete e; } for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4) { delete e; } for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) { delete e; } for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0) { delete e; } for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0_in_use) { delete e; } } ggml_tensor_extra_cl * ggml_opencl_alloc_temp_tensor_extra() { ggml_tensor_extra_cl * extra; if (temp_tensor_extras.empty()) { extra = new ggml_tensor_extra_cl(); } else { extra = temp_tensor_extras.back(); temp_tensor_extras.pop_back(); } temp_tensor_extras_in_use.push_back(extra); extra->reset(); return extra; } ggml_tensor_extra_cl_q4_0 * ggml_opencl_alloc_temp_tensor_extra_q4_0() { ggml_tensor_extra_cl_q4_0 * extra; if (temp_tensor_extras_q4_0.empty()) { extra = new ggml_tensor_extra_cl_q4_0(); } else { extra = temp_tensor_extras_q4_0.back(); temp_tensor_extras_q4_0.pop_back(); } temp_tensor_extras_q4_0_in_use.push_back(extra); extra->reset(); return extra; } ggml_tensor_extra_cl_mxfp4 * ggml_opencl_alloc_temp_tensor_extra_mxfp4() { ggml_tensor_extra_cl_mxfp4 * extra; if (temp_tensor_extras_mxfp4.empty()) { extra = new ggml_tensor_extra_cl_mxfp4(); } else { extra = temp_tensor_extras_mxfp4.back(); temp_tensor_extras_mxfp4.pop_back(); } temp_tensor_extras_mxfp4_in_use.push_back(extra); extra->reset(); return extra; } ggml_tensor_extra_cl_q8_0 * ggml_opencl_alloc_temp_tensor_extra_q8_0() { ggml_tensor_extra_cl_q8_0 * extra; if (temp_tensor_extras_q8_0.empty()) { extra = new ggml_tensor_extra_cl_q8_0(); } else { extra = temp_tensor_extras_q8_0.back(); temp_tensor_extras_q8_0.pop_back(); } temp_tensor_extras_q8_0_in_use.push_back(extra); extra->reset(); return extra; } void reset() { for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) { temp_tensor_extras.push_back(e); } temp_tensor_extras_in_use.clear(); for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0_in_use) { temp_tensor_extras_q4_0.push_back(e); } temp_tensor_extras_q4_0_in_use.clear(); for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) { temp_tensor_extras_mxfp4.push_back(e); } temp_tensor_extras_mxfp4_in_use.clear(); for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0_in_use) { temp_tensor_extras_q8_0.push_back(e); } temp_tensor_extras_q8_0_in_use.clear(); } // Pools for extras. Available extras are in `temp_tensor_extras`. Extras // being used are in `temp_tensor_extras_in_use`. At the first run, new // extras get created and put in `in_use`. When the buffer is reset via // the `reset` callback, all extras in `in_use` get moved to available extras // for reuse. 
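    // Rough lifecycle of the extras pool over the lifetime of one buffer:
    //   eval #1 : ggml_opencl_alloc_temp_tensor_extra*() creates N extras -> *_in_use
    //   reset() : all N extras are moved back to the free lists
    //   eval #2+: the same extras are handed out again, so no further heap allocation
    //             is needed once the pool is warm.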
    std::vector<ggml_tensor_extra_cl *> temp_tensor_extras;
    std::vector<ggml_tensor_extra_cl *> temp_tensor_extras_in_use;
    std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0;
    std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0_in_use;
    std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4;
    std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4_in_use;
    std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0;
    std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0_in_use;

    // The buffer_context is initially created by ggml_backend_buft_alloc_buffer
    // before any tensor is initialized (at the beginning of alloc_tensor_range).
    // Hence, there is always a buffer object in this vector. When each tensor is
    // being initialized, this original buffer object will be released if both
    // flattening and small allocation are enabled, and additional buffer
    // objects will be created in init_tensor to represent flattened quantized
    // weights.
    std::vector<cl_mem> buffer;

    // These are image1d_buffer_t objects that wrap around the quants and scales.
    // For Q4_0 quantization, there should be two of them - one for quants and
    // one for scales. They should be populated only when flattening and small
    // allocation are enabled.
    std::vector<cl_mem> img;

    std::string name;
};

static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
    delete ctx;
}

static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) {
    ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(buffer->buft->device);
    return (void *) (uintptr_t) backend_ctx->alignment;
}

static enum ggml_status ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;

    ggml_cl2_init(buffer->buft->device);

    if (tensor->view_src != nullptr) {
        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);

        ggml_tensor_extra_cl * view_extra = (ggml_tensor_extra_cl *) tensor->view_src->extra;
        GGML_ASSERT(view_extra && "view_extra is nullptr?");

        // Reuse extra of the parent tensor. The offset of this view tensor
        // becomes `extra->offset + view_offs` and needs to be calculated when
        // it is used. This change is needed because of the change to
        // ggml_alloc.c in https://github.com/ggerganov/llama.cpp/pull/7640.
        // `buffer` passed in here will always be `tensor->buffer`. It is OK
        // to allocate extras from the same buffer context for ordinary
        // intermediate tensors. But for views into kv cache tensors, doing so
        // would mess up the extras used by kv cache.
        // Before #7640, `buffer` is for intermediate tensors, which is always
        // different from that of kv cache tensors.
        //
        // NB: now extra->offset no longer accounts for view_offs.
        // NB: this should not apply to weight tensors (for end-to-end runs, but
        //     may apply for test-backend-ops).
        // FIXME: if any unexpected results are seen, double check the offset -
        //        there could be other places that need fix.
        tensor->extra = view_extra;
    } else {
        {
            size_t offset = (char *) tensor->data - (char *) ggml_backend_opencl_buffer_get_base(buffer);

            ggml_tensor_extra_cl * extra = ctx->ggml_opencl_alloc_temp_tensor_extra();
            extra->offset = offset;
            extra->data_device = ctx->buffer[0];
            extra->actual_size = ggml_nbytes(tensor);

            tensor->extra = extra;
        }
    }
    return GGML_STATUS_SUCCESS;
}

// The optimized gemm and gemv kernels are used for large matrices without batch.
// tensor is the quantized weights matrix.
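// With the thresholds below this means roughly: both ne[0] and ne[1] must be at
// least 512 (128 for older Adreno CL compilers) and the tensor must not be
// batched (ne[2] == ne[3] == 1).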
inline bool use_adreno_kernels(const ggml_backend_opencl_context *backend_ctx, const ggml_tensor *tensor) {
    int64_t threshold_ne0 = 512;
    int64_t threshold_ne1 = 512;
    if (!backend_ctx->adreno_cl_compiler_version.newer_than_or_same(E031, 38, 11, 0) &&
         backend_ctx->adreno_cl_compiler_version.type != DX) {
        threshold_ne0 = 128;
        threshold_ne1 = 128;
    }
    return tensor->ne[0] >= threshold_ne0 && tensor->ne[1] >= threshold_ne1 &&
            tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

inline bool use_adreno_moe_kernels(const ggml_backend_opencl_context *backend_ctx, const ggml_tensor *tensor) {
    GGML_UNUSED(backend_ctx);
    int ne01 = tensor->ne[1];
    return ((strstr(tensor->name, "ffn") != NULL) || (strstr(tensor->name, "as") != NULL)) && (ne01 % 64 == 0);
}

static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor,
                                                  const void * data, size_t offset, size_t size) {
    ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer->buft->device);

    cl_context context = backend_ctx->context;
    cl_command_queue queue = backend_ctx->queue;

#ifdef GGML_OPENCL_SOA_Q
    // We separate the quantized bits and scale from block_q4_0 by using an
    // additional kernel, where each thread handles a block. We first read the
    // original weights into a temporary buffer, then create two separate
    // buffers for quantized bits and scales, which are then populated by the
    // conversion kernel.
    if (tensor->type == GGML_TYPE_Q4_0) {
        // Tensors should have been preallocated, therefore they should
        // already have ggml_tensor_extra_cl as extra.
        ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
        GGML_ASSERT(extra_orig && "Tensors in OpenCL backend should have been allocated and initialized");

        // Allocate the new extra and create aliases from the original.
        ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
        ggml_tensor_extra_cl_q4_0 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_q4_0();

        size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
        size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2;
        GGML_ASSERT(size_d + size_q == ggml_nbytes(tensor) && "Incorrect tensor size");

        cl_int err;
        cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
            ggml_nbytes(tensor), NULL, &err);
        CL_CHECK(err);
        CL_CHECK(clEnqueueWriteBuffer(
            queue, data_device, CL_TRUE, 0,
            ggml_nbytes(tensor), data, 0, NULL, NULL));

        // We always take the specified offset arg into account, although for
        // weights the offset arg should be 0 (we do not assert this).
        //GGML_ASSERT(offset == 0);

        // We create subbuffers from the original tensor buffer for scales and
        // quants - i.e., scales and quants are aliases into the buffer object
        // that backs the original tensor. This is a cleaner way to adapt to the
        // new memory management.
        // In the old code, we allocate new buffers for scales and quants
        // respectively, which could still be done but would result in double
        // allocation; properly deallocating the preallocated buffer that backs
        // the tensors is tricky and would leak the backend specific information
        // into the general backend code.
        // Does this create misaligned subbuffers (alignment is 1024) in certain
        // cases ?
        cl_buffer_region region;

        // The original tensor memory is divided into scales and quants, i.e.,
        // we first store scales, then quants.
        // Create subbuffer for scales.
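        // Resulting layout inside the original allocation, for ne = ggml_nelements(tensor)
        // (Q4_0 uses 32-element blocks, see QK4_0 in the debug section below):
        //   [ scales d : ne/32 * sizeof(ggml_fp16_t) ][ quants q : ne/2 bytes ]
        // with each sub-buffer origin rounded up to backend_ctx->alignment by align_to().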
region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment); region.size = size_d; extra->d = clCreateSubBuffer( extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); auto previous_origin = region.origin; // Create subbuffer for quants. region.origin = align_to(previous_origin + size_d, backend_ctx->alignment); region.size = size_q; extra->q = clCreateSubBuffer( extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); //cl_kernel kernel = backend_ctx->kernel_convert_block_q4_0; #ifdef GGML_OPENCL_USE_ADRENO_KERNELS cl_kernel kernel = backend_ctx->kernel_convert_block_q4_0; // The optimized kernels need weights in natural order, so unshuffle. if (use_adreno_kernels(backend_ctx, tensor)) { kernel = backend_ctx->kernel_convert_block_q4_0_noshuffle; } #else cl_kernel kernel = backend_ctx->kernel_convert_block_q4_0; #endif // GGML_OPENCL_USE_ADRENO_KERNELS CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d)); size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[] = {64, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clReleaseMemObject(data_device)); tensor->extra = extra; // transpose the weights and scales #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // Only do transpose for large, non batched matrix // TODO: use preallocated images instead of sub-buffer then image if (use_adreno_kernels(backend_ctx, tensor)) { // <----------------------------------------------------------------------------------> // // start transpose // <----------------------------------------------------------------------------------> // int M = tensor->ne[1]; // ne01 int K = tensor->ne[0]; // ne00 //For matrix-vector multiplication kernel, we assume K is a multiple of 32 GGML_ASSERT(K % 32 == 0); //For transpose kernels, we assume K is a multiple of 4 (satisfied by prior assert), and M is a multiple of 4 GGML_ASSERT(M % 4 == 0); // transpose is out of place, so we need to allocate transposed buffers // <----------------------------------------------------------------------------------> // // use sub_buffer of max buffer size instead size_t q_size_bytes = K * M / 8 * sizeof(float); backend_ctx->prealloc_quant_trans.allocate(context, q_size_bytes); cl_buffer_region region; region.origin = 0; region.size = q_size_bytes; cl_mem qT_d = clCreateSubBuffer( backend_ctx->prealloc_quant_trans.buffer, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); bool K_tile_trans = true; if ((K / 32) % 4 != 0){ K_tile_trans =false; } size_t d_size_bytes = M * (K / 32) * 2; backend_ctx->prealloc_scales_trans.allocate(context, d_size_bytes); region.origin = 0; region.size = d_size_bytes; cl_mem dT_d = clCreateSubBuffer( backend_ctx->prealloc_scales_trans.buffer, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); // <----------------------------------------------------------------------------------> // // create images from the buffers // <----------------------------------------------------------------------------------> // cl_mem q_d_image1D; cl_mem d_d_image1D; cl_mem qT_d_image1D; cl_mem dT_d_image1D; cl_image_format img_fmt_1d = { CL_RGBA, CL_HALF_FLOAT }; cl_image_desc 
img_desc_1d; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.image_width = M * K / 4 / 4; img_desc_1d.buffer = extra->q; q_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err); CL_CHECK(err); img_fmt_1d = { CL_RGBA, CL_HALF_FLOAT }; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.image_width = M * K / 4 / 4; img_desc_1d.buffer = qT_d; qT_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err); CL_CHECK(err); memset(&img_desc_1d, 0, sizeof(img_desc_1d)); if (K_tile_trans) { img_fmt_1d = { CL_RGBA, CL_HALF_FLOAT }; img_desc_1d.image_width = M * K / 32 / 4; } else { img_fmt_1d = { CL_R, CL_HALF_FLOAT }; img_desc_1d.image_width = M * K / 32; } img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.buffer = extra->d; d_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err); CL_CHECK(err); img_fmt_1d = { CL_RGBA, CL_HALF_FLOAT }; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.image_width = M * K / 32 / 4; img_desc_1d.buffer = dT_d; dT_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err); CL_CHECK(err); // <----------------------------------------------------------------------------------> // // set up and call the transpose kernels // <----------------------------------------------------------------------------------> // // weights int height_q = M / 4; int width_q = K / 4 / 4; kernel = backend_ctx->kernel_transpose_16; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_d_image1D)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &qT_d_image1D)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_q)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_q)); size_t local_size_q[3] = {4, 16, 1}; size_t global_size_q[3] = {static_cast(width_q), static_cast(height_q), 1}; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_size_q, local_size_q, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); // scales int height_s = M / 4; int width_s = K / 32 / 4; kernel = backend_ctx->kernel_transpose_16; if (!K_tile_trans) { kernel = backend_ctx->kernel_transpose_16_4x1; width_s = K / 32; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_d_image1D)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &dT_d_image1D)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_s)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_s)); size_t local_size_s[3] = {4, 16, 1}; size_t global_size_s[3] = {static_cast(width_s), static_cast(height_s), 1}; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_size_s, local_size_s, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); // <----------------------------------------------------------------------------------> // // copy transposed buffer contents to original buffers // <----------------------------------------------------------------------------------> // // weights CL_CHECK(clEnqueueCopyBuffer(queue, qT_d, extra->q, 0, 0, q_size_bytes, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); // scales CL_CHECK(clEnqueueCopyBuffer(queue, dT_d, extra->d, 0, 0, d_size_bytes, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); // <----------------------------------------------------------------------------------> // // deallocate transpose buffers // 
<----------------------------------------------------------------------------------> // CL_CHECK(clReleaseMemObject(qT_d)); CL_CHECK(clReleaseMemObject(dT_d)); // deallocate temporary images CL_CHECK(clReleaseMemObject(q_d_image1D)); CL_CHECK(clReleaseMemObject(d_d_image1D)); CL_CHECK(clReleaseMemObject(qT_d_image1D)); CL_CHECK(clReleaseMemObject(dT_d_image1D)); // <----------------------------------------------------------------------------------> // // end transpose // <----------------------------------------------------------------------------------> // } #endif // GGML_OPENCL_USE_ADRENO_KERNELS return; } if (tensor->type == GGML_TYPE_MXFP4) { ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra; GGML_ASSERT(extra_orig && "Tesnors in OpenCL backend should have been allocated and initialized"); // Allocate the new extra and create aliases from the original. ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; ggml_tensor_extra_cl_mxfp4 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_mxfp4(); size_t size_e = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(char); size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2; GGML_ASSERT(size_e + size_q == ggml_nbytes(tensor) && "Incorrect tensor size"); cl_int err; cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); CL_CHECK(clEnqueueWriteBuffer( queue, data_device, CL_TRUE, 0, ggml_nbytes(tensor), data, 0, NULL, NULL)); // The original tensor memory is divided into scales and quants, i.e., // we first store scales, then quants. cl_buffer_region region; // Create subbuffer for scales. region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment); region.size = size_e; extra->e = clCreateSubBuffer( extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); auto previous_origin = region.origin; // Create subbuffer for quants. 
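        // MXFP4 differs from the Q4_0 path above only in the scale type: one 1-byte
        // scale (e) per QK_MXFP4-element block instead of an fp16 delta, so the layout is
        //   [ e : ne/QK_MXFP4 bytes ][ q : ne/2 bytes ]
        // again with aligned sub-buffer origins.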
region.origin = align_to(previous_origin + size_e, backend_ctx->alignment); region.size = size_q; extra->q = clCreateSubBuffer( extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); #ifdef GGML_OPENCL_USE_ADRENO_KERNELS if (use_adreno_moe_kernels(backend_ctx, tensor)) { cl_kernel kernel = backend_ctx->kernel_convert_block_mxfp4_trans; int ne00 = tensor->ne[0]; int ne01 = tensor->ne[1]; int ne02 = tensor->ne[2]; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->e)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne01)); size_t global_work_size[3] = {static_cast(((ne01 + 63) / 64) * 64), static_cast(ne00 / 32), static_cast(ne02)}; size_t local_work_size[3] = {64, 2, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clReleaseMemObject(data_device)); tensor->extra = extra; return; } #endif cl_kernel kernel = backend_ctx->kernel_convert_block_mxfp4; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->e)); size_t global_work_size[3] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[3] = {64, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clReleaseMemObject(data_device)); // Create image for Q cl_image_format img_format_q = {CL_RG, CL_UNSIGNED_INT32}; cl_image_desc img_desc_q = { CL_MEM_OBJECT_IMAGE1D_BUFFER, static_cast(ggml_nelements(tensor)/32*2), 0, 0, 0, 0, 0, 0, 0, { extra->q } }; extra->q_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_format_q, &img_desc_q, NULL, &err); tensor->extra = extra; return; } if (tensor->type == GGML_TYPE_Q8_0) { ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra; GGML_ASSERT(extra_orig && "Tesnors in OpenCL backend should have been allocated and initialized"); // Allocate the new extra and create aliases from the original. ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; ggml_tensor_extra_cl_q8_0 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_q8_0(); size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t); size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*(ggml_blck_size(tensor->type)*sizeof(char)); GGML_ASSERT(size_d + size_q == ggml_nbytes(tensor) && "Incorrect tensor size"); cl_int err; cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); CL_CHECK(clEnqueueWriteBuffer( queue, data_device, CL_TRUE, 0, ggml_nbytes(tensor), data, 0, NULL, NULL)); // The original tensor memory is divided into scales and quants, i.e., // we first store scales, then quants. cl_buffer_region region; // Create subbuffer for scales. 
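        // Q8_0 quants are full bytes, so here size_q equals ggml_nelements(tensor)
        // and only the fp16 scales (d) are split out in front of them.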
region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment); region.size = size_d; extra->d = clCreateSubBuffer( extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); auto previous_origin = region.origin; // Create subbuffer for quants. region.origin = align_to(previous_origin + size_d, backend_ctx->alignment); region.size = size_q; extra->q = clCreateSubBuffer( extra_orig->data_device, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); cl_kernel kernel = backend_ctx->kernel_convert_block_q8_0; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d)); size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[] = {64, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clReleaseMemObject(data_device)); tensor->extra = extra; return; } #endif // GGML_OPENCL_SOA_Q ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra; GGML_ASSERT(extra); CL_CHECK(clEnqueueWriteBuffer( queue, extra->data_device, CL_TRUE, extra->offset + offset, size, data, 0, NULL, NULL)); GGML_UNUSED(buffer); } static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_ASSERT(tensor->extra); ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer->buft->device); cl_context context = backend_ctx->context; cl_command_queue queue = backend_ctx->queue; // Make sure all previously submitted commands in other devices are finished. sync_with_other_backends(backend_ctx); #ifdef GGML_OPENCL_SOA_Q // In end-to-end runs, get_tensor is usually used to get back the logits, // where we can simply do clEnqueueReadBuffer since they are f32. // However, in test-backend-ops, the GPU graph is copied to the CPU backend, // which requires reading back quantized weight tensors. // To properly support this, we need to restore block_q4_0 struct arrays // from the flattened buffers. 
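    // I.e., the d/q sub-buffers are recombined into the interleaved representation
    //   struct block_q4_0 { ggml_fp16_t d; uint8_t qs[QK4_0/2]; };  // 18 bytes per 32 weights
    // (the same definition used by the debug helpers below) before copying to the host.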
if (tensor->type == GGML_TYPE_Q4_0) { ggml_tensor_extra_cl_q4_0 * extra = (ggml_tensor_extra_cl_q4_0 *)tensor->extra; #ifdef GGML_OPENCL_USE_ADRENO_KERNELS if (use_adreno_kernels(backend_ctx, tensor)) { cl_int err; cl_kernel kernel; cl_int M = tensor->ne[1]; // ne01 cl_int K = tensor->ne[0]; // ne00 GGML_ASSERT(K % 32 == 0); GGML_ASSERT(M % 4 == 0); size_t size_q = (ggml_nelements(tensor)/ggml_blck_size(tensor->type))*ggml_blck_size(tensor->type)/2; size_t size_d = (ggml_nelements(tensor)/ggml_blck_size(tensor->type))*sizeof(ggml_fp16_t); GGML_ASSERT(size_d + size_q == ggml_nbytes(tensor) && "Incorrect tensor size"); cl_mem buf_trans_q; cl_mem buf_trans_d; CL_CHECK((buf_trans_q = clCreateBuffer(context, CL_MEM_READ_WRITE, size_q, NULL, &err), err)); CL_CHECK((buf_trans_d = clCreateBuffer(context, CL_MEM_READ_WRITE, size_d, NULL, &err), err)); kernel = backend_ctx->kernel_transpose_16_buf; // transpose q back cl_int stride_k_q = K/4; size_t local_size_q[3] = {64, 1, 1}; size_t global_size_q[3] = {(size_t)M, (size_t)stride_k_q, 1}; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &buf_trans_q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_int), &M)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_int), &stride_k_q)); CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_size_q, local_size_q, 0, NULL, NULL)); // transpose scales back cl_int stride_k_d = K/32; size_t local_size_d[3] = {64, 1, 1}; size_t global_size_d[3] = {(size_t)M, (size_t)stride_k_d, 1}; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->d)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &buf_trans_d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_int), &M)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_int), &stride_k_d)); CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_size_d, local_size_d, 0, NULL, NULL)); // unpack cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); cl_uchar mask_0F = 0x0F; cl_uchar mask_F0 = 0xF0; size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[] = {1, 1, 1}; kernel = backend_ctx->kernel_restore_block_q4_0_noshuffle; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &buf_trans_q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &buf_trans_d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_uchar), &mask_0F)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_uchar), &mask_F0)); CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); // read back to host CL_CHECK(clEnqueueReadBuffer( queue, data_device, CL_TRUE, offset, size, data, 0, NULL, NULL)); CL_CHECK(clReleaseMemObject(data_device)); CL_CHECK(clReleaseMemObject(buf_trans_q)); CL_CHECK(clReleaseMemObject(buf_trans_d)); return; } #endif cl_int err; cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); cl_kernel kernel = backend_ctx->kernel_restore_block_q4_0; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[] = {1, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, 
NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clEnqueueReadBuffer( queue, data_device, CL_TRUE, offset, size, data, 0, NULL, NULL)); CL_CHECK(clReleaseMemObject(data_device)); return; } else if (tensor->type == GGML_TYPE_MXFP4) { ggml_tensor_extra_cl_mxfp4 * extra = (ggml_tensor_extra_cl_mxfp4 *)tensor->extra; cl_int err; cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); #ifdef GGML_OPENCL_USE_ADRENO_KERNELS if (use_adreno_moe_kernels(backend_ctx, tensor)) { cl_kernel kernel = backend_ctx->kernel_restore_block_mxfp4_trans; int ne00 = tensor->ne[0]; int ne01 = tensor->ne[1]; int ne02 = tensor->ne[2]; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->e)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_int), &ne01)); size_t global_work_size[3] = {static_cast(((ne01 + 63) / 64) * 64), static_cast(ne00 / 32), static_cast(ne02)}; size_t local_work_size[3] = {64, 2, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clEnqueueReadBuffer( queue, data_device, CL_TRUE, offset, size, data, 0, NULL, NULL)); CL_CHECK(clReleaseMemObject(data_device)); return; } #endif cl_kernel kernel = backend_ctx->kernel_restore_block_mxfp4; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->e)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[] = {1, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clEnqueueReadBuffer( queue, data_device, CL_TRUE, offset, size, data, 0, NULL, NULL)); CL_CHECK(clReleaseMemObject(data_device)); return; } if (tensor->type == GGML_TYPE_Q8_0) { ggml_tensor_extra_cl_q8_0 * extra = (ggml_tensor_extra_cl_q8_0 *)tensor->extra; cl_int err; cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); cl_kernel kernel = backend_ctx->kernel_restore_block_q8_0; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; size_t local_work_size[] = {1, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); CL_CHECK(clWaitForEvents(1, &evt)); CL_CHECK(clEnqueueReadBuffer( queue, data_device, CL_TRUE, offset, size, data, 0, NULL, NULL)); CL_CHECK(clReleaseMemObject(data_device)); return; } #endif // GGML_OPENCL_SOA_Q ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra; CL_CHECK(clEnqueueReadBuffer( queue, extra->data_device, CL_TRUE, extra->offset + tensor->view_offs + offset, size, data, 0, NULL, NULL)); GGML_UNUSED(buffer); } static void ggml_backend_opencl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { ggml_backend_dev_t dev = buffer->buft->device; 
ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(dev); cl_command_queue queue = backend_ctx->queue; ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; for (cl_mem buf : ctx->buffer) { CL_CHECK(clEnqueueFillBuffer(queue, buf, &value, sizeof(value), 0, buffer->size, 0, NULL, NULL)); } CL_CHECK(clFinish(queue)); } static void ggml_backend_opencl_buffer_reset(ggml_backend_buffer_t buffer) { ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; ctx->reset(); } static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = { /* .free_buffer = */ ggml_backend_opencl_buffer_free_buffer, /* .get_base = */ ggml_backend_opencl_buffer_get_base, /* .init_tensor = */ ggml_backend_opencl_buffer_init_tensor, /* .memset_tensor = */ NULL, /* .set_tensor = */ ggml_backend_opencl_buffer_set_tensor, /* .get_tensor = */ ggml_backend_opencl_buffer_get_tensor, /* .cpy_tensor = */ NULL, /* .clear = */ ggml_backend_opencl_buffer_clear, /* .reset = */ ggml_backend_opencl_buffer_reset, }; // // buffer type // static const char * ggml_backend_opencl_buffer_type_get_name(ggml_backend_buffer_type_t buffer_type) { return "OpenCL"; GGML_UNUSED(buffer_type); } static ggml_backend_buffer_t ggml_backend_opencl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buffer_type, size_t size) { ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer_type->device); // clCreateBuffer returns -61 for size 0 size = std::max(size, (size_t)1); cl_int err; cl_mem mem = clCreateBuffer(backend_ctx->context, CL_MEM_READ_WRITE, size, NULL, &err); if (err != CL_SUCCESS) { GGML_LOG_INFO("%s: failed to allocate %.2f MiB\n", __func__, size / 1024.0 / 1024.0); return nullptr; } ggml_backend_opencl_buffer_context * ctx = new ggml_backend_opencl_buffer_context(mem); return ggml_backend_buffer_init(buffer_type, ggml_backend_opencl_buffer_interface, ctx, size); } static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) { ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(buffer_type->device); return backend_ctx->alignment; } static size_t ggml_backend_opencl_buffer_type_get_max_size(ggml_backend_buffer_type_t buffer_type) { static size_t max_size = -1; if (max_size == (size_t)-1) { ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(buffer_type->device); max_size = backend_ctx->max_alloc_size; } return max_size; } static bool ggml_backend_opencl_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { return ggml_backend_is_opencl(backend); UNUSED(buft); } static ggml_backend_buffer_type_i ggml_backend_opencl_buffer_type_interface = { /* .get_name = */ ggml_backend_opencl_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_opencl_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_opencl_buffer_type_get_alignment, /* .get_max_size = */ ggml_backend_opencl_buffer_type_get_max_size, /* .get_alloc_size = */ NULL, /* .is_host = */ NULL, }; // // backend device // static const char * ggml_backend_opencl_device_get_name(ggml_backend_dev_t dev) { return "GPUOpenCL"; GGML_UNUSED(dev); } static const char * ggml_backend_opencl_device_get_description(ggml_backend_dev_t dev) { ggml_backend_opencl_device_context *dev_ctx = (ggml_backend_opencl_device_context *) dev->context; return dev_ctx->device_name.c_str(); } static void ggml_backend_opencl_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { *free = 1; *total = 1; 
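    // Dummy values: core OpenCL has no portable way to query *free* device memory,
    // so a placeholder of 1 byte is reported for both free and total.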
GGML_UNUSED(dev); } static enum ggml_backend_dev_type ggml_backend_opencl_device_get_type(ggml_backend_dev_t dev) { return GGML_BACKEND_DEVICE_TYPE_GPU; GGML_UNUSED(dev); } static void ggml_backend_opencl_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_opencl_device_get_name(dev); props->description = ggml_backend_opencl_device_get_description(dev); props->type = ggml_backend_opencl_device_get_type(dev); ggml_backend_opencl_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = ggml_backend_dev_caps { /* .async = */ false, /* .host_buffer = */ false, /* .buffer_from_host_ptr = */ false, /* .events = */ false, }; } static ggml_backend_t ggml_backend_opencl_device_init(ggml_backend_dev_t dev, const char * params) { ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(dev); // Getting a new reference to the backend, increase ref_count backend_ctx->ref_count++; ggml_backend_t backend = new ggml_backend { /* .guid = */ ggml_backend_opencl_guid(), /* .interface = */ ggml_backend_opencl_i, /* .device = */ dev, /* .context = */ backend_ctx, }; return backend; GGML_UNUSED(params); } static ggml_backend_buffer_type_t ggml_backend_opencl_device_get_buffer_type(ggml_backend_dev_t dev) { auto * dev_ctx = static_cast(dev->context); dev_ctx->buffer_type = ggml_backend_buffer_type{ /* .iface = */ ggml_backend_opencl_buffer_type_interface, /* .device = */ dev, /* .context = */ nullptr, }; return &dev_ctx->buffer_type; } static ggml_backend_buffer_t ggml_backend_opencl_device_buffer_from_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { GGML_UNUSED(dev); GGML_UNUSED(ptr); GGML_UNUSED(size); GGML_UNUSED(max_tensor_size); return nullptr; } static bool ggml_backend_opencl_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { return ggml_opencl_supports_op(dev, op); } static bool ggml_backend_opencl_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { // Check 'dev' and 'buffer_type' are not objects belonging to this backend. if (dev->iface.get_name != ggml_backend_opencl_device_get_name || buft->iface.get_name != ggml_backend_opencl_buffer_type_get_name) { return false; } // Check cl_context is the same. clEnqueue* commands may not use // buffers from another cl_context. 
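    // I.e., a buffer type is only usable by this backend if its device was probed
    // into the same cl_context; otherwise tensors must be copied between backends
    // instead of being shared directly.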
ggml_backend_opencl_context * backend_ctx0 = ggml_cl2_init(dev); ggml_backend_opencl_context * backend_ctx1 = ggml_cl2_init(buft->device); return backend_ctx0->context == backend_ctx1->context; } namespace /* anonymous */ { struct ggml_backend_device_i ggml_backend_opencl_device_i = { /* .get_name = */ ggml_backend_opencl_device_get_name, /* .get_description = */ ggml_backend_opencl_device_get_description, /* .get_memory = */ ggml_backend_opencl_device_get_memory, /* .get_type = */ ggml_backend_opencl_device_get_type, /* .get_props = */ ggml_backend_opencl_device_get_props, /* .init_backend = */ ggml_backend_opencl_device_init, /* .get_buffer_type = */ ggml_backend_opencl_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, /* .buffer_from_host_ptr = */ ggml_backend_opencl_device_buffer_from_ptr, /* .supports_op = */ ggml_backend_opencl_device_supports_op, /* .supports_buft = */ ggml_backend_opencl_device_supports_buft, /* .offload_op = */ NULL, /* .event_new = */ NULL, /* .event_free = */ NULL, /* .event_synchronize = */ NULL, }; } // Backend registry static const char * ggml_backend_opencl_reg_get_name(ggml_backend_reg_t reg) { return "OpenCL"; GGML_UNUSED(reg); } static size_t ggml_backend_opencl_reg_device_count(ggml_backend_reg_t reg) { return g_ggml_backend_opencl_devices.size(); GGML_UNUSED(reg); } static ggml_backend_dev_t ggml_backend_opencl_reg_device_get(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(index < ggml_backend_opencl_reg_device_count(reg)); return &g_ggml_backend_opencl_devices[index]; GGML_UNUSED(reg); GGML_UNUSED(index); } static struct ggml_backend_reg_i ggml_backend_opencl_reg_i = { /* .get_name = */ ggml_backend_opencl_reg_get_name, /* .device_count = */ ggml_backend_opencl_reg_device_count, /* .device_get = */ ggml_backend_opencl_reg_device_get, /* .get_proc_address = */ NULL, }; ggml_backend_reg_t ggml_backend_opencl_reg(void) { static std::mutex mutex; static ggml_backend_reg reg; static bool initialized = false; std::lock_guard lock(mutex); if (initialized) { return ® } initialized = true; g_ggml_backend_opencl_devices = ggml_opencl_probe_devices(®); reg = ggml_backend_reg{ /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_opencl_reg_i, /* .context = */ NULL, }; return ® } GGML_BACKEND_DL_IMPL(ggml_backend_opencl_reg) //------------------------------------------------------------------------------ // Debugging utils //------------------------------------------------------------------------------ #if 0 #define QK4_0 32 typedef struct { ggml_fp16_t d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); #include #ifdef __cplusplus #include "half.hpp" #endif static void dump_tensor(ggml_backend_t backend, const struct ggml_tensor * tensor) { void * buf = malloc(ggml_nbytes(tensor)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; cl_command_queue queue = backend_ctx->queue; #ifdef GGML_OPENCL_SOA_Q void * buf_q; void * buf_d; #endif // Make sure everything is done. 
CL_CHECK(clFinish(queue)); #ifdef GGML_OPENCL_SOA_Q if (tensor->type == GGML_TYPE_Q4_0) { ggml_tensor_extra_cl_q4_0 * extra = (ggml_tensor_extra_cl_q4_0 *) tensor->extra; GGML_ASSERT(extra); size_t size_q = ggml_nelements(tensor)/QK4_0 * QK4_0/2; size_t size_d = ggml_nelements(tensor)/QK4_0 * sizeof(ggml_fp16_t); GGML_ASSERT(size_q + size_d == ggml_nbytes(tensor)); buf_q = malloc(size_q); buf_d = malloc(size_d); CL_CHECK(clEnqueueReadBuffer(queue, extra->q, CL_TRUE, 0, size_q, buf_q, 0, NULL, NULL)); CL_CHECK(clEnqueueReadBuffer(queue, extra->d, CL_TRUE, 0, size_d, buf_d, 0, NULL, NULL)); CL_CHECK(clFinish(queue)); } else if (tensor->type == GGML_TYPE_MXFP4) { ggml_tensor_extra_cl_mxfp4 * extra = (ggml_tensor_extra_cl_mxfp4 *) tensor->extra; GGML_ASSERT(extra); size_t size_q = ggml_nelements(tensor)/QK_MXFP4 * QK_MXFP4/2; size_t size_e = ggml_nelements(tensor)/QK_MXFP4 * sizeof(char); GGML_ASSERT(size_q + size_e == ggml_nbytes(tensor)); buf_q = malloc(size_q); buf_d = malloc(size_e); CL_CHECK(clEnqueueReadBuffer(queue, extra->q, CL_TRUE, 0, size_q, buf_q, 0, NULL, NULL)); CL_CHECK(clEnqueueReadBuffer(queue, extra->d, CL_TRUE, 0, size_e, buf_d, 0, NULL, NULL)); CL_CHECK(clFinish(queue)); } else { // Read out the tensor from GPU memory. ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra; GGML_ASSERT(extra); CL_CHECK(clEnqueueReadBuffer(queue, extra->data_device, CL_TRUE, extra->offset, ggml_nbytes(tensor), buf, 0, NULL, NULL)); CL_CHECK(clFinish(queue)); } #else // Read out the tensor from GPU memory. ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra; GGML_ASSERT(extra); CL_CHECK(clEnqueueReadBuffer(queue, extra->data_device, CL_TRUE, extra->offset, ggml_nbytes(tensor), buf, 0, NULL, NULL)); CL_CHECK(clFinish(queue)); #endif // GGML_OPENCL_SOA_Q // Open file and dump. 
char fname[512]; snprintf(fname, sizeof(fname), "./tensor-dumps/%s.txt", tensor->name); FILE * f = fopen(fname, "w"); if (!f) { printf("Failed to open %s\n", fname); return; } if (tensor->type == GGML_TYPE_F32) { float * data = (float *) buf; for (int i = 0; i < ggml_nelements(tensor); ++i) { if (isnan(data[i])) { printf("NaN found: %s\n", tensor->name); break; } fprintf(f, "%f\n", data[i]); } } else if (tensor->type == GGML_TYPE_I32) { int * data = (int *) buf; for (int i = 0; i < ggml_nelements(tensor); ++i) { if (isnan(data[i])) { printf("NaN found: %s\n", tensor->name); break; } fprintf(f, "%d\n", data[i]); } } else if (tensor->type == GGML_TYPE_F16) { #ifdef __cplusplus half_float::half * data = (half_float::half *) buf; for (int i = 0; i < ggml_nelements(tensor); ++i) { if (std::isnan(data[i])) { printf("NaN found: %s\n", tensor->name); break; } fprintf(f, "%f\n", float(data[i])); } #endif } else if (tensor->type == GGML_TYPE_Q4_0) { #ifdef GGML_OPENCL_SOA_Q ggml_fp16_t * data_d = (ggml_fp16_t *)buf_d; unsigned char * data_q = (unsigned char *)buf_q; for (int i = 0; i < ggml_nelements(tensor)/QK4_0; ++i) { fprintf(f, "%04x, ", data_d[i]); for (int k = 0; k < QK4_0/2; ++k) { fprintf(f, "%02x, ", data_q[k]); } fprintf(f, "\n"); data_q += QK4_0/2; } free(buf_d); free(buf_q); #else block_q4_0 * data = (block_q4_0 *) buf; for (int i = 0; i < ggml_nelements(tensor)/QK4_0; ++i) { fprintf(f, "%04x, ", data[i].d); for (int k = 0; k < QK4_0/2; ++k) { fprintf(f, "%02x, ", data[i].qs[k]); } fprintf(f, "\n"); } #endif // GGML_OPENCL_SOA_Q } free(buf); fflush(f); fclose(f); } #else #define dump_tensor(tensor) #endif //------------------------------------------------------------------------------ // Ops //------------------------------------------------------------------------------ static bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; const int64_t ne1 = dst->ne[1]; // TODO: find the optimal values for these return (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 && (ne0 >= 32 && ne1 >= 32 && ne10 >= 32); } static void ggml_cl_nop(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { UNUSED(backend); UNUSED(src0); UNUSED(src1); UNUSED(dst); } static void ggml_cl_get_rows(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); const int ne00 = src0->ne[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const cl_ulong nb10 = src1->nb[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; 
cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; switch (src0->type) { case GGML_TYPE_F32: kernel = backend_ctx->kernel_get_rows_f32; break; case GGML_TYPE_F16: kernel = backend_ctx->kernel_get_rows_f16; break; case GGML_TYPE_Q4_0: kernel = backend_ctx->kernel_get_rows_q4_0; break; default: GGML_ASSERT(false && "not implemented"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb3)); size_t global_work_size[] = {(size_t)ne10*64, (size_t)ne11, (size_t)ne12}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_set_rows(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src1->type == GGML_TYPE_I64 || src1->type == GGML_TYPE_I32); // ne0 = ne00 // ne2 = ne02 // ne3 = ne03 const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const cl_ulong nb10 = src1->nb[0]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const int ne0 = dst->ne[0]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; const int nblk0 = ne0/ggml_blck_size(dst->type); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; switch (dst->type) { case GGML_TYPE_F32: if (src1->type == GGML_TYPE_I64) { kernel = backend_ctx->kernel_set_rows_f32_i64; } else { kernel = backend_ctx->kernel_set_rows_f32_i32; } break; case GGML_TYPE_F16: if (src1->type == GGML_TYPE_I64) { kernel = backend_ctx->kernel_set_rows_f16_i64; } else { kernel = backend_ctx->kernel_set_rows_f16_i32; } break; default: GGML_ABORT("not implemented"); } fastdiv_vals ne11_ = init_fastdiv_values(ne11); fastdiv_vals ne12_ = init_fastdiv_values(ne12); 
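    // ne11 and ne12 are passed as precomputed fastdiv constants
    // (init_fastdiv_values) so that the kernel can presumably replace the
    // per-row integer division/modulo needed to locate the row index in src1
    // with a multiply-and-shift sequence.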
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(fastdiv_vals), &ne11_)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(fastdiv_vals), &ne12_)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &nblk0)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb3)); int nth0 = 64; if (backend_ctx->gpu_family == INTEL) { nth0 = 32; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; } int max_workgroup_size = backend_ctx->get_kernel_workgroup_size(kernel); while (nth0 < nblk0 && nth0 < max_workgroup_size) { nth0 *= 2; } int rows_per_workgroup = 1; if (nth0 > nblk0) { rows_per_workgroup = nth0 / nblk0; nth0 = nblk0; } size_t global_work_size[] = { (size_t)(ne01 + rows_per_workgroup - 1)/rows_per_workgroup*nth0, (size_t)ne02*rows_per_workgroup, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth0, (size_t)rows_per_workgroup, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_add(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const int ne13 = src1->ne[3]; const cl_ulong nb10 = src1->nb[0]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const cl_ulong nb13 = src1->nb[3]; const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; const int ne2 = dst->ne[2]; const int ne3 = dst->ne[3]; const cl_ulong nb0 = dst->nb[0]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; const bool bcast_row = ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0; if (bcast_row) { 
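        // Broadcast-row fast path: src1 is a single contiguous row applied to
        // every row of src0, and both row lengths are multiples of 4, so the
        // *_row kernels can process the data as flat 4-wide vectors.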
GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ne11 == 1); } if (dst->type == GGML_TYPE_F32) { GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32); if (bcast_row) { kernel = backend_ctx->kernel_add_row; const int ne = ne00 / 4; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); } else { kernel = backend_ctx->kernel_add; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &ne2)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &ne3)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 27, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 28, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 29, sizeof(cl_ulong), &nb3)); } } else if (dst->type == GGML_TYPE_F16) { GGML_ASSERT(src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); const int type_src0 = (src0->type == GGML_TYPE_F32); const int type_src1 = (src1->type == GGML_TYPE_F32); if (bcast_row) { kernel = backend_ctx->kernel_add_row_f16; const int ne = ne00 / 4; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &type_src0)); 
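            // type_src0/type_src1 are 1 when the corresponding input is F32 and
            // 0 when it is already F16; the f16 kernels presumably use these
            // flags to pick the load/convert path for each operand.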
CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &type_src1)); } else { kernel = backend_ctx->kernel_add_f16; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &ne2)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &ne3)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 27, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 28, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 29, sizeof(cl_ulong), &nb3)); CL_CHECK(clSetKernelArg(kernel, 30, sizeof(int), &type_src0)); CL_CHECK(clSetKernelArg(kernel, 31, sizeof(int), &type_src1)); } } else { GGML_ASSERT(false && "unsupported data types for add"); } if (bcast_row) { int n = ggml_nelements(dst)/4; size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; } backend_ctx->enqueue_ndrange_kernel(kernel, 1, global_work_size, local_work_size_ptr, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } static void ggml_cl_add_id(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); const ggml_tensor * src2 = dst->src[2]; GGML_ASSERT(src2); GGML_ASSERT(src2->extra); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(src2->type == GGML_TYPE_I32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous_rows(src0)); const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb21 = 
src2->nb[1]; const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offset2 = extra2->offset + src2->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel = backend_ctx->kernel_add_id; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb21)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne1)); int nth = MIN(ne00, (int) backend_ctx->get_kernel_workgroup_size(kernel)); size_t global_work_size[] = { (size_t)ne01*nth, (size_t)ne02, 1 }; size_t local_work_size[] = { (size_t)nth, 1, 1 }; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_mul(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == src1->type); GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const int ne13 = src1->ne[3]; UNUSED(ne13); const cl_ulong nb10 = src1->nb[0]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const cl_ulong nb13 = src1->nb[3]; UNUSED(nb13); const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; const int ne2 = dst->ne[2]; const int ne3 = dst->ne[3]; const cl_ulong nb0 = dst->nb[0]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; 
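    // As in ggml_cl_add, a contiguous single-row src1 with 4-aligned row
    // lengths takes the vectorized *_row broadcast path; otherwise the general
    // kernel is used, which handles broadcasting via the ne1*/nb1* arguments.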
bool bcast_row = false; cl_kernel kernel; if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { GGML_ASSERT(ggml_is_contiguous(src0)); // src1 is a row GGML_ASSERT(ne11 == 1); bcast_row = true; int ne = ne00 / 4; if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_mul_row; } else { kernel = backend_ctx->kernel_mul_row_f16; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); } else { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_mul; } else { kernel = backend_ctx->kernel_mul_f16; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &ne2)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &ne3)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 27, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 28, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 29, sizeof(cl_ulong), &nb3)); } if (bcast_row) { int n = ggml_nelements(dst)/4; size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. 
} backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } static void ggml_cl_div(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == src1->type); GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const int ne13 = src1->ne[3]; const cl_ulong nb10 = src1->nb[0]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const cl_ulong nb13 = src1->nb[3]; const int ne0 = dst->ne[0]; const cl_ulong nb0 = dst->nb[0]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; bool bcast_row = false; cl_kernel kernel; if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { GGML_ASSERT(ggml_is_contiguous(src0)); // src1 is a row GGML_ASSERT(ne11 == 1); bcast_row = true; int ne = ne00 / 4; if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_div_row; } else { kernel = backend_ctx->kernel_div_row_f16; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); } else { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_div; } else { kernel = backend_ctx->kernel_div_f16; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); 
CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &nb3)); } if (bcast_row) { int n = ggml_nelements(dst)/4; size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } static void ggml_cl_sub(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == src1->type); GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const int ne13 = src1->ne[3]; const cl_ulong nb10 = src1->nb[0]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const cl_ulong nb13 = src1->nb[3]; const int ne0 = dst->ne[0]; const cl_ulong nb0 = dst->nb[0]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; bool bcast_row = false; cl_kernel kernel; if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { GGML_ASSERT(ggml_is_contiguous(src0)); // src1 is a row GGML_ASSERT(ne11 == 1); bcast_row = true; int ne = ne00 / 4; if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sub_row; } else { kernel = backend_ctx->kernel_sub_row_f16; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); 
CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); } else { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sub; } else { kernel = backend_ctx->kernel_sub_f16; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &nb3)); } if (bcast_row) { int n = ggml_nelements(dst)/4; size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } static void ggml_cl_sqr(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; // Currently assumes src0 is contiguous int n = ggml_nelements(dst); if (n % 4 == 0) { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sqr_cont_f32_4; } else { kernel = backend_ctx->kernel_sqr_cont_f16_4; } n /= 4; } else { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sqr_cont_f32; } else { kernel = backend_ctx->kernel_sqr_cont_f16; } } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && 
!backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_sqrt(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; // Currently assumes src0 is contiguous int n = ggml_nelements(dst); if (n % 4 == 0) { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sqrt_cont_f32_4; } else { kernel = backend_ctx->kernel_sqrt_cont_f16_4; } n /= 4; } else { if (src0->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sqrt_cont_f32; } else { kernel = backend_ctx->kernel_sqrt_cont_f16; } } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_mean(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_UNUSED(src1); GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; cl_kernel kernel = backend_ctx->kernel_mean_f32; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 12, 
sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb3)); size_t global_work_size[] = {(size_t)ne01, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_ssm_conv(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; int ne01 = src0->ne[1]; cl_ulong nb00 = src0->nb[0]; cl_ulong nb01 = src0->nb[1]; cl_ulong nb02 = src0->nb[2]; int ne10 = src1->ne[0]; cl_ulong nb11 = src1->nb[1]; int ne1 = dst->ne[1]; int ne2 = dst->ne[2]; cl_ulong nb0 = dst->nb[0]; cl_ulong nb1 = dst->nb[1]; cl_ulong nb2 = dst->nb[2]; cl_kernel kernel = backend_ctx->kernel_ssm_conv_f32_f32; if (ne10 % 4 == 0) { kernel = backend_ctx->kernel_ssm_conv_f32_f32_4; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb2)); size_t global_work_size[] = {(size_t)ne01, (size_t)ne1, (size_t)ne2}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (ne01 % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_gelu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; int n = ggml_nelements(dst); if (n % 4 == 0) { kernel = backend_ctx->kernel_gelu_4; n /= 4; } else { kernel = backend_ctx->kernel_gelu; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); 
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_gelu_erf(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; int n = ggml_nelements(dst); if (n % 4 == 0) { kernel = backend_ctx->kernel_gelu_erf_4; n /= 4; } else { kernel = backend_ctx->kernel_gelu_erf; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_gelu_quick(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; int n = ggml_nelements(dst); if (n % 4 == 0) { kernel = backend_ctx->kernel_gelu_quick_4; n /= 4; } else { kernel = backend_ctx->kernel_gelu_quick; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_silu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; int n = ggml_nelements(dst); if (n % 4 == 0) { kernel = backend_ctx->kernel_silu_4; n /= 4; } else { kernel = backend_ctx->kernel_silu; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); 
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_relu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel = backend_ctx->kernel_relu; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); const int64_t n = ggml_nelements(dst); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_sigmoid(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_sigmoid_f32; } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { kernel = backend_ctx->kernel_sigmoid_f16; } else { GGML_ASSERT(false && "Unsupported data types for sigmoid (input and output must be both f32 or f16)"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); const int64_t n = ggml_nelements(dst); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. 
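        // Without non-uniform work-group support, a local size that does not
        // evenly divide the global size makes clEnqueueNDRangeKernel fail with
        // CL_INVALID_WORK_GROUP_SIZE, hence the driver-chosen fallback.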
} backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_clamp(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; float min; float max; memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float)); memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float)); cl_kernel kernel = backend_ctx->kernel_clamp; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(float), &min)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(float), &max)); const int64_t n = ggml_nelements(dst); size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; float eps; memcpy(&eps, dst->op_params, sizeof(float)); const int ne00 = src0 ? src0->ne[0] : 0; const int ne01 = src0 ? src0->ne[1] : 0; const int ne02 = src0 ? src0->ne[2] : 0; const int ne03 = src0 ? src0->ne[3] : 0; const cl_ulong nb01 = src0 ? src0->nb[1] : 0; const cl_ulong nb02 = src0 ? src0->nb[2] : 0; const cl_ulong nb03 = src0 ? 
src0->nb[3] : 0; const int nth = MIN(64, ne00); cl_kernel kernel = backend_ctx->kernel_norm; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(float), &eps)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float)*nth, NULL)); size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; //ggml_backend_opencl_device_context * dev_ctx = // (ggml_backend_opencl_device_context *)backend->device->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; float eps; memcpy(&eps, dst->op_params, sizeof(float)); const int ne00 = src0 ? src0->ne[0] : 0; const int ne01 = src0 ? src0->ne[1] : 0; const int ne02 = src0 ? src0->ne[2] : 0; const int ne03 = src0 ? src0->ne[3] : 0; const cl_ulong nb01 = src0 ? src0->nb[1] : 0; const cl_ulong nb02 = src0 ? src0->nb[2] : 0; const cl_ulong nb03 = src0 ? src0->nb[3] : 0; GGML_ASSERT(ne00 % 4 == 0); const int nth = MIN(64, ne00); size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; cl_kernel kernel = backend_ctx->kernel_rms_norm; // Note, this kernel declares local memory in kernel args and the size // depends on subgroup size. // Note, this requires OpenCL 2.1 and above // For now we use fixed subgroup size to simplify support for OpenCL 2.0. 
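    // The reduction buffer passed as the last kernel argument holds one float
    // per subgroup (sizeof(float)*nth/sgs), so the host needs the subgroup size
    // up front; it is hard-coded per GPU family (64 on Adreno, 32 on Intel)
    // rather than queried with clGetKernelSubGroupInfo.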
    size_t sgs;
    //CL_CHECK(clGetKernelSubGroupInfo(kernel, dev_ctx->device,
    //    CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE,
    //    sizeof(local_work_size), local_work_size,
    //    sizeof(size_t), &sgs, NULL));
    if (backend_ctx->gpu_family == ADRENO) {
        sgs = 64;
    } else if (backend_ctx->gpu_family == INTEL) {
        sgs = 32;
    } else {
        GGML_ASSERT(false && "Unsupported GPU");
    }

    CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
    CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
    CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
    CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
    CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00));
    CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01));
    CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02));
    CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03));
    CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01));
    CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02));
    CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03));
    CL_CHECK(clSetKernelArg(kernel, 11, sizeof(float), &eps));
    // This is local memory - the size depends on subgroup size.
    CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float)*nth/sgs, NULL));

    backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
}

static void ggml_opencl_op_rms_norm_fused(ggml_backend_t backend, ggml_tensor * rms_norm_tensor, ggml_tensor * mul_tensor) {
    GGML_ASSERT(mul_tensor);
    GGML_ASSERT(rms_norm_tensor);

    // src0 is the src of rms_norm, src1 is the other src of mul (one being rms_norm)
    const ggml_tensor * src0 = rms_norm_tensor->src[0];
    const ggml_tensor * src1;
    if (mul_tensor->src[0] == rms_norm_tensor) {
        src1 = mul_tensor->src[1];
    } else if (mul_tensor->src[1] == rms_norm_tensor) {
        src1 = mul_tensor->src[0];
    } else {
        GGML_ASSERT(false && "Invalid args for rms_norm and mul");
    }
    const ggml_tensor * dst = mul_tensor;

    GGML_ASSERT(src0);
    GGML_ASSERT(src0->extra);
    GGML_ASSERT(src1);
    GGML_ASSERT(src1->extra);
    GGML_ASSERT(dst);
    GGML_ASSERT(dst->extra);

    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
    ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra;
    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;

    cl_ulong offset0 = extra0->offset + src0->view_offs;
    cl_ulong offset1 = extra1->offset + src1->view_offs;
    cl_ulong offsetd = extrad->offset + dst->view_offs;

    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;

    float eps;
    memcpy(&eps, rms_norm_tensor->op_params, sizeof(float));

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const cl_ulong nb01 = src0->nb[1];
    const cl_ulong nb02 = src0->nb[2];
    const cl_ulong nb03 = src0->nb[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    const int ne12 = src1->ne[2];
    const int ne13 = src1->ne[3];

    const cl_ulong nb11 = src1->nb[1];
    const cl_ulong nb12 = src1->nb[2];
    const cl_ulong nb13 = src1->nb[3];

    const cl_ulong nb1 = dst->nb[1];
    const cl_ulong nb2 = dst->nb[2];
    const cl_ulong nb3 = dst->nb[3];

    GGML_ASSERT(ne00 % 4 == 0);

    size_t sgs;
    if (backend_ctx->gpu_family == ADRENO) {
        sgs = 64;
    } else if (backend_ctx->gpu_family == INTEL) {
        sgs = 32;
    } else {
        GGML_ASSERT(false && "Unsupported GPU");
    }

    cl_kernel kernel = backend_ctx->kernel_rms_norm_mul;

    int nth = sgs;
    int max_workgroup_size = backend_ctx->get_kernel_workgroup_size(kernel);
    while (nth < ne00 && nth < max_workgroup_size) {
        nth *= 2;
    }
    nth = MIN(nth,
max_workgroup_size); nth = MIN(nth, ne00); size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &nb3)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(float), &eps)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(float)*sgs, NULL)); backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_opencl_op_norm_fused(ggml_backend_t backend, ggml_tensor * norm_tensor, ggml_tensor * mul_tensor, ggml_tensor * add_tensor) { GGML_ASSERT(norm_tensor && mul_tensor && add_tensor); const ggml_tensor * src0 = norm_tensor->src[0]; const ggml_tensor * src1 = mul_tensor->src[0] == norm_tensor ? mul_tensor->src[1] : mul_tensor->src[0]; const ggml_tensor * src2 = add_tensor->src[0] == mul_tensor ? 
add_tensor->src[1] : add_tensor->src[0]; const ggml_tensor * dst = add_tensor; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offset2 = extra2->offset + src2->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; float eps; memcpy(&eps, norm_tensor->op_params, sizeof(float)); const int ne00 = src0->ne[0], ne01 = src0->ne[1], ne02 = src0->ne[2], ne03 = src0->ne[3]; const cl_ulong nb01 = src0->nb[1], nb02 = src0->nb[2], nb03 = src0->nb[3]; const int ne10 = src1->ne[0], ne11 = src1->ne[1], ne12 = src1->ne[2], ne13 = src1->ne[3]; const cl_ulong nb11 = src1->nb[1], nb12 = src1->nb[2], nb13 = src1->nb[3]; const int ne20 = src2->ne[0], ne21 = src2->ne[1], ne22 = src2->ne[2], ne23 = src2->ne[3]; const cl_ulong nb21 = src2->nb[1], nb22 = src2->nb[2], nb23 = src2->nb[3]; const cl_ulong nbd1 = dst->nb[1], nbd2 = dst->nb[2], nbd3 = dst->nb[3]; size_t sgs; if (backend_ctx->gpu_family == ADRENO) sgs = 64; else if (backend_ctx->gpu_family == INTEL) sgs = 32; else GGML_ASSERT(false && "Unsupported GPU"); cl_kernel kernel = backend_ctx->kernel_norm_mul_add; int nth = sgs; int max_workgroup_size = backend_ctx->get_kernel_workgroup_size(kernel); while (nth < ne00/4 && nth < max_workgroup_size) nth *= 2; nth = MIN(nth, max_workgroup_size); nth = MIN(nth, ne00/4); size_t gws[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t lws[] = {(size_t)nth, 1, 1}; size_t num_subgroups = (nth + sgs - 1) / sgs; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne20)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &ne21)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &ne22)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &ne23)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(cl_ulong), &nb21)); 
CL_CHECK(clSetKernelArg(kernel, 27, sizeof(cl_ulong), &nb22)); CL_CHECK(clSetKernelArg(kernel, 28, sizeof(cl_ulong), &nb23)); CL_CHECK(clSetKernelArg(kernel, 29, sizeof(cl_ulong), &nbd1)); CL_CHECK(clSetKernelArg(kernel, 30, sizeof(cl_ulong), &nbd2)); CL_CHECK(clSetKernelArg(kernel, 31, sizeof(cl_ulong), &nbd3)); CL_CHECK(clSetKernelArg(kernel, 32, sizeof(float), &eps)); CL_CHECK(clSetKernelArg(kernel, 33, sizeof(cl_float2) * num_subgroups, NULL)); backend_ctx->enqueue_ndrange_kernel(kernel, 3, gws, lws, dst); } static void ggml_opencl_op_group_norm_fused(ggml_backend_t backend, ggml_tensor * gn_tensor, ggml_tensor * mul_tensor, ggml_tensor * add_tensor) { GGML_ASSERT(gn_tensor && mul_tensor && add_tensor); const ggml_tensor * src0 = gn_tensor->src[0]; const ggml_tensor * src1 = mul_tensor->src[0] == gn_tensor ? mul_tensor->src[1] : mul_tensor->src[0]; const ggml_tensor * src2 = add_tensor->src[0] == mul_tensor ? add_tensor->src[1] : add_tensor->src[0]; const ggml_tensor * dst = add_tensor; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offset2 = extra2->offset + src2->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; int groups; float eps; memcpy(&groups, gn_tensor->op_params, sizeof(int)); memcpy(&eps, (char *)gn_tensor->op_params + sizeof(int), sizeof(float)); cl_kernel kernel = backend_ctx->kernel_group_norm_mul_add; int max_workgroup_size = backend_ctx->get_kernel_workgroup_size(kernel); int ne = ggml_nelements(src0); int group_size = ne / groups; size_t lws[] = { (size_t)MIN(max_workgroup_size, group_size) }; size_t gws[] = { (size_t)groups * lws[0] }; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &group_size)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(float), &eps)); backend_ctx->enqueue_ndrange_kernel(kernel, 1, gws, lws, dst); } static void ggml_cl_group_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; int32_t n_groups = ((const int32_t *) dst->op_params)[0]; int32_t group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + n_groups - 1) / n_groups); float 
eps = ((const float *) dst->op_params)[1]; const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne = ne00*ne01*ne02; cl_kernel kernel = backend_ctx->kernel_group_norm; size_t sgs = 64; if (backend_ctx->gpu_family == ADRENO) { sgs = 64; } else if (backend_ctx->gpu_family == INTEL) { sgs = 32; } else { GGML_ASSERT(false && "Unsupported GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &group_size)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(float), &eps)); size_t global_work_size[] = {(size_t)n_groups*sgs, 1, 1}; size_t local_work_size[] = {(size_t)sgs, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_tanh(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0_abs = extra0->offset + src0->view_offs; cl_ulong offsetd_abs = extrad->offset + dst->view_offs; cl_kernel kernel; if (dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_tanh_f32_nd; } else if (dst->type == GGML_TYPE_F16) { kernel = backend_ctx->kernel_tanh_f16_nd; } else { GGML_ASSERT(false && "Unsupported type for ggml_cl_tanh"); } GGML_ASSERT(kernel != nullptr); const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = dst->ne[0]; const int ne11 = dst->ne[1]; const int ne12 = dst->ne[2]; const int ne13 = dst->ne[3]; const cl_ulong nb10 = dst->nb[0]; const cl_ulong nb11 = dst->nb[1]; const cl_ulong nb12 = dst->nb[2]; const cl_ulong nb13 = dst->nb[3]; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0_abs)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd_abs)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong),&nb02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong),&nb03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong),&nb10)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong),&nb11)); CL_CHECK(clSetKernelArg(kernel, 
18, sizeof(cl_ulong),&nb12)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong),&nb13)); size_t global_work_size[3]; if (ne10 == 0 || ne11 == 0 || ne12 == 0 || ne13 == 0) { // Handle case of 0 elements return; } global_work_size[0] = (size_t)ne10; global_work_size[1] = (size_t)ne11; global_work_size[2] = (size_t)ne12; size_t lws0 = 16, lws1 = 4, lws2 = 1; if (ne10 < 16) lws0 = ne10; if (ne11 < 4) lws1 = ne11; if (ne12 < 1) lws2 = ne12 > 0 ? ne12 : 1; while (lws0 * lws1 * lws2 > 256 && lws0 > 1) lws0 /= 2; while (lws0 * lws1 * lws2 > 256 && lws1 > 1) lws1 /= 2; while (lws0 * lws1 * lws2 > 256 && lws2 > 1) lws2 /= 2; size_t local_work_size[] = {lws0, lws1, lws2}; size_t* local_work_size_ptr = local_work_size; if (!backend_ctx->non_uniform_workgroups) { if (global_work_size[0] % local_work_size[0] != 0 || global_work_size[1] % local_work_size[1] != 0 || global_work_size[2] % local_work_size[2] != 0) { local_work_size_ptr = NULL; } } if (global_work_size[0] == 0 || global_work_size[1] == 0 || global_work_size[2] == 0) return; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_repeat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1_shape_def, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(dst->type == src0->type); UNUSED(src1_shape_def); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; if (backend_ctx->kernel_repeat == nullptr) { GGML_LOG_WARN("%s: repeat kernel not available, skipping OpenCL execution.\n", __func__); return; } ggml_tensor_extra_cl * extra_src0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra_dst = (ggml_tensor_extra_cl *)dst->extra; cl_ulong off_src0 = extra_src0->offset + src0->view_offs; cl_ulong off_dst = extra_dst->offset + dst->view_offs; const int src0_ne0 = src0->ne[0]; const int src0_ne1 = src0->ne[1]; const int src0_ne2 = src0->ne[2]; const int src0_ne3 = src0->ne[3]; const cl_ulong src0_nb0 = src0->nb[0]; const cl_ulong src0_nb1 = src0->nb[1]; const cl_ulong src0_nb2 = src0->nb[2]; const cl_ulong src0_nb3 = src0->nb[3]; const int dst_ne0 = dst->ne[0]; const int dst_ne1 = dst->ne[1]; const int dst_ne2 = dst->ne[2]; const int dst_ne3 = dst->ne[3]; const cl_ulong dst_nb0 = dst->nb[0]; const cl_ulong dst_nb1 = dst->nb[1]; const cl_ulong dst_nb2 = dst->nb[2]; const cl_ulong dst_nb3 = dst->nb[3]; cl_kernel kernel = backend_ctx->kernel_repeat; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra_src0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra_dst->data_device)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_ulong), &off_src0)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &off_dst)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &src0_ne0)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &src0_ne1)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &src0_ne2)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &src0_ne3)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &src0_nb0)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &src0_nb1)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &src0_nb2)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &src0_nb3)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &dst_ne0)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &dst_ne1)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &dst_ne2)); CL_CHECK(clSetKernelArg(kernel, 15, 
sizeof(int), &dst_ne3)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &dst_nb0)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &dst_nb1)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &dst_nb2)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &dst_nb3)); size_t gws0 = dst_ne1 > 0 ? (size_t)dst_ne1 : 1; size_t gws1 = dst_ne2 > 0 ? (size_t)dst_ne2 : 1; size_t gws2 = dst_ne3 > 0 ? (size_t)dst_ne3 : 1; size_t global_work_size[] = { gws0, gws1, gws2 }; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst); } static void ggml_cl_pad(ggml_backend_t backend, const ggml_tensor * src0, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; if (backend_ctx->kernel_pad == nullptr) { GGML_LOG_WARN("%s: pad kernel not available, skipping OpenCL execution.\n", __func__); return; } ggml_tensor_extra_cl * extra_src0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra_dst = (ggml_tensor_extra_cl *)dst->extra; cl_ulong off_src0 = extra_src0->offset + src0->view_offs; cl_ulong off_dst = extra_dst->offset + dst->view_offs; const int s_ne0 = src0->ne[0]; const int s_ne1 = src0->ne[1]; const int s_ne2 = src0->ne[2]; const int s_ne3 = src0->ne[3]; const int s_nb0 = src0->nb[0]; const int s_nb1 = src0->nb[1]; const int s_nb2 = src0->nb[2]; const int s_nb3 = src0->nb[3]; const int d_ne0 = dst->ne[0]; const int d_ne1 = dst->ne[1]; const int d_ne2 = dst->ne[2]; const int d_ne3 = dst->ne[3]; const int d_nb0 = dst->nb[0]; const int d_nb1 = dst->nb[1]; const int d_nb2 = dst->nb[2]; const int d_nb3 = dst->nb[3]; const int lp0 = ((const int*)(dst->op_params))[0]; const int rp0 = ((const int*)(dst->op_params))[1]; const int lp1 = ((const int*)(dst->op_params))[2]; const int rp1 = ((const int*)(dst->op_params))[3]; const int lp2 = ((const int*)(dst->op_params))[4]; const int rp2 = ((const int*)(dst->op_params))[5]; const int lp3 = ((const int*)(dst->op_params))[6]; const int rp3 = ((const int*)(dst->op_params))[7]; cl_kernel kernel = backend_ctx->kernel_pad; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra_src0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &off_src0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra_dst->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &off_dst)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &s_ne0)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &s_ne1)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &s_ne2)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &s_ne3)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &s_nb0)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &s_nb1)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &s_nb2)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &s_nb3)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &d_ne0)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &d_ne1)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &d_ne2)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &d_ne3)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &d_nb0)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &d_nb1)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &d_nb2)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &d_nb3)); 
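/*
 * The eight values read from dst->op_params above are the GGML_OP_PAD parameters
 * (lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3): left/right padding per dimension, with
 * index 0 being the innermost axis. They are forwarded as kernel arguments 20..27
 * below. The expected shape relation is d_neI = lpI + s_neI + rpI, e.g. s_ne0 = 8
 * with lp0 = 1 and rp0 = 2 gives d_ne0 = 11.
 */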
CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &lp0)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &rp0)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &lp1)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &rp1)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &lp2)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &rp2)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(int), &lp3)); CL_CHECK(clSetKernelArg(kernel, 27, sizeof(int), &rp3)); size_t lws0 = 64; size_t gws0 = (( (size_t)d_ne0 + lws0 - 1 ) / lws0) * lws0; size_t global_work_size[] = { gws0, (size_t)d_ne1, (size_t)d_ne2*d_ne3 }; size_t local_work_size[] = { lws0, 1, 1 }; size_t * local_work_size_ptr = local_work_size; if (d_ne0 % lws0 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_upscale(ggml_backend_t backend, const ggml_tensor * src0, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; const int mode_flags = (ggml_scale_mode) ggml_get_op_params_i32(dst, 0); const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); cl_kernel kernel = nullptr; if (mode == GGML_SCALE_MODE_NEAREST) { kernel = backend_ctx->kernel_upscale; if (kernel == nullptr) { GGML_LOG_WARN("%s: nearest upscale kernel not available, skipping OpenCL execution.\n", __func__); return; } } else if (mode == GGML_SCALE_MODE_BILINEAR) { kernel = backend_ctx->kernel_upscale_bilinear; if (kernel == nullptr) { GGML_LOG_WARN("%s: bilinear upscale kernel not available, skipping OpenCL execution.\n", __func__); return; } } else { GGML_LOG_WARN("%s: unsupported upscale mode %d, skipping OpenCL execution.\n", __func__, mode); return; } ggml_tensor_extra_cl * extra_src0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra_dst = (ggml_tensor_extra_cl *)dst->extra; cl_ulong off_src0 = extra_src0->offset + src0->view_offs; cl_ulong off_dst = extra_dst->offset + dst->view_offs; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; const int ne2 = dst->ne[2]; const int ne3 = dst->ne[3]; float sf0 = (float)ne0 / ne00; float sf1 = (float)ne1 / ne01; float sf2 = (float)ne2 / ne02; float sf3 = (float)ne3 / ne03; float pixel_offset = 0.5f; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra_src0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &off_src0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra_dst->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &off_dst)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb03)); if (mode == GGML_SCALE_MODE_NEAREST) { CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne2)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), 
&ne3)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float), &sf0)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(float), &sf1)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(float), &sf2)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(float), &sf3)); } else if (mode == GGML_SCALE_MODE_BILINEAR) { if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { sf0 = ne0 > 1 && ne00 > 1 ? (float)(ne0 - 1) / (ne00 - 1) : sf0; sf1 = ne1 > 1 && ne01 > 1 ? (float)(ne1 - 1) / (ne01 - 1) : sf1; pixel_offset = 0.0f; } CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne2)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne3)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(float), &sf0)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(float), &sf1)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(float), &sf2)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(float), &sf3)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(float), &pixel_offset)); } size_t dst_total_elements = (size_t)ne0 * ne1 * ne2 * ne3; if (dst_total_elements == 0) { return; } size_t global_work_size[] = { dst_total_elements, 1, 1 }; size_t local_work_size_pref = 256; size_t local_work_size[] = { MIN(local_work_size_pref, dst_total_elements), 1, 1}; size_t * local_work_size_ptr = local_work_size; if (dst_total_elements % local_work_size[0] != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_concat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; cl_command_queue queue = backend_ctx->queue; if (backend_ctx->kernel_concat_f32_contiguous == nullptr || backend_ctx->kernel_concat_f32_non_contiguous == nullptr) { GGML_LOG_WARN("%s: concat kernels not available, skipping OpenCL execution.\n", __func__); return; } ggml_tensor_extra_cl * extra0_cl = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1_cl = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad_cl = (ggml_tensor_extra_cl *)dst->extra; cl_ulong off_src0 = extra0_cl->offset + src0->view_offs; cl_ulong off_src1 = extra1_cl->offset + src1->view_offs; cl_ulong off_dst = extrad_cl->offset + dst->view_offs; const int32_t dim = ((const int32_t *) dst->op_params)[0]; GGML_ASSERT(dim >= 0 && dim <= 3); if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) { if (dim == 3) { size_t nbytes_src0 = ggml_nbytes(src0); size_t nbytes_src1 = ggml_nbytes(src1); CL_CHECK(clEnqueueCopyBuffer(queue, extra0_cl->data_device, extrad_cl->data_device, off_src0, off_dst, nbytes_src0, 0, NULL, NULL)); CL_CHECK(clEnqueueCopyBuffer(queue, extra1_cl->data_device, extrad_cl->data_device, off_src1, off_dst + nbytes_src0, nbytes_src1, 0, NULL, NULL)); } else { cl_kernel kernel = backend_ctx->kernel_concat_f32_contiguous; size_t global_work_size[3]; for (int i3 = 0; i3 < dst->ne[3]; ++i3) { cl_ulong current_off_src0 = off_src0 + (i3 * 
src0->nb[3]); cl_ulong current_off_src1 = off_src1 + (i3 * src1->nb[3]); cl_ulong current_off_dst = off_dst + (i3 * dst->nb[3]); int d_ne00 = src0->ne[0]; int d_ne01 = src0->ne[1]; int d_ne02 = src0->ne[2]; int d_ne10 = src1->ne[0]; int d_ne11 = src1->ne[1]; int d_ne12 = src1->ne[2]; int d_ne0 = dst->ne[0]; int d_ne1 = dst->ne[1]; int d_ne2 = dst->ne[2]; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_cl->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &current_off_src0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1_cl->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &current_off_src1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad_cl->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &current_off_dst)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &d_ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &d_ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &d_ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &d_ne10)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &d_ne11)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &d_ne12)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &d_ne0)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &d_ne1)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &d_ne2)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &dim)); global_work_size[0] = d_ne0; global_work_size[1] = d_ne1; global_work_size[2] = d_ne2; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst); } } } else { cl_kernel kernel = backend_ctx->kernel_concat_f32_non_contiguous; cl_long ne00 = src0->ne[0], ne01 = src0->ne[1], ne02 = src0->ne[2], ne03 = src0->ne[3]; cl_ulong nb00 = src0->nb[0], nb01 = src0->nb[1], nb02 = src0->nb[2], nb03 = src0->nb[3]; cl_ulong nb10 = src1->nb[0], nb11 = src1->nb[1], nb12 = src1->nb[2], nb13 = src1->nb[3]; cl_long d_ne0 = dst->ne[0], d_ne1 = dst->ne[1], d_ne2 = dst->ne[2], d_ne3 = dst->ne[3]; cl_ulong d_nb0 = dst->nb[0], d_nb1 = dst->nb[1], d_nb2 = dst->nb[2], d_nb3 = dst->nb[3]; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_cl->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &off_src0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1_cl->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &off_src1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad_cl->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &off_dst)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_long), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_long), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_long), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_long), &ne03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_long), &d_ne0)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_long), &d_ne1)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_long), &d_ne2)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_long), &d_ne3)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), 
&d_nb0)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(cl_ulong), &d_nb1)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(cl_ulong), &d_nb2)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(cl_ulong), &d_nb3)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(int), &dim)); size_t global_work_size_nc[] = { d_ne1 > 0 ? (size_t)d_ne1 : 1, d_ne2 > 0 ? (size_t)d_ne2 : 1, d_ne3 > 0 ? (size_t)d_ne3 : 1 }; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size_nc, NULL, dst); } } static void ggml_cl_timestep_embedding(ggml_backend_t backend, const ggml_tensor * src0, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; if (backend_ctx->kernel_timestep_embedding == nullptr) { GGML_LOG_WARN("%s: timestep_embedding kernel not available, skipping OpenCL execution.\n", __func__); return; } ggml_tensor_extra_cl * extra_src0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra_dst = (ggml_tensor_extra_cl *)dst->extra; cl_ulong off_src0 = extra_src0->offset + src0->view_offs; cl_ulong off_dst = extra_dst->offset + dst->view_offs; const int logical_dim = dst->op_params[0]; const int max_period = dst->op_params[1]; const int dst_nb1_bytes = dst->nb[1]; cl_kernel kernel = backend_ctx->kernel_timestep_embedding; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra_src0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &off_src0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra_dst->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &off_dst)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &dst_nb1_bytes)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &logical_dim)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &max_period)); size_t gws0 = (size_t)(((logical_dim + 1) / 2) + 1); size_t gws1 = (size_t)src0->ne[0]; size_t global_work_size[] = {gws0, gws1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst); } static void ggml_cl_flash_attn(ggml_backend_t backend, const ggml_tensor * q, const ggml_tensor * k, ggml_tensor * dst) { const ggml_tensor * v = dst->src[2]; const ggml_tensor * mask = dst->src[3]; const ggml_tensor * sinks = dst->src[4]; GGML_ASSERT(q->extra); GGML_ASSERT(k->extra); GGML_ASSERT(v->extra); GGML_ASSERT(dst->extra); if (mask) { GGML_ASSERT(mask->extra); } if (sinks) { GGML_ASSERT(sinks->extra); } ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; const int n_q = q->ne[1]; const int n_kv = k->ne[1]; const int d_head_q = q->ne[0]; const int d_head_v = v->ne[0]; const int n_head = q->ne[2]; const int n_head_kv = k->ne[2]; const int n_batch = q->ne[3]; cl_kernel kernel = NULL; const bool is_f16 = q->type == GGML_TYPE_F16; const bool is_mixed = q->type == GGML_TYPE_F32 && k->type == GGML_TYPE_F16; const std::pair<int, int> dk_dv = {d_head_q, d_head_v}; if (n_q == 1) { if (is_mixed) { kernel = backend_ctx->kernels_flash_attn_f32_f16_q1.at(dk_dv); } else if (is_f16) { kernel = backend_ctx->kernels_flash_attn_f16_q1.at(dk_dv); } else { kernel = backend_ctx->kernels_flash_attn_f32_q1.at(dk_dv); } } else { if (is_mixed) { kernel = backend_ctx->kernels_flash_attn_f32_f16.at(dk_dv); } else if (is_f16) { kernel = backend_ctx->kernels_flash_attn_f16.at(dk_dv); } else { kernel = backend_ctx->kernels_flash_attn_f32.at(dk_dv); } } GGML_ASSERT(kernel != 
NULL); ggml_tensor_extra_cl * extra_q = (ggml_tensor_extra_cl *)q->extra; ggml_tensor_extra_cl * extra_k = (ggml_tensor_extra_cl *)k->extra; ggml_tensor_extra_cl * extra_v = (ggml_tensor_extra_cl *)v->extra; ggml_tensor_extra_cl * extra_o = (ggml_tensor_extra_cl *)dst->extra; ggml_tensor_extra_cl * extra_mask = mask ? (ggml_tensor_extra_cl *)mask->extra : NULL; ggml_tensor_extra_cl * extra_sinks = sinks ? (ggml_tensor_extra_cl *)sinks->extra : NULL; cl_ulong offset_q = extra_q->offset + q->view_offs; cl_ulong offset_k = extra_k->offset + k->view_offs; cl_ulong offset_v = extra_v->offset + v->view_offs; cl_ulong offset_o = extra_o->offset + dst->view_offs; cl_mem mask_buffer = extra_mask ? extra_mask->data_device : NULL; cl_ulong offset_mask = extra_mask ? extra_mask->offset + mask->view_offs : 0; cl_mem sinks_buffer = extra_sinks ? extra_sinks->data_device : NULL; cl_ulong offset_sinks = extra_sinks ? extra_sinks->offset + sinks->view_offs : 0; const cl_ulong q_nb1 = q->nb[1], q_nb2 = q->nb[2], q_nb3 = q->nb[3]; const cl_ulong k_nb1 = k->nb[1], k_nb2 = k->nb[2], k_nb3 = k->nb[3]; const cl_ulong v_nb1 = v->nb[1], v_nb2 = v->nb[2], v_nb3 = v->nb[3]; const cl_ulong o_nb1 = dst->nb[1], o_nb2 = dst->nb[2], o_nb3 = dst->nb[3]; const cl_ulong mask_nb1 = mask ? mask->nb[1] : 0; const cl_ulong mask_nb2 = mask ? mask->nb[2] : 0; const cl_ulong mask_nb3 = mask ? mask->nb[3] : 0; const int mask_ne2 = mask ? mask->ne[2] : 0; const int mask_ne3 = mask ? mask->ne[3] : 0; float scale, max_bias, logit_softcap; const float * params = (const float *)dst->op_params; scale = params[0]; max_bias = params[1]; logit_softcap = params[2]; const int is_causal = (mask == NULL && n_q > 1 && n_q == n_kv); const int n_head_log2_val = n_head > 0 ? 1u << (int)floorf(log2f((float)n_head)) : 0; const float n_head_log2_f = n_head_log2_val > 0 ? 
(float)n_head_log2_val : 1.0f; const float m0 = powf(2.0f, -(max_bias) / n_head_log2_f); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2_f); CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra_q->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset_q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra_k->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset_k)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra_v->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset_v)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extra_o->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offset_o)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(float), &scale)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &n_q)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &n_kv)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &is_causal)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &n_head)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &q_nb1)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &q_nb2)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &q_nb3)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &k_nb1)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &k_nb2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &k_nb3)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &v_nb1)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &v_nb2)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &v_nb3)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &o_nb1)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(cl_ulong), &o_nb2)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(cl_ulong), &o_nb3)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(float), &max_bias)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(float), &m0)); CL_CHECK(clSetKernelArg(kernel, 27, sizeof(float), &m1)); CL_CHECK(clSetKernelArg(kernel, 28, sizeof(int), &n_head_log2_val)); CL_CHECK(clSetKernelArg(kernel, 29, sizeof(float), &logit_softcap)); CL_CHECK(clSetKernelArg(kernel, 30, sizeof(int), &n_head_kv)); CL_CHECK(clSetKernelArg(kernel, 31, sizeof(cl_mem), &mask_buffer)); CL_CHECK(clSetKernelArg(kernel, 32, sizeof(cl_ulong), &offset_mask)); CL_CHECK(clSetKernelArg(kernel, 33, sizeof(cl_ulong), &mask_nb1)); CL_CHECK(clSetKernelArg(kernel, 34, sizeof(cl_ulong), &mask_nb2)); CL_CHECK(clSetKernelArg(kernel, 35, sizeof(cl_ulong), &mask_nb3)); CL_CHECK(clSetKernelArg(kernel, 36, sizeof(int), &mask_ne2)); CL_CHECK(clSetKernelArg(kernel, 37, sizeof(int), &mask_ne3)); CL_CHECK(clSetKernelArg(kernel, 38, sizeof(cl_mem), &sinks_buffer)); CL_CHECK(clSetKernelArg(kernel, 39, sizeof(cl_ulong), &offset_sinks)); if (n_q == 1) { const size_t wg_size = 64; size_t local_work_size[] = { wg_size, 1 }; size_t global_work_size[] = { wg_size, (size_t)(n_head * n_batch) }; backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_work_size, local_work_size, dst); } else { const int block_m = backend_ctx->kernels_flash_attn_bm.at(dk_dv); const size_t wg_size = block_m; size_t local_work_size[] = { wg_size, 1 }; size_t global_work_size[] = { (size_t)((n_q + block_m - 1) / block_m) * wg_size, (size_t)(n_head * n_batch) }; backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_work_size, local_work_size, dst); } } static void ggml_cl_mul_mat_f16_f32_tiled(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { ggml_backend_opencl_context *backend_ctx = 
(ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; const int M = src0->ne[1]; const int N = src1->ne[1]; const int K = src0->ne[0]; cl_kernel kernel = backend_ctx->kernel_mul_mat_f16_f32_tiled; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(int), &M)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(int), &N)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &K)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &offsetd)); // Tiling parameters. These need to be tuned for optimal performance. // They must match the #defines in the kernel mul_mat_f16_f32.cl. // // OPWM / OPWN: Output tile size per Work-Group. A work-group computes a tile of size OPWM x OPWN. // TPWM / TPWN: Threads per Work-group. This is the work-group size. // OPTM / OPTN: Output elements per Thread. Each thread computes OPTM x OPTN elements. // // The following relationships must hold: // OPWM = TPWM * OPTM // OPWN = TPWN * OPTN // const int OPWM = 64; const int OPWN = 64; const int TPWM = 16; const int TPWN = 8; size_t local_work_size[2] = { TPWM, TPWN }; size_t global_work_size[2] = { (size_t) ((M + OPWM - 1) / OPWM) * TPWM, (size_t) ((N + OPWN - 1) / OPWN) * TPWN, }; backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_work_size, local_work_size, dst); } static void ggml_cl_conv_2d(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_TENSOR_BINARY_OP_LOCALS; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; const cl_uint Cout = ne03; const cl_uint Cin = ne02; const cl_uint N = ne13; const cl_uint KW = ne00; const cl_uint KH = ne01; const cl_uint W = ne10; const cl_uint H = ne11; const cl_uint OW = ne0; const cl_uint OH = ne1; const cl_uint s0 = dst->op_params[0]; const cl_uint s1 = dst->op_params[1]; const cl_uint p0 = dst->op_params[2]; const cl_uint p1 = dst->op_params[3]; const cl_uint d0 = dst->op_params[4]; const cl_uint d1 = dst->op_params[5]; const cl_uint cl_nb01 = nb01/ggml_type_size(src0->type); const cl_uint cl_nb02 = nb02/ggml_type_size(src0->type); const cl_uint cl_nb03 = nb03/ggml_type_size(src0->type); const cl_uint cl_nb11 = nb11/ggml_type_size(src1->type); const cl_uint cl_nb12 = nb12/ggml_type_size(src1->type); const cl_uint cl_nb13 = nb13/ggml_type_size(src1->type); const cl_uint cl_nb1 = nb1/ggml_type_size(dst->type); const cl_uint cl_nb2 = nb2/ggml_type_size(dst->type); const cl_uint cl_nb3 = nb3/ggml_type_size(dst->type); const int64_t NPQ = (int64_t)N 
* OW * OH; const uint32_t BS_K = 64; const uint32_t BS_NPQ = 64; const uint32_t BS_CRS = 16; const uint32_t VEC_SIZE = 4; const uint32_t TS_K = 4; const uint32_t TS_NPQ = 8; const uint32_t WG_K = BS_K / TS_K; const uint32_t WG_NPQ = BS_NPQ / TS_NPQ; auto splitWork = [](uint32_t work_size, uint32_t block_size) { return (block_size + work_size - 1) / block_size; }; const uint32_t NB_K = splitWork(Cout, BS_K); const uint32_t NB_NPQ = splitWork(NPQ, BS_NPQ); cl_kernel kernel; size_t shmem_size; if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { kernel = backend_ctx->kernel_conv_2d_f16; shmem_size = (size_t)(BS_K * BS_CRS * sizeof(cl_half) + BS_CRS * (BS_NPQ / VEC_SIZE) * sizeof(cl_half4)); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_conv_2d_f32; shmem_size = (size_t)(BS_K * BS_CRS * sizeof(cl_float) + BS_CRS * (BS_NPQ / VEC_SIZE) * sizeof(cl_float4)); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_conv_2d_f16_f32; shmem_size = (size_t)(BS_K * BS_CRS * sizeof(cl_half) + BS_CRS * (BS_NPQ / VEC_SIZE) * sizeof(cl_float4)); } else { GGML_ASSERT(false && "Unsupported data type combination for conv2d"); } cl_uint idx = 0; CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, idx++, shmem_size, NULL)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &Cout)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &Cin)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &N)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &KW)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &KH)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &W)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &H)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &OW)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &OH)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &s0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &s1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &p0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &p1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &d0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &d1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb01)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb02)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb03)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb11)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb12)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb13)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb2)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb3)); size_t global_work_size[] = { (size_t)NB_K * WG_K, (size_t)NB_NPQ * WG_NPQ, 1 }; size_t local_work_size[] = { (size_t)WG_K, (size_t)WG_NPQ, 1 }; backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_work_size, local_work_size, dst); } 
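/*
 * Illustrative sketch (excluded from the build, not part of the backend): how the conv2d
 * dispatch above maps the problem size to an NDRange. Output channels (Cout) are tiled in
 * blocks of BS_K = 64 and the flattened N*OW*OH dimension (NPQ) in blocks of BS_NPQ = 64;
 * each thread produces TS_K x TS_NPQ = 4x8 outputs, so a workgroup is
 * (BS_K/TS_K) x (BS_NPQ/TS_NPQ) = 16x8 threads. The sizes used here are hypothetical and
 * only serve to make the arithmetic concrete.
 */
#if 0
static void conv2d_dispatch_example(void) {
    const uint32_t Cout = 128, N = 1, OW = 56, OH = 56;             // hypothetical tensor sizes
    const uint32_t BS_K = 64, BS_NPQ = 64, TS_K = 4, TS_NPQ = 8;    // same constants as above
    const uint32_t WG_K    = BS_K / TS_K;                           // 16 threads along the Cout tile
    const uint32_t WG_NPQ  = BS_NPQ / TS_NPQ;                       //  8 threads along the NPQ tile
    const uint64_t NPQ     = (uint64_t)N * OW * OH;                 // 3136 output positions
    const uint32_t NB_K    = (Cout + BS_K - 1) / BS_K;              // 2 tiles of output channels
    const uint32_t NB_NPQ  = ((uint32_t)NPQ + BS_NPQ - 1) / BS_NPQ; // 49 tiles of output positions
    const size_t global_work_size[2] = { (size_t)NB_K * WG_K, (size_t)NB_NPQ * WG_NPQ }; // {32, 392}
    const size_t local_work_size[2]  = { WG_K, WG_NPQ };                                 // {16, 8}
    (void)global_work_size; (void)local_work_size;
}
#endif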
static void ggml_cl_mul_mat_kq_kqv_adreno(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const cl_ulong nb10 = src1->nb[0]; const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; GGML_ASSERT(ne00 == ne10); cl_kernel kernel; cl_context context = backend_ctx->context; cl_int status; cl_image_format img_fmt_1d; cl_image_desc img_desc_1d; cl_buffer_region region; cl_mem A_image1d; cl_mem A_sub_buffer; cl_mem B_sub_buffer; cl_mem D_image1d; cl_mem D_sub_buffer; int M = ne01; int N = ne1; int K = ne00; if (nb01 > nb02) { // KQ kernel = backend_ctx->kernel_mul_mm_f16_f32_kq; } else { // KQV kernel = backend_ctx->kernel_mul_mm_f16_f32_kqv; } // create sub-buffer for A // <--------------------------------------------> // extra0 = src0->view_src ? (ggml_tensor_extra_cl *)src0->view_src->extra : (ggml_tensor_extra_cl *)src0->extra; region.origin = (extra0->offset); if (nb01 > nb02) { // KQ region.size = nb01 * ne01; } else { // KQV region.size = nb02 * ne02; } A_sub_buffer = clCreateSubBuffer((extra0->data_device), 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status); CL_CHECK(status); // <--------------------------------------------> // // create sub-buffer for B // <--------------------------------------------> // region.origin = (extra1->offset); region.size = nb10 * ne10 * ne11 * ne12; B_sub_buffer = clCreateSubBuffer((extra1->data_device), 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status); CL_CHECK(status); // <--------------------------------------------> // img_fmt_1d = {CL_RGBA, CL_FLOAT}; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; if (nb01 > nb02) { img_desc_1d.image_width = (nb01 * ne01 / 4)/4; } else { img_desc_1d.image_width = (nb02 * ne02 / 4)/4; } img_desc_1d.buffer = A_sub_buffer; A_image1d = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt_1d, &img_desc_1d, NULL, &status); CL_CHECK(status); // create sub-buffer for output C // <--------------------------------------------> // region.origin = (extrad->offset); region.size = ne0 * ne1 * dst->ne[2] * dst->nb[0]; // size of C in bytes D_sub_buffer = clCreateSubBuffer((extrad->data_device), 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status); CL_CHECK(status); // <--------------------------------------------> // // create image for C output // <--------------------------------------------> // img_fmt_1d = {CL_R, CL_FLOAT}; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.image_width = ne0 * ne1 * dst->ne[2] * dst->nb[0] / 4; img_desc_1d.buffer = D_sub_buffer; D_image1d = clCreateImage(context, CL_MEM_WRITE_ONLY, &img_fmt_1d, &img_desc_1d, NULL, &status); CL_CHECK(status); // <--------------------------------------------> // int offset_src0 = 0; int offset_src1 = 0; // set kernel args // <--------------------------------------------> // cl_uint k_arg = 0; CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &A_image1d)); 
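/*
 * The remaining arguments, set below, are: the (zero) offsets into the A image and the
 * B buffer, the B sub-buffer itself, the output image D plus its byte offset, the GEMM
 * shape (M, K, N), the batch extents ne02/ne12, and src0's row stride nb01.
 */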
CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &offset_src0)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &B_sub_buffer)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &offset_src1)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &D_image1d)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &extrad->offset)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &M)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &K)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &N)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &nb01)); size_t global_work_size[3] = {64, static_cast<size_t>(((M+63)/64)), static_cast<size_t>(((N+31)/32)*ne12)}; size_t local_work_size[3] = {64, 1, 2}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); // deallocate sub buffers and images // <--------------------------------------------> // CL_CHECK(clReleaseMemObject(A_image1d)); CL_CHECK(clReleaseMemObject(D_image1d)); CL_CHECK(clReleaseMemObject(A_sub_buffer)); CL_CHECK(clReleaseMemObject(B_sub_buffer)); CL_CHECK(clReleaseMemObject(D_sub_buffer)); } static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT; const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; #ifdef GGML_OPENCL_SOA_Q ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra; ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra; ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra; #endif const int ne00 = src0 ? src0->ne[0] : 0; const int ne01 = src0 ? src0->ne[1] : 0; const int ne02 = src0 ? src0->ne[2] : 0; const int ne03 = src0 ? src0->ne[3] : 0; const cl_ulong nb00 = src0 ? src0->nb[0] : 0; const cl_ulong nb01 = src0 ? src0->nb[1] : 0; const cl_ulong nb02 = src0 ? src0->nb[2] : 0; const cl_ulong nb03 = src0 ? src0->nb[3] : 0; const int ne10 = src1 ? src1->ne[0] : 0; const int ne11 = src1 ? src1->ne[1] : 0; const int ne12 = src1 ? src1->ne[2] : 0; const int ne13 = src1 ? src1->ne[3] : 0; const cl_ulong nb10 = src1 ? src1->nb[0] : 0; const cl_ulong nb11 = src1 ? src1->nb[1] : 0; const cl_ulong nb12 = src1 ? src1->nb[2] : 0; const cl_ulong nb13 = src1 ? src1->nb[3] : 0; const int ne0 = dst ? dst->ne[0] : 0; const int ne1 = dst ? 
dst->ne[1] : 0; int r2 = ne12/ne02; int r3 = ne13/ne03; GGML_ASSERT(ne00 == ne10); int nth0 = 32; int nth1 = 1; int nrows = 1; // The number of values produced by each subgroup int ndst = 4; cl_kernel kernel; #ifdef GGML_OPENCL_USE_ADRENO_KERNELS cl_context context = backend_ctx->context; if(src0t == GGML_TYPE_F16 && src1t == GGML_TYPE_F32){ if (ne01 >= 64 && ne1 >= 32 && ne00 >= 16 && (ne12 % ne02) == 0) { // For KQ if (ggml_is_permuted(src0) && ggml_is_permuted(src1) && nb00 <= nb02 && nb02 <= nb01 && nb01 <= nb03 && nb10 <= nb12 && nb12 <= nb11 && nb11 <= nb13) { ggml_cl_mul_mat_kq_kqv_adreno(backend, src0, src1, dst); return; } // For KQV if (!ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) { ggml_cl_mul_mat_kq_kqv_adreno(backend, src0, src1, dst); return; } } } if (ne01 && ne1 && use_adreno_kernels(backend_ctx, src0)) { // init CL objects // <--------------------------------------------> // cl_int status; cl_image_format img_fmt_1d; cl_image_desc img_desc_1d; cl_buffer_region region; cl_mem A_image1d = nullptr; cl_mem B_image1d = nullptr; cl_mem B_sub_buffer = nullptr; cl_mem C_d = nullptr; // for B transpose cl_mem B_d = nullptr; cl_mem B_d_input_image = nullptr; // <--------------------------------------------> // // define matrix dimensions // <--------------------------------------------> // int M = ne01; int N = ne1; int K = ne00; int padding; // <--------------------------------------------> // // q4_0 x fp32 if(src0t == GGML_TYPE_Q4_0 && src1t == GGML_TYPE_F32) { // TODO: remove duplicate definitions of image description + format -- move to top // create an image for A // <--------------------------------------------> // if (N == 1) { img_fmt_1d = { CL_R, CL_UNSIGNED_INT32}; } else { img_fmt_1d = { CL_R, CL_FLOAT}; } memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.image_width = M * K / 2 / 4; // Divide by 4 for char -> float img_desc_1d.buffer = extra0_q4_0->q; A_image1d = clCreateImage( context, CL_MEM_READ_ONLY, &img_fmt_1d, &img_desc_1d, NULL, &status); CL_CHECK(status); // <--------------------------------------------> // // create a sub_buffer for B // <--------------------------------------------> // region.origin = (extra1->offset); region.size = K * N * sizeof(float); B_sub_buffer = clCreateSubBuffer( extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status); CL_CHECK(status); // <--------------------------------------------> // // transpose activation for Skyler's gemm if (N != 1) { //how many extra elements beyond multiple of 8 int extra_elements = N % 8; //how much padding to add padding = 0; if (extra_elements > 0){ padding = 8 - extra_elements; } // Specify the starting offset (in bytes) region.origin = 0; // Specify the size of the sub-buffer (divide by 2 for FP16) region.size = K * (N + padding) * sizeof(float)/2; backend_ctx->prealloc_act_trans.allocate(context, region.size); B_d = clCreateSubBuffer( backend_ctx->prealloc_act_trans.buffer, 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status); CL_CHECK(status); cl_image_format image_format_B_d_input = { CL_RGBA, CL_FLOAT }; cl_image_desc image_desc_B_d_input = { CL_MEM_OBJECT_IMAGE1D_BUFFER, static_cast<size_t>(K * N / 4), 0, 0, 0, 0, 0, 0, 0, { B_sub_buffer } }; B_d_input_image = clCreateImage( context, 0, &image_format_B_d_input, &image_desc_B_d_input, NULL, &status); CL_CHECK(status); cl_image_format image_format_B_d_output = { CL_RGBA, CL_HALF_FLOAT }; //(CL_HALF_FLOAT for FP16) cl_image_desc image_desc_B_d_output = { 
CL_MEM_OBJECT_IMAGE1D_BUFFER, static_cast<size_t>(K * (N + padding)/4), 0, 0, 0, 0, 0, 0, 0, { B_d } }; B_image1d = clCreateImage( context, 0, &image_format_B_d_output, &image_desc_B_d_output, NULL, &status); CL_CHECK(status); int height_B = N/4; if (height_B == 0) { height_B = 1; } int width_B = K/4; int padded_height_B = (N + padding)/4; kernel = backend_ctx->kernel_transpose_32_16; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &B_d_input_image)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &B_image1d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_B)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_B)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &padded_height_B)); size_t local_size_t[2] = { 1, 16 }; //WGS tuning if (ne0 == 4096 && ne1 == 128 && ne10 == 4096) { local_size_t[0]=4; local_size_t[1]=8; } else if (ne0 == 11008 && ne1 == 128 && ne10 == 4096) { local_size_t[0]=2; local_size_t[1]=8; } else if(ne0 == 4096 && ne1 == 128 && ne10 == 11008) { local_size_t[0]=1; local_size_t[1]=8; } else if(ne0 == 32000 && ne1 == 128 && ne10 == 4096) { local_size_t[0]=2; local_size_t[1]=8; } size_t global_size_t[2] = { static_cast<size_t>(width_B), static_cast<size_t>(padded_height_B) }; backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_size_t, local_size_t, dst); } else { // no need to transpose B in other cases // create an image for B from sub_buffer // <--------------------------------------------> // img_fmt_1d = {CL_RGBA, CL_FLOAT}; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); img_desc_1d.image_width = K * N / 4; img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; img_desc_1d.buffer = B_sub_buffer; B_image1d = clCreateImage( context, CL_MEM_READ_ONLY, &img_fmt_1d, &img_desc_1d, NULL, &status); CL_CHECK(status); // <--------------------------------------------> // } // choose gemm or gemv kernel // <--------------------------------------------> // if (N == 1) { kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general; if (M == 4096 && K == 4096) { kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096; } else if (M == 4096 && K == 11008) { kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008; } else if (M == 11008 && K == 4096) { kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096; } else if (M == 32000 && K == 4096) { kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096; } } else { kernel = backend_ctx->CL_mul_mat_Ab_Bi_8x4; } // <--------------------------------------------> // // set kernel args // <--------------------------------------------> // cl_uint k_arg = 0; if (N == 1) { CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &A_image1d)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &extra0_q4_0->d)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &B_image1d)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_ulong), &extra1->offset)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_ulong), &extrad->offset)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, k_arg++, 
sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &r3)); } else { region.origin = extrad->offset; // Specify the starting offset (in bytes) region.size = M * N * sizeof(float); // Specify the size of the sub-buffer C_d = clCreateSubBuffer(extrad->data_device, CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION, &region, &status); CL_CHECK(status); int padded_N = ne1 + padding; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); //A_q_d CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); //A_s_d CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &B_image1d)); //B_d CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &C_d)); //C_d CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne01)); //M CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &padded_N)); //N with padding CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); //K CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne1)); //N without padding } // <--------------------------------------------> // // choose workgroup size // <--------------------------------------------> // size_t global_work_size[3] = { 64, static_cast<size_t>((M+63)/64), static_cast<size_t>((N+31)/32)}; size_t local_work_size[3] = {64, 2, 4}; global_work_size[0] = (size_t)(ceil((float)ne1/8)); global_work_size[1] = (size_t)(ne01/4); global_work_size[2] = (size_t)(1); local_work_size[0] = (size_t)(1); //4x32 for FP32 local_work_size[1] = (size_t)(128); local_work_size[2] = (size_t)(1); //WGS tuning if (ne0 == 4096 && ne1 == 128 && ne10 == 4096) { local_work_size[0] = 1; local_work_size[1] = 128; } else if (ne0 == 11008 && ne1 == 128 && ne10 == 4096) { local_work_size[0] = 2; local_work_size[1] = 64; } else if (ne0 == 4096 && ne1 == 128 && ne10 == 11008) { local_work_size[0] = 2; local_work_size[1] = 64; } else if (ne0 == 32000 && ne1 == 128 && ne10 == 4096) { local_work_size[0] = 2; local_work_size[1] = 64; } if (N == 1) { size_t wavesize = backend_ctx->adreno_wave_size; local_work_size[0] = wavesize; // localsize local_work_size[1] = 4; // reduce factor local_work_size[2] = 1; global_work_size[0] = (((M / 2) + wavesize - 1) / wavesize) * wavesize; global_work_size[1] = 4; // reduce factor global_work_size[2] = 1; } // <--------------------------------------------> // // enqueue kernel with profiling // <--------------------------------------------> // backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); // <--------------------------------------------> // // deallocate sub buffers and images // <--------------------------------------------> // CL_CHECK(clReleaseMemObject(A_image1d)); CL_CHECK(clReleaseMemObject(B_sub_buffer)); CL_CHECK(clReleaseMemObject(B_image1d)); if (N != 1) { CL_CHECK(clReleaseMemObject(B_d)); CL_CHECK(clReleaseMemObject(B_d_input_image)); CL_CHECK(clReleaseMemObject(C_d)); } // <--------------------------------------------> // return; } } // if (ne01 && ne1) #endif // GGML_OPENCL_USE_ADRENO_KERNELS // GEMM using local memory // Current BK = 16, so ne00 % 16 == 0 if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1t == GGML_TYPE_F32 && ne00 % 16 == 0 && ne11 > 1) { switch(src0t) { case GGML_TYPE_F32: { kernel = backend_ctx->kernel_mul_mm_f32_f32_l4_lm; nth0 = 128; // calculated as (BM*BN)/(TM*TN) int batch_stride_a = ne00*ne01; int batch_stride_b = ne10*ne11; int batch_stride_d = ne0*ne1; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); 
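/*
 * The remaining arguments, set below, pass the matrix shape, the element strides
 * (stride_a = stride_b = ne10, stride_d = ne01), the per-batch strides computed above,
 * and the broadcast ratios r2 = ne12/ne02 and r3 = ne13/ne03, which allow a single src0
 * batch to be reused across several src1 batches.
 */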
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne10)); // stride_a CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); // stride_b CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne01)); // stride_d CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &batch_stride_a)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &batch_stride_b)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &batch_stride_d)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3)); // 64 is block tile size BM and BN - change here when BM and BN in the kernel are changed. size_t global_work_size[] = {(size_t)(CEIL_DIV(ne01, 64)*nth0), (size_t)(CEIL_DIV(ne11, 64)), (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); return; } case GGML_TYPE_F16: { kernel = backend_ctx->kernel_mul_mm_f16_f32_l4_lm; nth0 = 128; // calculated as (BM*BN)/(TM*TN) int batch_stride_a = ne00*ne01; int batch_stride_b = ne10*ne11; int batch_stride_d = ne0*ne1; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne10)); // stride_a CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); // stride_b CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne01)); // stride_d CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &batch_stride_a)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &batch_stride_b)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &batch_stride_d)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3)); // 64 is block tile size BM and BN - change here when BM and BN in the kernel are changed. 
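// Illustrative dispatch arithmetic (hypothetical shapes, not from a model):
// with BM = BN = 64 and nth0 = 128, an output of ne01 = 4096 rows by
// ne11 = 512 columns gives
//   global = { CEIL_DIV(4096, 64) * 128, CEIL_DIV(512, 64), ne12 * ne13 }
//          = { 8192, 8, ne12 * ne13 },
// i.e. 64 x 8 workgroups of 128 work-items per batch slice.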
size_t global_work_size[] = {(size_t)(CEIL_DIV(ne01, 64)*nth0), (size_t)(CEIL_DIV(ne11, 64)), (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); return; } case GGML_TYPE_Q8_0: { if (ne11 < 32) { break; } kernel = backend_ctx->kernel_mul_mm_q8_0_f32_l4_lm; nth0 = 128; // calculated as (BM*BN)/(TM*TN) int batch_stride_a = ne00*ne01; int batch_stride_b = ne10*ne11; int batch_stride_d = ne0*ne1; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne10)); // stride_a CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); // stride_b CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne01)); // stride_d CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &batch_stride_a)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &batch_stride_b)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &batch_stride_d)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3)); // 64 is block tile size BM and BN - change here when BM and BN in the kernel are changed. size_t global_work_size[] = {(size_t)(CEIL_DIV(ne01, 64)*nth0), (size_t)(CEIL_DIV(ne11, 64)), (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); return; } default: break; } } if (src0t == GGML_TYPE_F16 && src1t == GGML_TYPE_F32 && src0->ne[1] > 32 && // M > 32 src1->ne[1] > 32 && // N > 32 src0->ne[0] > 32 && // K > 32 src0->ne[2] == 1 && src0->ne[3] == 1 && src1->ne[2] == 1 && src1->ne[3] == 1 && ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && backend_ctx->kernel_mul_mat_f16_f32_tiled != NULL) { ggml_cl_mul_mat_f16_f32_tiled(backend, src0, src1, dst); return; } if (!ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1t == GGML_TYPE_F32 && ne00%32 == 0 && ne11 > 2) { #ifdef GGML_OPENCL_SOA_Q // Set up kernel. switch(src0t) { case GGML_TYPE_Q4_0: // This should have been satisfied. 
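// Q4_0 x F32 GEMM using the flattened (SoA) quant layout: the packed
// nibbles and the scales are passed as separate buffers (extra0_q4_0->q and
// extra0_q4_0->d), and each workgroup covers 8 (Adreno) or 16 (Intel)
// consecutive output rows, as reflected in the launch sizes below.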
GGML_ASSERT(ne11 == ne1); GGML_ASSERT(ne01 == ne0); if (backend_ctx->gpu_family == INTEL) { nth0 = 16; nth1 = 1; kernel = backend_ctx->kernel_mul_mat_q4_0_f32_1d_16x_flat; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 1; kernel = backend_ctx->kernel_mul_mat_q4_0_f32_1d_8x_flat; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); break; default: break; } // Launch kernel. if (src0t == GGML_TYPE_Q4_0) { size_t global_work_size[] = {(size_t)(ne01 + 7)/8*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; if (backend_ctx->gpu_family == INTEL) { // Set global size for Intel. It uses 16x output values. global_work_size[0] = (size_t)(ne01 + 15)/16*nth0; global_work_size[1] = (size_t)ne11*nth1; global_work_size[2] = (size_t)ne12*ne13; } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); return; } #else // GGML_OPENCL_SOA_Q // TODO: add block_q4_0 variant. 
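// Without GGML_OPENCL_SOA_Q there is no batched GEMM variant for plain
// block_q4_0 here, so execution falls through to the generic
// matrix-vector kernel selection in the switch below.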
#endif // GGML_OPENCL_SOA_Q } // use custom matrix x vector kernel switch (src0t) { case GGML_TYPE_F32: //GGML_ASSERT(ne02 == ne12); GGML_ASSERT(src1t == GGML_TYPE_F32); kernel = backend_ctx->kernel_mul_mat_f32_f32; nrows = 4; if (backend_ctx->gpu_family == INTEL) { nth0 = 32; nth1 = 1; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 1; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3)); break; case GGML_TYPE_F16: //GGML_ASSERT(ne02 == ne12); if (backend_ctx->gpu_family == INTEL) { nth0 = 32; nth1 = 1; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 1; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } if (src1t == GGML_TYPE_F32) { if (ne11 * ne12 < 4) { kernel = backend_ctx->kernel_mul_mat_f16_f32_1row; } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { kernel = backend_ctx->kernel_mul_mat_f16_f32_l4; nrows = ne11; } else { kernel = backend_ctx->kernel_mul_mat_f16_f32; nrows = 4; } } else { kernel = backend_ctx->kernel_mul_mat_f16_f16; nrows = 4; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 16, 
sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3)); break; case GGML_TYPE_Q4_0: // This should have been satisfied. GGML_ASSERT(ne11 == ne1); GGML_ASSERT(ne01 == ne0); #ifdef GGML_OPENCL_SOA_Q if (backend_ctx->gpu_family == INTEL) { nth0 = 16; nth1 = 1; kernel = backend_ctx->kernel_mul_mat_q4_0_f32_8x_flat; ndst = 8; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 1; kernel = backend_ctx->kernel_mul_mat_q4_0_f32_8x_flat; ndst =8; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); #else // GGML_OPENCL_SOA_Q if (backend_ctx->gpu_family == INTEL) { // Use 1D local size. Each workgroup is a SIMD group. Each SIMD // group produces N_DST (4 for Q4_0 kernel) values in the result. // The number of workgroups on dim 0 (the leading dimension) is // the nearest multiple of 4 that covers ne0 (equals ne01). 
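// Hence the launch after this switch uses ceil(ne01 / ndst) workgroups
// along dim 0 (ndst = 4 here), each SIMD group of nth0 work-items
// producing ndst output rows.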
nth0 = 16; nth1 = 1; kernel = backend_ctx->kernel_mul_mat_q4_0_f32; ndst = 4; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 1; kernel = backend_ctx->kernel_mul_mat_q4_0_f32_v; ndst = 4; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); #endif // GGML_OPENCL_SOA_Q break; case GGML_TYPE_Q4_1: case GGML_TYPE_Q8_0: { #ifdef GGML_OPENCL_SOA_Q kernel = backend_ctx->kernel_mul_mv_q8_0_f32_flat; // nth0 - subgroup size // nth1 - number of subgroups per workgroup // ndst - number of output values per workgroup = output per subgroup * number of subgroups if (backend_ctx->gpu_family == INTEL) { nth0 = 16; nth1 = 2; ndst = nth1*4; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 2; ndst = nth1*4; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3)); #else kernel = backend_ctx->kernel_mul_mv_q8_0_f32; // nth0 - subgroup size // nth1 - number of subgroups per workgroup // ndst - number of output values per workgroup = output per subgroup * number of subgroups if (backend_ctx->gpu_family == INTEL) { nth0 = 16; nth1 = 2; ndst = nth1*4; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 2; ndst = nth1*4; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), 
&extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3)); #endif // GGML_OPENCL_SOA_Q break; } case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: kernel = backend_ctx->kernel_mul_mv_q6_K_f32; if (backend_ctx->gpu_family == INTEL) { nth0 = 2; nth1 = 16; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 2; nth1 = 64; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); break; case GGML_TYPE_MXFP4: { #ifdef GGML_OPENCL_SOA_Q kernel = backend_ctx->kernel_mul_mv_mxfp4_f32_flat; cl_mem q; if (backend_ctx->gpu_family == INTEL) { nth0 = 16; nth1 = 2; ndst = nth1*2; q = extra0_mxfp4->q; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 2; ndst = nth1*2; q = extra0_mxfp4->q_img; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_mxfp4->e)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 
13, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r3)); #else kernel = backend_ctx->kernel_mul_mv_mxfp4_f32; if (backend_ctx->gpu_family == INTEL) { nth0 = 16; nth1 = 2; ndst = nth1*2; } else if (backend_ctx->gpu_family == ADRENO) { nth0 = 64; nth1 = 2; ndst = nth1*2; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r3)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(float)*nth0,nullptr)); #endif break; } default: GGML_ASSERT(false && "not implemented"); } if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_MXFP4 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K) { // Each SIMD group produces N_DST values in the result. Assuming each // workgroup has N_SIMDGROUP SIMD groups, then each workgroup will // produce N_DST*N_SIMDGROUP values in the result. Hence, the grid size // (number of workgroups) will be a nearest multiple of // N_DST*N_SIMDGROUP to cover the size of the dimension. Below, 4 is // N_DST*N_SIMDGROUP (see the kernel for Q4_0 matmul). 
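// Example with hypothetical sizes: on the Q8_0 path for Adreno
// (nth0 = 64, nth1 = 2, ndst = 8), ne01 = 4096 output rows give
//   global[0] = (4096 + 7)/8 * 64 = 32768 work-items,
// i.e. 512 workgroups of 64 x 2 work-items, each workgroup covering
// 8 rows of the result.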
size_t global_work_size[] = {(size_t)(ne01 + ndst-1)/ndst*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else if (src0t == GGML_TYPE_Q4_K) { GGML_ASSERT(false && "not implemented"); } else if (src0t == GGML_TYPE_Q3_K) { GGML_ASSERT(false && "not implemented"); } else if (src0t == GGML_TYPE_Q5_K) { GGML_ASSERT(false && "not implemented"); } else if (src0t == GGML_TYPE_Q6_K) { size_t global_work_size[] = {(size_t)(ne01+1)/2*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { int64_t ny = (ne11 + nrows - 1)/nrows; size_t global_work_size[] = {(size_t)ne01*nth0, (size_t)ny*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); const ggml_tensor * src2 = dst->src[2]; GGML_ASSERT(src2); GGML_ASSERT(src2->extra); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offset2 = extra2->offset + src2->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; GGML_UNUSED(offset0); #ifdef GGML_OPENCL_SOA_Q ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra; ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra; ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra; #endif const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; const int ne12 = src1->ne[2]; const int ne13 = src1->ne[3]; const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; const cl_ulong nb13 = src1->nb[3]; const int ne20 = src2->ne[0]; const int ne21 = src2->ne[1]; const cl_ulong nb21 = src2->nb[1]; const cl_ulong nb20 = src2->nb[0]; UNUSED(nb20); const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; const int r2 = ne12/ne02; const int r3 = ne13/ne03; const int dst_rows = ne20*ne21; // ne20 = n_used_experts, ne21 = n_rows GGML_ASSERT(ne00 == ne10); int sgs = 32; // subgroup size int nsg = 1; // number of subgroups int nrows = 1; // number of row in src1 int ndst = 4; // number of values produced by each subgroup cl_kernel kernel; // subgroup mat vec switch (src0->type) { case GGML_TYPE_Q4_0: { kernel = backend_ctx->kernel_mul_mv_id_q4_0_f32_8x_flat; if (backend_ctx->gpu_family == INTEL) { sgs = 16; nsg = 1; ndst = 8; } else if (backend_ctx->gpu_family == ADRENO) { sgs = 64; 
nsg = 1; ndst = 8; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne20)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne21)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb21)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &r3)); break; } case GGML_TYPE_Q8_0: { #ifdef GGML_OPENCL_SOA_Q kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32_flat; if (backend_ctx->gpu_family == INTEL) { sgs = 16; nsg = 2; ndst = 4; } else if (backend_ctx->gpu_family == ADRENO) { sgs = 64; nsg = 2; ndst = 4; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1)); #else kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32; if (backend_ctx->gpu_family == INTEL) { sgs = 16; nsg = 2; ndst = 4; } else if (backend_ctx->gpu_family == ADRENO) { sgs = 64; nsg = 2; ndst = 4; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } 
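// Non-flat Q8_0 variant for mul_mat_id: the whole block_q8_0 tensor is
// passed as one buffer (data_device + offset0) instead of separate q/d
// planes; the subgroup configuration (sgs, nsg, ndst) matches the flat
// path above.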
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1)); #endif // GGML_OPENCL_SOA_Q break; } case GGML_TYPE_MXFP4: { #ifdef GGML_OPENCL_USE_ADRENO_KERNELS if (use_adreno_moe_kernels(backend_ctx, src0)) { cl_int status; size_t local_size[3] = {64, 2, 1}; size_t global_size[3] = {64, 2, 1}; cl_mem src1_sub_buffer, buf_src1_image, buf_src2; int tile_size = 320; if (ne12 == 1) { // for gemv kernel = backend_ctx->kernel_gemv_moe_mxfp4_f32; // create a sub_buffer for src2 cl_buffer_region region; region.origin = offset2; region.size = ne20 * ne21 * sizeof(int); buf_src2 = clCreateSubBuffer(extra2->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &status); CL_CHECK(status); // set thread grid global_size[0] = static_cast(ne01); global_size[1] = 4; global_size[2] = static_cast(ne20); local_size[1] = 4; } else { // for gemm kernel = backend_ctx->kernel_gemm_moe_mxfp4_f32; // preprocess router table int num_tiles_per_expert = (ne01 + tile_size - 1) / tile_size; void * host_src2_reorder = malloc(ne20 * ne21 * 4 * num_tiles_per_expert * sizeof(short)); void * host_src2 = malloc(ne21 * nb21); CL_CHECK(clEnqueueReadBuffer(backend_ctx->queue, extra2->data_device, CL_TRUE, offset2, ne21 * nb21, host_src2, 0, NULL, NULL)); int total_experts = nb21 / nb20; int out_idx = 0; for (int i_expert = 0; i_expert < ne02; i_expert++) { for (int i_tile = 0; i_tile < num_tiles_per_expert; i_tile++) { for (int j = 0; j < ne21; j++) { for (int i = 0; i < ne20; i++) { int expert = ((int *)host_src2)[j * total_experts + i]; if (i_expert == expert) { ((short *)host_src2_reorder)[out_idx] = static_cast(expert); ((short *)host_src2_reorder)[out_idx + 1] = static_cast(j * ne11 + (i % ne11)); ((short *)host_src2_reorder)[out_idx + 2] = static_cast(j * ne20 + i); ((short *)host_src2_reorder)[out_idx + 3] = static_cast(i_tile); out_idx += 4; } } } } } buf_src2 = clCreateBuffer(backend_ctx->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, ne20 * ne21 * 4 * num_tiles_per_expert * sizeof(short), host_src2_reorder, &status); CL_CHECK(status); // set thread grid global_size[0] = static_cast(tile_size); global_size[2] = static_cast(ne20 * ne21 * num_tiles_per_expert); } // create a sub_buffer for src1 cl_buffer_region region; region.origin = offset1; region.size = ne10 * ne11 * 
ne12 * sizeof(float); src1_sub_buffer = clCreateSubBuffer(extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &status); CL_CHECK(status); // create image for src1 cl_image_format image_format_buf_src1 = {CL_RGBA, CL_FLOAT}; cl_image_desc image_desc_buf_src1 = {CL_MEM_OBJECT_IMAGE1D_BUFFER, static_cast(ne10 * ne11 * ne12 / 4), 0,0,0,0,0,0,0, {src1_sub_buffer}}; buf_src1_image = clCreateImage(backend_ctx->context, CL_MEM_READ_ONLY, &image_format_buf_src1, &image_desc_buf_src1, NULL, &status); CL_CHECK(status); // Set kernel args int arg_idx = 0; CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &extra0_mxfp4->q)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &extra0_mxfp4->e)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &buf_src1_image)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &buf_src2)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &ne01)); if (ne12 == 1) { CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &ne11)); } else { CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &tile_size)); } // launch kernel backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_size, local_size, dst); // deallocate sub buffers and images CL_CHECK(clReleaseMemObject(src1_sub_buffer)); CL_CHECK(clReleaseMemObject(buf_src1_image)); CL_CHECK(clReleaseMemObject(buf_src2)); return; } // else fallback to generic kernel #endif // GGML_OPENCL_USE_ADRENO_KERNELS #ifdef GGML_OPENCL_SOA_Q kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat; cl_mem q; if (backend_ctx->gpu_family == INTEL) { sgs = 16; nsg = 2; ndst = 2; q = extra0_mxfp4->q; } else if (backend_ctx->gpu_family == ADRENO) { sgs = 64; nsg = 1; ndst = 4; q = extra0_mxfp4->q_img; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_mxfp4->e)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne20)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne21)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb21)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3)); #else // GGML_OPENCL_SOA_Q kernel = 
backend_ctx->kernel_mul_mv_id_mxfp4_f32; if (backend_ctx->gpu_family == INTEL) { sgs = 16; nsg = 2; ndst = 2; } else if (backend_ctx->gpu_family == ADRENO) { sgs = 64; nsg = 2; ndst = 2; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne20)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne21)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb21)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(float)*sgs,nullptr)); #endif // GGML_OPENCL_SOA_Q break; } default: GGML_ASSERT(false && "not implemented");; } int _ne1 = 1; int ne123 = dst_rows; size_t global_work_size[] = {(size_t)(ne01+ndst*nsg-1)/(ndst*nsg)*sgs, (size_t)(_ne1+nrows-1)/nrows*nsg, (size_t)ne123}; size_t local_work_size[] = {(size_t)sgs, (size_t)nsg, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_UNUSED(src1); GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; float scale; float bias; memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(float)); memcpy(&bias, ((int32_t *) dst->op_params) + 1, sizeof(float)); ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel = backend_ctx->kernel_scale; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(float), &scale)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(float), &bias)); int n = ggml_nelements(dst)/4; size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; size_t * 
local_work_size_ptr = local_work_size; if (n % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_cpy(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); // GGML_OP_CPY happens between src0 and src1. // GGML_OP_DUP and GGML_OP_CONT happen between src0 and dst. UNUSED(dst); const int ne00 = src0 ? src0->ne[0] : 0; const int ne01 = src0 ? src0->ne[1] : 0; const int ne02 = src0 ? src0->ne[2] : 0; const int ne03 = src0 ? src0->ne[3] : 0; const cl_ulong nb00 = src0 ? src0->nb[0] : 0; const cl_ulong nb01 = src0 ? src0->nb[1] : 0; const cl_ulong nb02 = src0 ? src0->nb[2] : 0; const cl_ulong nb03 = src0 ? src0->nb[3] : 0; const int ne10 = src1 ? src1->ne[0] : 0; const int ne11 = src1 ? src1->ne[1] : 0; const int ne12 = src1 ? src1->ne[2] : 0; const int ne13 = src1 ? src1->ne[3] : 0; const cl_ulong nb10 = src1 ? src1->nb[0] : 0; const cl_ulong nb11 = src1 ? src1->nb[1] : 0; const cl_ulong nb12 = src1 ? src1->nb[2] : 0; const cl_ulong nb13 = src1 ? src1->nb[3] : 0; const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT; const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_kernel kernel; switch (src0t) { case GGML_TYPE_F32: switch (src1t) { case GGML_TYPE_F16: kernel = backend_ctx->kernel_cpy_f32_f16; break; case GGML_TYPE_F32: kernel = backend_ctx->kernel_cpy_f32_f32; break; default: GGML_ASSERT(false && "not implemented"); } break; case GGML_TYPE_F16: switch (src1t) { case GGML_TYPE_F16: kernel = backend_ctx->kernel_cpy_f16_f16; break; case GGML_TYPE_F32: kernel = backend_ctx->kernel_cpy_f16_f32; break; default: GGML_ASSERT(false && "not implemented"); } break; default: GGML_ASSERT(false && "not implemented"); } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne11)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), 
&nb13)); const int nth = MIN(64, ne00); size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, src1); } static void ggml_cl_dup(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { ggml_cl_cpy(backend, src0, dst, nullptr); UNUSED(src1); } static void ggml_cl_diag_mask_inf(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); UNUSED(src1); int n_past = ((int32_t *)(dst->op_params))[0]; const int ne00 = src0 ? src0->ne[0] : 0; const int ne01 = src0 ? src0->ne[1] : 0; const int ne02 = src0 ? src0->ne[2] : 0; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_kernel kernel; if (ne00%8 == 0) { kernel = backend_ctx->kernel_diag_mask_inf_8; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &n_past)); size_t global_work_size[] = {(size_t)ne00*ne01*ne02/8, 1, 1}; size_t local_work_size[] = {64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { kernel = backend_ctx->kernel_diag_mask_inf; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &n_past)); size_t global_work_size[] = {(size_t)ne00, (size_t)ne01, (size_t)ne02}; size_t local_work_size[] = {64, 1, 1}; size_t * local_work_size_ptr = local_work_size; if (ne00 % 64 != 0 && !backend_ctx->non_uniform_workgroups) { local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } } static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); // Softmax can now fuse KQ mask and KQ scale, which used to be two additional // ops before softmax. It now also fuses alibi if `max_bias > 0`. For llama, // alibi is not used; however, for some other models, it is used. 
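// In the fused form the kernel conceptually evaluates
//   dst = softmax(src0*scale + mask*slope)
// per row, where the per-head ALiBi slope is derived from max_bias, m0, m1
// and n_head_log2 computed below, and the mask term is skipped when src1 is
// absent (a sketch of the semantics, not the exact kernel code).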
// KQ_mask if (src1) { GGML_ASSERT(src1); GGML_ASSERT(src1->extra); } const ggml_tensor * src2 = dst->src[2]; if (src2) { GGML_ASSERT(src2->extra); } ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; ggml_tensor_extra_cl * extra1 = src1 ? (ggml_tensor_extra_cl *)src1->extra : nullptr; ggml_tensor_extra_cl * extra2 = src2 ? (ggml_tensor_extra_cl *)src2->extra : nullptr; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_ulong offset1 = extra1 ? extra1->offset + src1->view_offs : offset0; cl_ulong offset2 = extra2 ? extra2->offset + src2->view_offs : offset0; const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_long nb01 = src0->nb[1]; const cl_long nb02 = src0->nb[2]; const cl_long nb03 = src0->nb[3]; const int ne12 = src1 ? src1->ne[2] : 0; const int ne13 = src1 ? src1->ne[3] : 0; const cl_long nb11 = src1 ? src1->nb[1] : 0; const cl_long nb12 = src1 ? src1->nb[2] : 0; const cl_long nb13 = src1 ? src1->nb[3] : 0; const cl_long nb1 = dst->nb[1]; const cl_long nb2 = dst->nb[2]; const cl_long nb3 = dst->nb[3]; float scale, max_bias; memcpy(&scale, dst->op_params + 0, sizeof(float)); memcpy(&max_bias, dst->op_params + 1, sizeof(float)); const int n_head = src0->ne[2]; const int n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); // Local size must be wave size. Each workgroup is a wave, working on a row, // where a row corresponds to leading dimension. int nth = MIN(32, ne00); if (backend_ctx->gpu_family == INTEL) { // This is the same as the initial value. nth = MIN(32, ne00); } else if (backend_ctx->gpu_family == ADRENO) { nth = 64; } else { GGML_ASSERT(false && "TODO: Unknown GPU"); } cl_kernel kernel; if (ne00%4 == 0) { if (use_f16) { kernel = backend_ctx->kernel_soft_max_4_f16; } else { kernel = backend_ctx->kernel_soft_max_4; } } else { if (use_f16) { kernel = backend_ctx->kernel_soft_max_f16; } else { kernel = backend_ctx->kernel_soft_max; } } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), extra1 ? &extra1->data_device : &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), extra2 ? 
&extra2->data_device : &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne12)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne13)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb13)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb3)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(float), &scale)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(float), &max_bias)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(float), &m0)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(float), &m1)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &n_head_log2)); size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_rope(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; ggml_tensor * src2 = dst->src[2]; ggml_tensor_extra_cl * extra2 = src2 ? (ggml_tensor_extra_cl *)src2->extra : nullptr; cl_ulong offset2 = extra2 ? extra2->offset + src2->view_offs : offset0; const int ne00 = src0 ? src0->ne[0] : 0; const int ne01 = src0 ? src0->ne[1] : 0; const int ne02 = src0 ? src0->ne[2] : 0; const int ne03 = src0 ? src0->ne[3] : 0; const cl_ulong nb00 = src0 ? src0->nb[0] : 0; const cl_ulong nb01 = src0 ? src0->nb[1] : 0; const cl_ulong nb02 = src0 ? src0->nb[2] : 0; const cl_ulong nb03 = src0 ? src0->nb[3] : 0; const int ne10 = src1 ? src1->ne[0] : 0; const int ne11 = src1 ? src1->ne[1] : 0; UNUSED(ne11); const int ne12 = src1 ? src1->ne[2] : 0; UNUSED(ne12); const int ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13); const int ne0 = dst ? dst->ne[0] : 0; const int ne1 = dst ? dst->ne[1] : 0; const int ne2 = dst ? dst->ne[2] : 0; const int ne3 = dst ? dst->ne[3] : 0; const cl_ulong nb0 = dst ? dst->nb[0] : 0; const cl_ulong nb1 = dst ? dst->nb[1] : 0; const cl_ulong nb2 = dst ? dst->nb[2] : 0; const cl_ulong nb3 = dst ? 
dst->nb[3] : 0; GGML_ASSERT(ne10 % ne02 == 0); GGML_ASSERT(ne10 >= ne02); int nth = MIN(64, ne00); const int n_past = ((int *) dst->op_params)[0]; const int n_dims = ((int *) dst->op_params)[1]; const int mode = ((int *) dst->op_params)[2]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; float freq_base; float freq_scale; float ext_factor; float attn_factor; float beta_fast; float beta_slow; int32_t sections[4]; memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); memcpy(§ions, (int32_t *) dst->op_params + 11, sizeof(int32_t)*4); const bool is_neox = mode & 2; const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; const bool is_vision = mode == GGML_ROPE_TYPE_VISION; const int is_imrope = mode == GGML_ROPE_TYPE_IMROPE; if (is_mrope) { GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); } if (is_vision) { GGML_ASSERT(n_dims == ne00/2); } cl_kernel kernel; if (is_neox) { switch (src0->type) { case GGML_TYPE_F32: kernel = backend_ctx->kernel_rope_neox_f32; break; case GGML_TYPE_F16: kernel = backend_ctx->kernel_rope_neox_f16; break; default: GGML_ASSERT(false); }; } else if (is_mrope && !is_vision) { switch (src0->type) { case GGML_TYPE_F32: kernel = backend_ctx->kernel_rope_multi_f32; break; case GGML_TYPE_F16: kernel = backend_ctx->kernel_rope_multi_f16; break; default: GGML_ASSERT(false); }; } else if (is_vision) { switch (src0->type) { case GGML_TYPE_F32: kernel = backend_ctx->kernel_rope_vision_f32; break; case GGML_TYPE_F16: kernel = backend_ctx->kernel_rope_vision_f16; break; default: GGML_ASSERT(false); } } else { switch (src0->type) { case GGML_TYPE_F32: kernel = backend_ctx->kernel_rope_norm_f32; break; case GGML_TYPE_F16: kernel = backend_ctx->kernel_rope_norm_f16; break; default: GGML_ASSERT(false); }; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), extra2 ? 
&extra2->data_device : &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb00)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne1)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne2)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne3)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb0)); CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 23, sizeof(cl_ulong), &nb3)); CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &n_past)); CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &n_dims)); CL_CHECK(clSetKernelArg(kernel, 26, sizeof(int), &n_ctx_orig)); CL_CHECK(clSetKernelArg(kernel, 27, sizeof(float), &freq_base)); CL_CHECK(clSetKernelArg(kernel, 28, sizeof(float), &freq_scale)); CL_CHECK(clSetKernelArg(kernel, 29, sizeof(float), &ext_factor)); CL_CHECK(clSetKernelArg(kernel, 30, sizeof(float), &attn_factor)); CL_CHECK(clSetKernelArg(kernel, 31, sizeof(float), &beta_fast)); CL_CHECK(clSetKernelArg(kernel, 32, sizeof(float), &beta_slow)); // both mrope and vision kernels have sections if (is_mrope || is_vision) { CL_CHECK(clSetKernelArg(kernel, 33, sizeof(int32_t)*4, §ions)); } // only mrope has is_imrope if (is_mrope && !is_vision) { CL_CHECK(clSetKernelArg(kernel, 34, sizeof(int), &is_imrope)); } size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_im2col(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); // src0 - filter, src1 - input GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; const cl_long IC = src1->ne[is_2D ? 2 : 1]; const cl_long IH = is_2D ? src1->ne[1] : 1; const cl_long IW = src1->ne[0]; const cl_long KH = is_2D ? 
src0->ne[1] : 1; const cl_long KW = src0->ne[0]; const cl_long OH = is_2D ? dst->ne[2] : 1; const cl_long OW = dst->ne[1]; // nb is byte offset, src is type float32 const cl_ulong delta_offset = src1->nb[is_2D ? 2 : 1]/4; const cl_long batch = src1->ne[is_2D ? 3 : 2]; const cl_ulong batch_offset = src1->nb[is_2D ? 3 : 2]/4; const cl_long pelements = OW*KW*KH; const cl_long CHW = IC*KH*KW; cl_kernel kernel; if(dst->type == GGML_TYPE_F16) { kernel = backend_ctx->kernel_im2col_f16; } else { kernel = backend_ctx->kernel_im2col_f32; } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_ulong), &batch_offset)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &delta_offset)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_long), &IW)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_long), &IH)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_long), &IC)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_long), &OW)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_long), &OH)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_long), &KW)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_long), &KH)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_long), &pelements)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_long), &CHW)); CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &s0)); CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &s1)); CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &p0)); CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &p1)); CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &d0)); CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &d1)); const int num_blocks = (pelements + 256 - 1) / 256; size_t global_work_size[] = {(size_t)num_blocks*256, (size_t)OH, (size_t)batch*IC}; size_t local_work_size[] = {256, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_argsort(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_UNUSED(src1); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_I32); GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; const int ne00 = src0->ne[0]; const int nrows = ggml_nrows(src0); int ne00_padded = 1; while (ne00_padded < ne00) { ne00_padded *= 2; } int order = (enum ggml_sort_order) dst->op_params[0]; cl_kernel kernel = backend_ctx->kernel_argsort_f32_i32; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne00_padded)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &order)); CL_CHECK(clSetKernelArg(kernel, 7, 
ne00_padded*sizeof(int), NULL)); size_t global_work_size[] = {(size_t)ne00_padded, (size_t)nrows, (size_t)1}; size_t local_work_size[] = {(size_t)ne00_padded, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_sum_rows(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_UNUSED(src1); GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; const int ne00 = src0->ne[0]; const int ne01 = src0->ne[1]; const int ne02 = src0->ne[2]; const int ne03 = src0->ne[3]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; const cl_ulong nb03 = src0->nb[3]; const cl_ulong nb1 = dst->nb[1]; const cl_ulong nb2 = dst->nb[2]; const cl_ulong nb3 = dst->nb[3]; cl_kernel kernel = backend_ctx->kernel_sum_rows_f32; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb2)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb3)); size_t global_work_size[] = {(size_t)ne01, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)64, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_glu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); GGML_ASSERT(dst); GGML_ASSERT(dst->extra); GGML_ASSERT(ggml_is_contiguous_1(src0)); if (src1) { GGML_ASSERT(src1); GGML_ASSERT(src1->extra); GGML_ASSERT(ggml_are_same_shape(src0, src1)); } ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; cl_kernel kernel; switch (ggml_get_glu_op(dst)) { case GGML_GLU_OP_GEGLU: if (dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_geglu; } else { kernel = backend_ctx->kernel_geglu_f16; } break; case GGML_GLU_OP_REGLU: if (dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_reglu; } else { kernel = backend_ctx->kernel_reglu_f16; } break; case GGML_GLU_OP_SWIGLU: if (dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_swiglu; } else { kernel = backend_ctx->kernel_swiglu_f16; } break; case GGML_GLU_OP_SWIGLU_OAI: kernel = backend_ctx->kernel_swiglu_oai; break; case GGML_GLU_OP_GEGLU_ERF: if (dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_geglu_erf; } else { kernel = 
backend_ctx->kernel_geglu_erf_f16; } break; case GGML_GLU_OP_GEGLU_QUICK: if (dst->type == GGML_TYPE_F32) { kernel = backend_ctx->kernel_geglu_quick; } else { kernel = backend_ctx->kernel_geglu_quick_f16; } break; default: GGML_ABORT("Unsupported glu op"); } ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; ggml_tensor_extra_cl * extra1 = src1 ? (ggml_tensor_extra_cl *)src1->extra : nullptr; cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; cl_ulong offset1 = extra1 ? extra1->offset + src1->view_offs : offset0; const int ne0 = dst->ne[0]; const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb11 = src1 ? src1->nb[1] : nb01; const cl_ulong nb1 = dst->nb[1]; const int swp = ggml_get_op_params_i32(dst, 1); const float alpha = ggml_get_op_params_f32(dst, 2); const float limit = ggml_get_op_params_f32(dst, 3); const int ne00_off = src1 ? 0 : (swp ? ne0 : 0); const int ne10_off = src1 ? 0 : (swp ? 0 : ne0); CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), src1 ? &extra1->data_device : &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne0)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb1)); CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne00_off)); CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne10_off)); if (ggml_get_glu_op(dst) == GGML_GLU_OP_SWIGLU_OAI) { CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float), &limit)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(float), &alpha)); } const size_t nrows = ggml_nrows(src0); size_t nth = 512; size_t global_work_size[] = {nrows*nth, 1, 1}; size_t local_work_size[] = {nth, 1, 1}; backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } //------------------------------------------------------------------------------ // Op offloading //------------------------------------------------------------------------------ typedef void (*ggml_cl_func_t)(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor) { ggml_cl_func_t func = nullptr; ggml_tensor * src0 = tensor->src[0]; ggml_tensor * src1 = tensor->src[1]; const bool any_on_device = tensor->extra || (src0 != nullptr && src0->extra) || (src1 != nullptr && src1->extra); switch (tensor->op) { case GGML_OP_GET_ROWS: if (!any_on_device) { return false; } func = ggml_cl_get_rows; break; case GGML_OP_SET_ROWS: if (!any_on_device) { return false; } func = ggml_cl_set_rows; break; case GGML_OP_CPY: if (!any_on_device) { return false; } func = ggml_cl_cpy; break; case GGML_OP_DUP: case GGML_OP_CONT: if (!any_on_device) { return false; } func = ggml_cl_dup; break; case GGML_OP_ADD: if (!any_on_device) { return false; } func = ggml_cl_add; break; case GGML_OP_ADD_ID: if (!any_on_device) { return false; } func = ggml_cl_add_id; break; case GGML_OP_MUL: if (!any_on_device) { return false; } func = ggml_cl_mul; break; case 
GGML_OP_DIV: if (!any_on_device) { return false; } func = ggml_cl_div; break; case GGML_OP_SUB: if (!any_on_device) { return false; } func = ggml_cl_sub; break; case GGML_OP_SQR: if (!any_on_device) { return false; } func = ggml_cl_sqr; break; case GGML_OP_SQRT: if (!any_on_device) { return false; } func = ggml_cl_sqrt; break; case GGML_OP_MEAN: if (!any_on_device) { return false; } func = ggml_cl_mean; break; case GGML_OP_UNARY: switch (ggml_get_unary_op(tensor)) { case GGML_UNARY_OP_GELU: if (!any_on_device) { return false; } func = ggml_cl_gelu; break; case GGML_UNARY_OP_GELU_ERF: if (!any_on_device) { return false; } func = ggml_cl_gelu_erf; break; case GGML_UNARY_OP_GELU_QUICK: if (!any_on_device) { return false; } func = ggml_cl_gelu_quick; break; case GGML_UNARY_OP_SILU: if (!any_on_device) { return false; } func = ggml_cl_silu; break; case GGML_UNARY_OP_RELU: if (!any_on_device) { return false; } func = ggml_cl_relu; break; case GGML_UNARY_OP_SIGMOID: if (!any_on_device) { return false; } func = ggml_cl_sigmoid; break; case GGML_UNARY_OP_TANH: if (!any_on_device) { return false; } func = ggml_cl_tanh; break; default: return false; } break; case GGML_OP_GLU: if (!any_on_device) { return false; } func = ggml_cl_glu; break; case GGML_OP_CLAMP: if (!any_on_device) { return false; } func = ggml_cl_clamp; break; case GGML_OP_NORM: if (!any_on_device) { return false; } func = ggml_cl_norm; break; case GGML_OP_RMS_NORM: if (!any_on_device) { return false; } func = ggml_cl_rms_norm; break; case GGML_OP_GROUP_NORM: if (!any_on_device) { return false; } func = ggml_cl_group_norm; break; case GGML_OP_REPEAT: if (!any_on_device) { return false; } func = ggml_cl_repeat; break; case GGML_OP_PAD: if (!any_on_device) { return false; } ggml_cl_pad(backend, tensor->src[0], tensor); return true; case GGML_OP_UPSCALE: if (!any_on_device) { return false; } ggml_cl_upscale(backend, tensor->src[0], tensor); return true; case GGML_OP_CONV_2D: if (!any_on_device) { return false; } func = ggml_cl_conv_2d; break; case GGML_OP_SSM_CONV: if (!any_on_device) { return false; } func = ggml_cl_ssm_conv; break; case GGML_OP_CONCAT: if (!any_on_device) { return false; } func = ggml_cl_concat; break; case GGML_OP_TIMESTEP_EMBEDDING: if (!any_on_device) { return false; } ggml_cl_timestep_embedding(backend, tensor->src[0], tensor); return true; case GGML_OP_MUL_MAT: if (!any_on_device && !ggml_cl_can_mul_mat(tensor->src[0], tensor->src[1], tensor)) { return false; } func = ggml_cl_mul_mat; break; case GGML_OP_MUL_MAT_ID: if (!any_on_device) { return false; } func = ggml_cl_mul_mat_id; break; case GGML_OP_SCALE: if (!any_on_device) { return false; } func = ggml_cl_scale; break; case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: if (!any_on_device) { return false; } func = ggml_cl_nop; break; case GGML_OP_DIAG_MASK_INF: if (!any_on_device) { return false; } func = ggml_cl_diag_mask_inf; break; case GGML_OP_SOFT_MAX: if (!any_on_device) { return false; } func = ggml_cl_soft_max; break; case GGML_OP_ROPE: if (!any_on_device) { return false; } func = ggml_cl_rope; break; case GGML_OP_IM2COL: if (!any_on_device) { return false; } func = ggml_cl_im2col; break; case GGML_OP_ARGSORT: if (!any_on_device) { return false; } func = ggml_cl_argsort; break; case GGML_OP_SUM_ROWS: if (!any_on_device) { return false; } func = ggml_cl_sum_rows; break; case GGML_OP_FLASH_ATTN_EXT: if (!any_on_device) { return false; } ggml_cl_flash_attn(backend, tensor->src[0], tensor->src[1], tensor); return true; 
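// ops without a case above are not handled by the OpenCL backend; the default case below reports this by returning false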
default: return false; } func(backend, tensor->src[0], tensor->src[1], tensor); return true; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/000077500000000000000000000000001512524704700210225ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-opencl/kernels/add.cl000066400000000000000000000120261512524704700220730ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // add //------------------------------------------------------------------------------ // general-purpose kernel for addition of two tensors // pros: works for non-contiguous tensors, supports broadcast across dims 1, 2 and 3 // cons: not very efficient kernel void kernel_add( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global float *)(dst_ptr + i0*nb0)) = *((global float *)(src0_ptr + i0*nb00)) + *((global float *)(src1_ptr + i10*nb10)); } } // assumption: src1 is a row // broadcast src1 into src0 kernel void kernel_add_row( global float4 * src0, ulong offset0, global float4 * src1, ulong offset1, global float4 * dst, ulong offsetd, int ne ) { src0 = (global float4*)((global char*)src0 + offset0); src1 = (global float4*)((global char*)src1 + offset1); dst = (global float4*)((global char*)dst + offsetd); // This performs better than using %. 
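// idx1 = gid - (gid/ne)*ne folds the flat work-item id into [0, ne), so the single src1 row (ne float4 elements) is broadcast across every row of src0 without using an integer modulo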
uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] + src1[idx1]; } kernel void kernel_add_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int type_src0, int type_src1 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; half v0, v1; if (type_src0 == 1) { v0 = convert_half(*((global float *)(src0_ptr + i0*nb00))); } else { v0 = *((global half *)(src0_ptr + i0*nb00)); } if (type_src1 == 1) { v1 = convert_half(*((global float *)(src1_ptr + i10*nb10))); } else { v1 = *((global half *)(src1_ptr + i10*nb10)); } *((global half *)(dst_ptr + i0*nb0)) = v0 + v1; } } kernel void kernel_add_row_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global half4 * dst, ulong offsetd, int ne, int type_src0, int type_src1 ) { dst = (global half4*)((global char*)dst + offsetd); // This performs better than using %. uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne half4 v0, v1; if (type_src0 == 1) { global float4* src0_f32 = (global float4*)((global char*)src0 + offset0); v0 = convert_half4(src0_f32[gid]); } else { global half4* src0_f16 = (global half4*)((global char*)src0 + offset0); v0 = src0_f16[gid]; } if (type_src1 == 1) { global float4* src1_f32 = (global float4*)((global char*)src1 + offset1); v1 = convert_half4(src1_f32[idx1]); } else { global half4* src1_f16 = (global half4*)((global char*)src1 + offset1); v1 = src1_f16[idx1]; } dst[gid] = v0 + v1; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/add_id.cl000066400000000000000000000026271512524704700225550ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // add_id //------------------------------------------------------------------------------ kernel void kernel_add_id( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, ulong nb01, ulong nb02, ulong nb11, ulong nb21, int ne0, int ne1 ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); src2 = (global char*)((global char*)src2 + offset2); dst = (global char*)((global char*)dst + offsetd); int i1 = get_group_id(0); int i2 = get_group_id(1); const int i11 = *((global const int *) (src2 + i1*sizeof(int) + i2*nb21)); const size_t nb1 = ne0 * sizeof(float); const size_t nb2 = ne1 * nb1; global float * dst_row = (global float *)((global char *)dst + i1*nb1 + i2*nb2); global float * src0_row = (global float *)((global char *)src0 + i1*nb01 + i2*nb02); global float * src1_row = (global float *)((global char *)src1 + i11*nb11); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { 
dst_row[i0] = src0_row[i0] + src1_row[i0]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/argsort.cl000066400000000000000000000052611512524704700230270ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define SWAP(x, y, T) { T tmp = (x); (x) = (y); (y) = tmp; } enum ggml_sort_order { GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC, }; kernel void kernel_argsort_f32_i32( global float * src0, ulong offset0, global int * dst, ulong offsetd, const int ne00, const int ne00_pad, const int order, local int * dst_row ) { // bitonic sort int col = get_local_id(0); int row = get_group_id(1); if (col >= ne00_pad) { return; } src0 = (global char *)((global char *)src0 + offset0); dst = (global float *)((global char *)dst + offsetd); global float * x_row = src0 + row * ne00; // initialize indices dst_row[col] = col; barrier(CLK_LOCAL_MEM_FENCE); for (int k = 2; k <= ne00_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ne00 || (dst_row[ixj] < ne00 && (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj], int); } } else { if (dst_row[ixj] >= ne00 || (dst_row[col] < ne00 && (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj], int); } } } barrier(CLK_LOCAL_MEM_FENCE); } } // copy the result to dst without the padding if (col < ne00) { dst[row * ne00 + col] = dst_row[col]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/clamp.cl000066400000000000000000000012111512524704700224310ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // clamp //------------------------------------------------------------------------------ kernel void kernel_clamp( global float * src0, ulong offset0, global float * dst, ulong offsetd, float min, float max ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); dst[get_global_id(0)] = src0[get_global_id(0)] < min ? min : (src0[get_global_id(0)] > max ? 
max : src0[get_global_id(0)]); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/concat.cl000066400000000000000000000104661512524704700226200ustar00rootroot00000000000000kernel void kernel_concat_f32_contiguous( global const char * p_src0, ulong off_src0, global const char * p_src1, ulong off_src1, global char * p_dst, ulong off_dst, int d_ne00, int d_ne01, int d_ne02, // src0->ne[0..2] for the slice int d_ne10, int d_ne11, int d_ne12, // src1->ne[0..2] for the slice (d_ne1X must match d_ne0X on non-concat axes) int d_ne0, int d_ne1, int d_ne2, // dst->ne[0..2] for the slice int dim ) { global const float * src0 = (global const float*)((global char*)p_src0 + off_src0); global const float * src1 = (global const float*)((global char*)p_src1 + off_src1); global float * dst = (global float*)((global char*)p_dst + off_dst); int i0 = get_global_id(0); // Index along dst's 0th dimension int i1 = get_global_id(1); // Index along dst's 1st dimension int i2 = get_global_id(2); // Index along dst's 2nd dimension if (i0 >= d_ne0 || i1 >= d_ne1 || i2 >= d_ne2) { return; } ulong dst_idx = (ulong)i2 * d_ne0 * d_ne1 + (ulong)i1 * d_ne0 + i0; ulong src_idx; if (dim == 0) { if (i0 < d_ne00) { // Data from src0 src_idx = (ulong)i2 * d_ne00 * d_ne01 + (ulong)i1 * d_ne00 + i0; dst[dst_idx] = src0[src_idx]; } else { // Data from src1 src_idx = (ulong)i2 * d_ne10 * d_ne11 + (ulong)i1 * d_ne10 + (i0 - d_ne00); dst[dst_idx] = src1[src_idx]; } } else if (dim == 1) { if (i1 < d_ne01) { // Data from src0 src_idx = (ulong)i2 * d_ne00 * d_ne01 + (ulong)i1 * d_ne00 + i0; dst[dst_idx] = src0[src_idx]; } else { // Data from src1 src_idx = (ulong)i2 * d_ne10 * d_ne11 + (ulong)(i1 - d_ne01) * d_ne10 + i0; dst[dst_idx] = src1[src_idx]; } } else if (dim == 2) { if (i2 < d_ne02) { // Data from src0 src_idx = (ulong)i2 * d_ne00 * d_ne01 + (ulong)i1 * d_ne00 + i0; dst[dst_idx] = src0[src_idx]; } else { // Data from src1 src_idx = (ulong)(i2 - d_ne02) * d_ne10 * d_ne11 + (ulong)i1 * d_ne10 + i0; dst[dst_idx] = src1[src_idx]; } } } kernel void kernel_concat_f32_non_contiguous( global const char * p_src0, ulong off_src0, global const char * p_src1, ulong off_src1, global char * p_dst, ulong off_dst, long ne00, long ne01, long ne02, long ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, ulong nb10, ulong nb11, ulong nb12, ulong nb13, // Strides for src1 long d_ne0, long d_ne1, long d_ne2, long d_ne3, ulong d_nb0, ulong d_nb1, ulong d_nb2, ulong d_nb3, int dim ) { global const char * src0_base = p_src0 + off_src0; global const char * src1_base = p_src1 + off_src1; global char * dst_base = p_dst + off_dst; long current_i1 = get_global_id(0); // Index for dst_dim_1 long current_i2 = get_global_id(1); // Index for dst_dim_2 long current_i3 = get_global_id(2); // Index for dst_dim_3 if (current_i1 >= d_ne1 || current_i2 >= d_ne2 || current_i3 >= d_ne3) { return; } global const float * x_val_ptr; global float * y_val_ptr; for (long current_i0 = 0; current_i0 < d_ne0; ++current_i0) { bool use_src0; long s_i0 = current_i0, s_i1 = current_i1, s_i2 = current_i2, s_i3 = current_i3; if (dim == 0) { use_src0 = (current_i0 < ne00); if (!use_src0) { s_i0 = current_i0 - ne00; } } else if (dim == 1) { use_src0 = (current_i1 < ne01); if (!use_src0) { s_i1 = current_i1 - ne01; } } else if (dim == 2) { use_src0 = (current_i2 < ne02); if (!use_src0) { s_i2 = current_i2 - ne02; } } else { // dim == 3 use_src0 = (current_i3 < ne03); if (!use_src0) { s_i3 = current_i3 - ne03; } } if (use_src0) { x_val_ptr = (global const float *)(src0_base + 
(ulong)s_i3*nb03 + (ulong)s_i2*nb02 + (ulong)s_i1*nb01 + (ulong)s_i0*nb00); } else { x_val_ptr = (global const float *)(src1_base + (ulong)s_i3*nb13 + (ulong)s_i2*nb12 + (ulong)s_i1*nb11 + (ulong)s_i0*nb10); } y_val_ptr = (global float *)(dst_base + (ulong)current_i3*d_nb3 + (ulong)current_i2*d_nb2 + (ulong)current_i1*d_nb1 + (ulong)current_i0*d_nb0); *y_val_ptr = *x_val_ptr; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/conv2d.cl000066400000000000000000000155231512524704700225430ustar00rootroot00000000000000#ifdef USE_FP16 #pragma OPENCL EXTENSION cl_khr_fp16 : enable #define T_FLOAT half #define T_FLOAT4 half4 #define VSTORE_T_FLOAT4(data, offset, p) vstore_half4_rte(data, offset, p) #else #define T_FLOAT float #define T_FLOAT4 float4 #define VSTORE_T_FLOAT4(data, offset, p) vstore4(data, offset, p) #endif #if defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #else #define REQD_SUBGROUP_SIZE_128 #endif #define T_ACCUM float4 #define VEC_SIZE 4 #define BS_K 64 #define BS_NPQ 64 #define BS_CRS 16 #define TS_K 4 #define TS_NPQ 8 #define WG_K (BS_K / TS_K) #define WG_NPQ (BS_NPQ / TS_NPQ) #define BS_NPQ_VEC (BS_NPQ / VEC_SIZE) #define TS_NPQ_VEC (TS_NPQ / VEC_SIZE) static inline uint splitWork(uint work_size, uint block_size){ return (work_size + block_size - 1) / block_size; } REQD_SUBGROUP_SIZE_128 kernel void kernel_conv_2d( global void* p_knl, ulong off_knl, global void* p_src, ulong off_src, global void* p_dst, ulong off_dst, local void* shared, uint Cout, uint Cin, uint N, uint KW, uint KH, uint W, uint H, uint OW, uint OH, uint s0, uint s1, uint p0, uint p1, uint d0, uint d1, uint nb01, uint nb02, uint nb03, uint nb11, uint nb12, uint nb13, uint nb1, uint nb2, uint nb3 ) { global T_FLOAT* knl_data = (global T_FLOAT*) ((global char*)p_knl + off_knl); global T_FLOAT* src_data = (global T_FLOAT*) ((global char*)p_src + off_src); global T_FLOAT* dst_data = (global T_FLOAT*) ((global char*)p_dst + off_dst); const uint K = Cout; const uint CRS = Cin*KH*KW; const uint NPQ = N*OH*OW; const uint lid_k = get_local_id(0); const uint lid_npq = get_local_id(1); const uint tid = lid_npq * WG_K + lid_k; const uint B_idx_K = get_group_id(0); const uint B_idx_NPQ = get_group_id(1); const uint offset_k = B_idx_K * BS_K; const uint offset_npq = B_idx_NPQ * BS_NPQ; local T_FLOAT* Ash = (local T_FLOAT*)shared; local T_FLOAT4* Bsh = (local T_FLOAT4*) &Ash[BS_K * BS_CRS]; T_ACCUM regC[TS_K][TS_NPQ_VEC]; for (int i = 0; i < TS_K; ++i) { for (int j = 0; j < TS_NPQ_VEC; ++j) { regC[i][j] = (T_ACCUM)(0.0f); } } const uint NB_CRS = splitWork(CRS, BS_CRS); for (uint B_idx_CRS = 0; B_idx_CRS < NB_CRS; ++B_idx_CRS) { const uint offset_crs = B_idx_CRS * BS_CRS; for (int i = tid; i < BS_K * BS_CRS; i += (WG_K * WG_NPQ)) { const uint k_l = i / BS_CRS; const uint crs_l = i % BS_CRS; const uint k_g = offset_k + k_l; const uint crs_g = offset_crs + crs_l; if (k_g < K && crs_g < CRS) { const uint Cin_idx = crs_g / (KW*KH); const uint KH_idx = (crs_g - Cin_idx*KW*KH) / KW; const uint KW_idx = crs_g - Cin_idx*KW*KH - KH_idx*KW; const uint knl_idx = KW_idx + KH_idx*nb01 + Cin_idx*nb02 + k_g*nb03; Ash[k_l * BS_CRS + crs_l] = knl_data[knl_idx]; } else { Ash[k_l * BS_CRS + crs_l] = (T_FLOAT)0.0f; } } for (int i = tid; i < BS_CRS * BS_NPQ_VEC; i += (WG_K * WG_NPQ)) { const uint crs_l = i / BS_NPQ_VEC; const uint npq_l_vec = i % BS_NPQ_VEC; const uint crs_g = offset_crs + crs_l; T_FLOAT4 val = 
(T_FLOAT4)(0.0f); if (crs_g < CRS) { const uint Cin_idx = crs_g / (KW * KH); const uint KH_idx = (crs_g - Cin_idx * KW * KH) / KW; const uint KW_idx = crs_g - Cin_idx * KW * KH - KH_idx * KW; for (int v = 0; v < VEC_SIZE; ++v) { const uint npq_g = offset_npq + npq_l_vec * VEC_SIZE + v; if (npq_g < NPQ) { const uint N_idx = npq_g / (OH * OW); const uint pq_idx = npq_g % (OH * OW); const uint OH_idx = pq_idx / OW; const uint OW_idx = pq_idx % OW; const int H_idx = (int)(OH_idx * s1 + KH_idx * d1 - p1); const int W_idx = (int)(OW_idx * s0 + KW_idx * d0 - p0); if (H_idx >= 0 && H_idx < H && W_idx >= 0 && W_idx < W) { const uint src_idx = W_idx + H_idx * nb11 + Cin_idx * nb12 + N_idx * nb13; ((T_FLOAT*)&val)[v] = src_data[src_idx]; } } } } Bsh[crs_l * BS_NPQ_VEC + npq_l_vec] = val; } barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (uint crs_l = 0; crs_l < BS_CRS; ++crs_l) { T_FLOAT regA[TS_K]; for (uint k_l_reg = 0; k_l_reg < TS_K; ++k_l_reg) { regA[k_l_reg] = Ash[(lid_k * TS_K + k_l_reg) * BS_CRS + crs_l]; } for (uint npq_l_vec_reg = 0; npq_l_vec_reg < TS_NPQ_VEC; ++npq_l_vec_reg) { T_FLOAT4 regB = Bsh[crs_l * BS_NPQ_VEC + lid_npq * TS_NPQ_VEC + npq_l_vec_reg]; for (uint k_l_reg = 0; k_l_reg < TS_K; ++k_l_reg) { regC[k_l_reg][npq_l_vec_reg] = mad(convert_float(regA[k_l_reg]), convert_float4(regB), regC[k_l_reg][npq_l_vec_reg]); } } } barrier(CLK_LOCAL_MEM_FENCE); } for (uint k_l_reg = 0; k_l_reg < TS_K; ++k_l_reg) { const uint k_g = offset_k + lid_k * TS_K + k_l_reg; if (k_g >= K) continue; for (uint npq_l_vec_reg = 0; npq_l_vec_reg < TS_NPQ_VEC; ++npq_l_vec_reg) { const uint npq_g_base = offset_npq + (lid_npq * TS_NPQ_VEC + npq_l_vec_reg) * VEC_SIZE; const uint N_idx = npq_g_base / (OH * OW); const uint pq_idx = npq_g_base % (OH * OW); const uint OH_idx = pq_idx / OW; const uint OW_idx = pq_idx % OW; if (nb1 == OW && OW_idx + VEC_SIZE <= OW && npq_g_base + VEC_SIZE <= NPQ) { const uint dst_idx = OW_idx + OH_idx*nb1 + k_g*nb2 + N_idx*nb3; VSTORE_T_FLOAT4(regC[k_l_reg][npq_l_vec_reg], 0, &dst_data[dst_idx]); } else { T_ACCUM res = regC[k_l_reg][npq_l_vec_reg]; for (int v = 0; v < VEC_SIZE; ++v) { const uint npq_g = npq_g_base + v; if (npq_g < NPQ) { const uint N_idx_s = npq_g / (OH*OW); const uint pq_idx_s = npq_g % (OH*OW); const uint OH_idx_s = pq_idx_s / OW; const uint OW_idx_s = pq_idx_s % OW; const uint dst_idx_s = OW_idx_s + OH_idx_s*nb1 + k_g*nb2 + N_idx_s*nb3; dst_data[dst_idx_s] = (T_FLOAT)(((float*)&res)[v]); } } } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/conv2d_f16_f32.cl000066400000000000000000000150061512524704700236650ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #if defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #else #define REQD_SUBGROUP_SIZE_128 #endif #define T_ACCUM float4 #define VEC_SIZE 4 #define BS_K 64 #define BS_NPQ 64 #define BS_CRS 16 #define TS_K 4 #define TS_NPQ 8 #define WG_K (BS_K / TS_K) #define WG_NPQ (BS_NPQ / TS_NPQ) #define BS_NPQ_VEC (BS_NPQ / VEC_SIZE) #define TS_NPQ_VEC (TS_NPQ / VEC_SIZE) static inline uint splitWork(uint work_size, uint block_size){ return (work_size + block_size - 1) / block_size; } REQD_SUBGROUP_SIZE_128 kernel void kernel_conv_2d( global void* p_knl, ulong off_knl, global void* p_src, ulong off_src, global void* p_dst, ulong off_dst, local void* shared, uint Cout, uint Cin, uint N, uint KW, uint KH, uint W, uint H, uint OW, uint OH, uint s0, uint s1, uint p0, 
uint p1, uint d0, uint d1, uint nb01, uint nb02, uint nb03, uint nb11, uint nb12, uint nb13, uint nb1, uint nb2, uint nb3 ) { global half* knl_data = (global half*) ((global char*)p_knl + off_knl); global float* src_data = (global float*) ((global char*)p_src + off_src); global float* dst_data = (global float*) ((global char*)p_dst + off_dst); const uint K = Cout; const uint CRS = Cin*KH*KW; const uint NPQ = N*OH*OW; const uint lid_k = get_local_id(0); const uint lid_npq = get_local_id(1); const uint tid = lid_npq * WG_K + lid_k; const uint B_idx_K = get_group_id(0); const uint B_idx_NPQ = get_group_id(1); const uint offset_k = B_idx_K * BS_K; const uint offset_npq = B_idx_NPQ * BS_NPQ; local half* Ash = (local half*)shared; local float4* Bsh = (local float4*) &Ash[BS_K * BS_CRS]; T_ACCUM regC[TS_K][TS_NPQ_VEC]; for (int i = 0; i < TS_K; ++i) { for (int j = 0; j < TS_NPQ_VEC; ++j) { regC[i][j] = (T_ACCUM)(0.0f); } } const uint NB_CRS = splitWork(CRS, BS_CRS); for (uint B_idx_CRS = 0; B_idx_CRS < NB_CRS; ++B_idx_CRS) { const uint offset_crs = B_idx_CRS * BS_CRS; for (int i = tid; i < BS_K * BS_CRS; i += (WG_K * WG_NPQ)) { const uint k_l = i / BS_CRS; const uint crs_l = i % BS_CRS; const uint k_g = offset_k + k_l; const uint crs_g = offset_crs + crs_l; if (k_g < K && crs_g < CRS) { const uint Cin_idx = crs_g / (KW*KH); const uint KH_idx = (crs_g - Cin_idx*KW*KH) / KW; const uint KW_idx = crs_g - Cin_idx*KW*KH - KH_idx*KW; const uint knl_idx = KW_idx + KH_idx*nb01 + Cin_idx*nb02 + k_g*nb03; Ash[k_l * BS_CRS + crs_l] = knl_data[knl_idx]; } else { Ash[k_l * BS_CRS + crs_l] = (half)0.0f; } } for (int i = tid; i < BS_CRS * BS_NPQ_VEC; i += (WG_K * WG_NPQ)) { const uint crs_l = i / BS_NPQ_VEC; const uint npq_l_vec = i % BS_NPQ_VEC; const uint crs_g = offset_crs + crs_l; float4 val = (float4)(0.0f); if (crs_g < CRS) { const uint Cin_idx = crs_g / (KW * KH); const uint KH_idx = (crs_g - Cin_idx * KW * KH) / KW; const uint KW_idx = crs_g - Cin_idx * KW * KH - KH_idx * KW; for (int v = 0; v < VEC_SIZE; ++v) { const uint npq_g = offset_npq + npq_l_vec * VEC_SIZE + v; if (npq_g < NPQ) { const uint N_idx = npq_g / (OH * OW); const uint pq_idx = npq_g % (OH * OW); const uint OH_idx = pq_idx / OW; const uint OW_idx = pq_idx % OW; const int H_idx = (int)(OH_idx * s1 + KH_idx * d1 - p1); const int W_idx = (int)(OW_idx * s0 + KW_idx * d0 - p0); if (H_idx >= 0 && H_idx < H && W_idx >= 0 && W_idx < W) { const uint src_idx = W_idx + H_idx * nb11 + Cin_idx * nb12 + N_idx * nb13; ((float*)&val)[v] = src_data[src_idx]; } } } } Bsh[crs_l * BS_NPQ_VEC + npq_l_vec] = val; } barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (uint crs_l = 0; crs_l < BS_CRS; ++crs_l) { half regA[TS_K]; for (uint k_l_reg = 0; k_l_reg < TS_K; ++k_l_reg) { regA[k_l_reg] = Ash[(lid_k * TS_K + k_l_reg) * BS_CRS + crs_l]; } for (uint npq_l_vec_reg = 0; npq_l_vec_reg < TS_NPQ_VEC; ++npq_l_vec_reg) { float4 regB = Bsh[crs_l * BS_NPQ_VEC + lid_npq * TS_NPQ_VEC + npq_l_vec_reg]; for (uint k_l_reg = 0; k_l_reg < TS_K; ++k_l_reg) { regC[k_l_reg][npq_l_vec_reg] = mad(convert_float(regA[k_l_reg]), regB, regC[k_l_reg][npq_l_vec_reg]); } } } barrier(CLK_LOCAL_MEM_FENCE); } for (uint k_l_reg = 0; k_l_reg < TS_K; ++k_l_reg) { const uint k_g = offset_k + lid_k * TS_K + k_l_reg; if (k_g >= K) continue; for (uint npq_l_vec_reg = 0; npq_l_vec_reg < TS_NPQ_VEC; ++npq_l_vec_reg) { const uint npq_g_base = offset_npq + (lid_npq * TS_NPQ_VEC + npq_l_vec_reg) * VEC_SIZE; const uint N_idx = npq_g_base / (OH * OW); const uint pq_idx = npq_g_base % (OH * OW); const 
uint OH_idx = pq_idx / OW; const uint OW_idx = pq_idx % OW; if (nb1 == OW && OW_idx + VEC_SIZE <= OW && npq_g_base + VEC_SIZE <= NPQ) { const uint dst_idx = OW_idx + OH_idx*nb1 + k_g*nb2 + N_idx*nb3; vstore4(regC[k_l_reg][npq_l_vec_reg], 0, &dst_data[dst_idx]); } else { T_ACCUM res = regC[k_l_reg][npq_l_vec_reg]; for (int v = 0; v < VEC_SIZE; ++v) { const uint npq_g = npq_g_base + v; if (npq_g < NPQ) { const uint N_idx_s = npq_g / (OH*OW); const uint pq_idx_s = npq_g % (OH*OW); const uint OH_idx_s = pq_idx_s / OW; const uint OW_idx_s = pq_idx_s % OW; const uint dst_idx_s = OW_idx_s + OH_idx_s*nb1 + k_g*nb2 + N_idx_s*nb3; dst_data[dst_idx_s] = ((float*)&res)[v]; } } } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/cpy.cl000066400000000000000000000120311512524704700221320ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // cpy //------------------------------------------------------------------------------ kernel void kernel_cpy_f16_f16( global half * src0, ulong offset0, global half * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global half*)((global char*)src0 + offset0); dst = (global half*)((global char*)dst + offsetd); int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; int i3 = n / (ne2*ne1*ne0); int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); global half * dst_data = (global half *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { global const half * src = (global half *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f16_f32( global half * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global half*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; int i3 = n / (ne2*ne1*ne0); int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); global float * dst_data = (global float *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { global half * src = (global half *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_f16( global float * src0, ulong offset0, global half * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global half*)((global char*)dst + offsetd); int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; int i3 = n / 
(ne2*ne1*ne0); int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); global half * dst_data = (global half *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { global const float * src = (global float *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_f32( global float * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; int i3 = n / (ne2*ne1*ne0); int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); global float * dst_data = (global float *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { global const float * src = (global float *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/cvt.cl000066400000000000000000000213251512524704700221410ustar00rootroot00000000000000//------------------------------------------------------------------------------ // This file is contains kernels for data conversion. // These kernels are used when loading the model, so its performance is less // important. //------------------------------------------------------------------------------ #pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; //------------------------------------------------------------------------------ // kernel_convert_block_q4_0 // Convert the block_q4_0 format to 2 separate arrays (AOS -> SOA). // This kernel does not deshuffle the bits. 
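// One work-item per block: the half scale d is copied to dst_d and the QK4_0/2 packed nibble bytes are copied unchanged to dst_q.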
//------------------------------------------------------------------------------ kernel void kernel_convert_block_q4_0( global struct block_q4_0 * src0, global uchar * dst_q, global half * dst_d ) { global struct block_q4_0 * b = (global struct block_q4_0 *) src0 + get_global_id(0); global uchar * q = (global uchar *) dst_q + QK4_0/2*get_global_id(0); global half * d = (global half *) dst_d + get_global_id(0); *d = b->d; for (int i = 0; i < QK4_0/2; ++i) { q[i] = b->qs[i]; } } kernel void kernel_restore_block_q4_0( global uchar * src_q, global half * src_d, global struct block_q4_0 * dst ) { global struct block_q4_0 * b = (global struct block_q4_0 *) dst + get_global_id(0); global uchar * q = (global uchar *) src_q + QK4_0/2*get_global_id(0); global half * d = (global half *) src_d + get_global_id(0); b->d = *d; for (int i = 0; i < QK4_0/2; ++i) { b->qs[i] = q[i]; } } //------------------------------------------------------------------------------ // kernel_convert_block_q4_0_noshuffle // Flatten q4_0 weights and unshuffle the bits //------------------------------------------------------------------------------ kernel void kernel_convert_block_q4_0_noshuffle( global struct block_q4_0 * src0, global uchar * dst_q, global half * dst_d ) { global struct block_q4_0 * b = (global struct block_q4_0 *) src0 + get_global_id(0); global uchar * q = (global uchar *) dst_q + QK4_0/2*get_global_id(0); global half * d = (global half *) dst_d + get_global_id(0); *d = b->d; for (int i = 0; i < QK4_0/4; ++i) { uchar x0 = b->qs[2*i + 0]; uchar x1 = b->qs[2*i + 1]; q[i + 0 ] = convert_uchar(x0 & 0x0F) | convert_uchar((x1 & 0x0F) << 4); q[i + QK4_0/4] = convert_uchar((x0 & 0xF0) >> 4) | convert_uchar(x1 & 0xF0); #ifdef ADRENO_GPU // Workaround for adreno - must have the following printf statement for // the kernel to work properly. Otherwise it produces incorrect result. // convert_uchar above also seems necessary. // Compare against a large number so that it does not print anything. // get_sub_group_local_id() also works. if (get_global_id(0) == 65536*4096) { printf("%04x - %02x\n", *(global ushort*)d, ((x0 & 0xF0) >> 4) | (x1 & 0xF0)); } #endif } } kernel void kernel_restore_block_q4_0_noshuffle( global uchar * src_q, global half * src_d, global struct block_q4_0 * dst, uchar mask_0F, uchar mask_F0 ) { global struct block_q4_0 * b = (global struct block_q4_0 *) dst + get_global_id(0); global uchar * q = (global uchar *) src_q + QK4_0/2*get_global_id(0); global half * d = (global half *) src_d + get_global_id(0); b->d = *d; for (int i = 0; i < QK4_0/4; ++i) { uchar x0 = q[i + 0 ] ; uchar x1 = q[i + QK4_0/4]; b->qs[2*i + 0] = convert_uchar((x0 & mask_0F) | ((x1 & mask_0F) << 4)); b->qs[2*i + 1] = convert_uchar(((x0 & mask_F0) >> 4) | (x1 & mask_F0)); } } //------------------------------------------------------------------------------ // block_mxfp4 //------------------------------------------------------------------------------ #define QK_MXFP4 32 struct block_mxfp4 { uchar e; // E8M0 uchar qs[QK_MXFP4 / 2]; }; //------------------------------------------------------------------------------ // kernel_convert_block_mxfp4 // Convert the block_mxfp4 format to 2 separate arrays (AOS -> SOA). // This kernel does not deshuffle the bits. 
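// Same AOS -> SOA split as kernel_convert_block_q4_0 above, except the per-block scale is a single E8M0 exponent byte (e) written to dst_e instead of a half delta.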
//------------------------------------------------------------------------------ kernel void kernel_convert_block_mxfp4( global struct block_mxfp4 * src0, global uchar * dst_q, global uchar * dst_e ) { global struct block_mxfp4 * b = (global struct block_mxfp4 *) src0 + get_global_id(0); global uchar * q = (global uchar *) dst_q + QK_MXFP4 / 2 * get_global_id(0); global uchar * e = (global uchar *) dst_e + get_global_id(0); *e = b->e; for (int i = 0; i < QK_MXFP4 / 2; ++i) { q[i] = b->qs[i]; } } kernel void kernel_convert_block_mxfp4_trans( global struct block_mxfp4 * src0, __global uint4 * dst_q, __global uchar * dst_e, uint ne00, uint ne01 ) { int i00 = get_global_id(1); uint i01 = get_global_id(0); uint i02 = get_global_id(2); uint ne00_blk = ne00 / QK_MXFP4; uint src_blk_offset = i00 + i01 * ne00_blk + i02 * ne00_blk * ne01; uint dst_blk_offset = i01 + i00 * ne01 + i02 * ne00_blk * ne01; global struct block_mxfp4 * b = src0 + src_blk_offset; dst_q[dst_blk_offset] = ((global uint4 *)(&(b->qs[0])))[0]; dst_e[dst_blk_offset] = b->e; } kernel void kernel_restore_block_mxfp4( global uchar * src_q, global half * src_e, global struct block_mxfp4 * dst ) { global struct block_mxfp4 * b = (global struct block_mxfp4 *) dst + get_global_id(0); global uchar * q = (global uchar *) src_q + QK_MXFP4 / 2 * get_global_id(0); global uchar * e = (global uchar *) src_e + get_global_id(0); b->e = *e; for (int i = 0; i < QK_MXFP4 / 2; ++i) { b->qs[i] = q[i]; } } kernel void kernel_restore_block_mxfp4_trans( __global uint4 * src_q, __global uchar * src_e, global struct block_mxfp4 * dst, uint ne00, uint ne01 ) { int i00 = get_global_id(1); uint i01 = get_global_id(0); uint i02 = get_global_id(2); uint ne00_blk = ne00 / QK_MXFP4; uint src_blk_offset = i01 + i00 * ne01 + i02 * ne00_blk * ne01; uint dst_blk_offset = i00 + i01 * ne00_blk + i02 * ne00_blk * ne01; global struct block_mxfp4 * b = dst + dst_blk_offset; ((global uint4 *)(&(b->qs[0])))[0] = src_q[src_blk_offset]; b->e = src_e[src_blk_offset]; } //------------------------------------------------------------------------------ // block_q8_0 //------------------------------------------------------------------------------ typedef struct { half d; // delta char qs[QK8_0]; // quants } block_q8_0; kernel void kernel_convert_block_q8_0( global block_q8_0 * src0, global uchar * dst_q, global half * dst_d ) { global block_q8_0 * b = (global block_q8_0 *) src0 + get_global_id(0); global uchar * q = (global uchar *) dst_q + QK8_0*get_global_id(0); global half * d = (global half *) dst_d + get_global_id(0); *d = b->d; for (int i = 0; i < QK8_0; ++i) { q[i] = b->qs[i]; } } kernel void kernel_restore_block_q8_0( global uchar * src_q, global half * src_d, global block_q8_0 * dst ) { global block_q8_0 * b = (global block_q8_0 *) dst + get_global_id(0); global uchar * q = (global uchar *) src_q + QK8_0*get_global_id(0); global half * d = (global half *) src_d + get_global_id(0); b->d = *d; for (int i = 0; i < QK8_0; ++i) { b->qs[i] = q[i]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/diag_mask_inf.cl000066400000000000000000000031071512524704700241160ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // diag_mask_inf kernels //------------------------------------------------------------------------------ kernel void kernel_diag_mask_inf( global float * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int n_past ) { src0 = (global 
float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); int i02 = get_global_id(2); int i01 = get_global_id(1); int i00 = get_global_id(0); if (i00 > n_past + i01) { dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; } else { dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; } } kernel void kernel_diag_mask_inf_8( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd, int ne00, int ne01, int n_past ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); int i = 2*get_global_id(0); dst[i+0] = src0[i+0]; dst[i+1] = src0[i+1]; int i4 = 4*i; int i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01; int i01 = i4/(ne00); i4 -= i01*ne00; int i00 = i4; for (int k = 3; k >= 0; --k) { if (i00 + 4 + k <= n_past + i01) { break; } (&dst[i+1])[k] = -INFINITY; if (i00 + k > n_past + i01) { (&dst[i])[k] = -INFINITY; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/div.cl000066400000000000000000000073371512524704700221360ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // div //------------------------------------------------------------------------------ kernel void kernel_div( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global float *)(dst_ptr + i0*nb0)) = *((global float *)(src0_ptr + i0*nb00)) / *((global float *)(src1_ptr + i10*nb10)); } } // assumption: src1 is a row // broadcast src1 into src0 kernel void kernel_div_row( global float4 * src0, ulong offset0, global float4 * src1, ulong offset1, global float4 * dst, ulong offsetd, int ne ) { src0 = (global float4*)((global char*)src0 + offset0); src1 = (global float4*)((global char*)src1 + offset1); dst = (global float4*)((global char*)dst + offsetd); // This performs better than using %. 
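// same row-broadcast indexing as kernel_add_row: idx1 is gid reduced modulo ne, selecting the src1 element to divide by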
uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] / src1[idx1]; } kernel void kernel_div_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global half *)(dst_ptr + i0*nb0)) = *((global half *)(src0_ptr + i0*nb00)) / *((global half *)(src1_ptr + i10*nb10)); } } kernel void kernel_div_row_f16( global half4 * src0, ulong offset0, global half4 * src1, ulong offset1, global half4 * dst, ulong offsetd, int ne ) { src0 = (global half4*)((global char*)src0 + offset0); src1 = (global half4*)((global char*)src1 + offset1); dst = (global half4*)((global char*)dst + offsetd); // This performs better than using %. uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] / src1[idx1]; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/embed_kernel.py000066400000000000000000000007351512524704700240150ustar00rootroot00000000000000# import sys import logging logger = logging.getLogger("opencl-embed-kernel") def main(): logging.basicConfig(level=logging.INFO) if len(sys.argv) != 3: logger.info("Usage: python embed_kernel.py ") sys.exit(1) ifile = open(sys.argv[1], "r") ofile = open(sys.argv[2], "w") for i in ifile: ofile.write('R"({})"\n'.format(i)) ifile.close() ofile.close() if __name__ == "__main__": main() ggml-org-ggml-3678254/src/ggml-opencl/kernels/flash_attn_f16.cl000066400000000000000000000330151512524704700241430ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define ACC_TYPE float #define ACC_TYPE4 float4 #define DATA_TYPE half #define DATA_TYPE4 half4 #define CONVERT_ACC4(x) convert_float4(x) #define CONVERT_DATA4(x) convert_half4(x) #define DK_VEC (DK/4) #define DV_VEC (DV/4) #define WG_SIZE (BLOCK_M) #define Q1_WG_SIZE 64 inline float get_alibi_slope( const float max_bias, const uint h, const uint n_head_log2, const float m0, const float m1 ) { if (max_bias <= 0.0f) { return 1.0f; } const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? 
h + 1 : 2*(h - n_head_log2) + 1; return pow(base, exph); } __kernel void flash_attn_f16( const global void * q_void, ulong q_offset, const global void * k_void, ulong k_offset, const global void * v_void, ulong v_offset, global void * o_void, ulong o_offset, const float scale, const int n_q, const int n_kv, const int is_causal, const int n_head, const ulong q_nb1, const ulong q_nb2, const ulong q_nb3, const ulong k_nb1, const ulong k_nb2, const ulong k_nb3, const ulong v_nb1, const ulong v_nb2, const ulong v_nb3, const ulong o_nb1, const ulong o_nb2, const ulong o_nb3, const float max_bias, const float m0, const float m1, const int n_head_log2, const float logit_softcap, const int n_head_kv, const global void* mask_void, const ulong mask_offset, const ulong mask_nb1, const ulong mask_nb2, const ulong mask_nb3, const int mask_ne2, const int mask_ne3, const global void* sinks_void, const ulong sinks_offset ) { const int tid = get_local_id(0); const int block_q_idx = get_group_id(0); const int head_batch_idx = get_global_id(1); const int my_query_row = block_q_idx * BLOCK_M + tid; const int batch_idx = head_batch_idx / n_head; const int head_idx = head_batch_idx % n_head; const int gqa_ratio = n_head / n_head_kv; const int head_kv_idx = head_idx / gqa_ratio; const global char* q_base = (const global char*)q_void + q_offset; const global char* k_base = (const global char*)k_void + k_offset; const global char* v_base = (const global char*)v_void + v_offset; global char* o_base = (global char*)o_void + o_offset; const global char* mask_base = NULL; if (mask_void != NULL) { const int mask_head_idx = head_idx % mask_ne2; const int mask_batch_idx = batch_idx % mask_ne3; mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2; } ACC_TYPE4 q_priv[DK_VEC]; if (my_query_row < n_q) { const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2 + my_query_row * q_nb1; const global DATA_TYPE4* q_ptr = (const global DATA_TYPE4*)(q_base + q_row_offset); #pragma unroll for (int i = 0; i < DK_VEC; ++i) { q_priv[i] = CONVERT_ACC4(q_ptr[i]); } } ACC_TYPE4 o_acc[DV_VEC]; #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] = (ACC_TYPE4)(0.0f); } ACC_TYPE m_i = -INFINITY; ACC_TYPE l_i = 0.0f; float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1); __local DATA_TYPE4 l_k[BLOCK_N][DK_VEC]; __local DATA_TYPE4 l_v[BLOCK_N][DV_VEC]; for (int k_start = 0; k_start < n_kv; k_start += BLOCK_N) { for (int i = tid; i < BLOCK_N * DK_VEC; i += WG_SIZE) { const int row = i / DK_VEC; const int col = i % DK_VEC; const int k_row_idx = k_start + row; if (k_row_idx < n_kv) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_row_idx * k_nb1; l_k[row][col] = ((__global DATA_TYPE4*)(k_base + k_row_offset))[col]; } } for (int i = tid; i < BLOCK_N * DV_VEC; i += WG_SIZE) { const int row = i / DV_VEC; const int col = i % DV_VEC; const int v_row_idx = k_start + row; if (v_row_idx < n_kv) { const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + v_row_idx * v_nb1; l_v[row][col] = ((__global DATA_TYPE4*)(v_base + v_row_offset))[col]; } } barrier(CLK_LOCAL_MEM_FENCE); if (my_query_row >= n_q) { continue; } for (int j = 0; j < BLOCK_N; j += 2) { const int k_row0 = k_start + j; const int k_row1 = k_start + j + 1; ACC_TYPE4 dot_acc0 = (ACC_TYPE4)(0.0f); ACC_TYPE4 dot_acc1 = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc0 = mad(q_priv[k], CONVERT_ACC4(l_k[j][k]), dot_acc0); dot_acc1 = mad(q_priv[k], 
CONVERT_ACC4(l_k[j+1][k]), dot_acc1); } ACC_TYPE score0 = (dot_acc0.s0 + dot_acc0.s1 + dot_acc0.s2 + dot_acc0.s3) * scale; ACC_TYPE score1 = (dot_acc1.s0 + dot_acc1.s1 + dot_acc1.s2 + dot_acc1.s3) * scale; if (is_causal) { if (k_row0 > (n_kv - n_q + my_query_row)) score0 = -INFINITY; if (k_row1 > (n_kv - n_q + my_query_row)) score1 = -INFINITY; } if (k_row0 >= n_kv) score0 = -INFINITY; if (k_row1 >= n_kv) score1 = -INFINITY; if (mask_base != NULL) { const global DATA_TYPE* mask_ptr = (const global DATA_TYPE*)(mask_base + my_query_row * mask_nb1); if (k_row0 < n_kv) score0 += slope * (ACC_TYPE)mask_ptr[k_row0]; if (k_row1 < n_kv) score1 += slope * (ACC_TYPE)mask_ptr[k_row1]; } if (logit_softcap > 0.0f) { score0 = logit_softcap * tanh(score0 / logit_softcap); score1 = logit_softcap * tanh(score1 / logit_softcap); } const ACC_TYPE m_new = max(m_i, max(score0, score1)); const ACC_TYPE p0 = exp(score0 - m_new); const ACC_TYPE p1 = exp(score1 - m_new); const ACC_TYPE scale_prev = exp(m_i - m_new); #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] = o_acc[i] * scale_prev + p0 * CONVERT_ACC4(l_v[j][i]) + p1 * CONVERT_ACC4(l_v[j+1][i]); } l_i = l_i * scale_prev + p0 + p1; m_i = m_new; } } if (my_query_row < n_q) { if (sinks_void != NULL) { const global ACC_TYPE* sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset); const ACC_TYPE m_sink = sinks_ptr[head_idx]; const ACC_TYPE m_final = max(m_i, m_sink); const ACC_TYPE scale_o = exp(m_i - m_final); #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] *= scale_o; } l_i = l_i * exp(m_i - m_final) + exp(m_sink - m_final); } const ulong o_row_offset = batch_idx * o_nb3 + my_query_row * o_nb2 + head_idx * o_nb1; global DATA_TYPE4 *o_row = (global DATA_TYPE4 *)(o_base + o_row_offset); if (l_i > 0.0f) { const ACC_TYPE l_inv = 1.0f / l_i; #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_row[i] = CONVERT_DATA4(o_acc[i] * l_inv); } } else { #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_row[i] = (DATA_TYPE4)(0.0f); } } } } __kernel void flash_attn_f16_q1( const global void * q_void, ulong q_offset, const global void * k_void, ulong k_offset, const global void * v_void, ulong v_offset, global void * o_void, ulong o_offset, const float scale, const int n_q, const int n_kv, const int is_causal, const int n_head, const ulong q_nb1, const ulong q_nb2, const ulong q_nb3, const ulong k_nb1, const ulong k_nb2, const ulong k_nb3, const ulong v_nb1, const ulong v_nb2, const ulong v_nb3, const ulong o_nb1, const ulong o_nb2, const ulong o_nb3, const float max_bias, const float m0, const float m1, const int n_head_log2, const float logit_softcap, const int n_head_kv, const global void* mask_void, const ulong mask_offset, const ulong mask_nb1, const ulong mask_nb2, const ulong mask_nb3, const int mask_ne2, const int mask_ne3, const global void* sinks_void, const ulong sinks_offset ) { const int tid = get_local_id(0); const int head_batch_idx = get_global_id(1); const int batch_idx = head_batch_idx / n_head; const int head_idx = head_batch_idx % n_head; const int gqa_ratio = n_head / n_head_kv; const int head_kv_idx = head_idx / gqa_ratio; const global char* q_base = (const global char*)q_void + q_offset; const global char* k_base = (const global char*)k_void + k_offset; const global char* v_base = (const global char*)v_void + v_offset; global char* o_base = (global char*)o_void + o_offset; const global char* mask_base = NULL; if (mask_void != NULL) { const int mask_head_idx = head_idx % mask_ne2; const int mask_batch_idx 
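// The *_q1 variant handles a single query row per workgroup: Q1_WG_SIZE threads stride over the KV cache,
// pass 1 finds the max logit, pass 2 accumulates exp-weighted V, and local-memory reductions merge the per-thread partials.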
= batch_idx % mask_ne3; mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2; } ACC_TYPE4 q_priv[DK_VEC]; const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2; const global DATA_TYPE4* q_ptr = (const global DATA_TYPE4*)(q_base + q_row_offset); #pragma unroll for (int i = 0; i < DK_VEC; ++i) { q_priv[i] = CONVERT_ACC4(q_ptr[i]); } float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1); const global ACC_TYPE* sinks_ptr = NULL; if (sinks_void != NULL) { sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset); } ACC_TYPE m_i = (sinks_ptr != NULL) ? sinks_ptr[head_idx] : -INFINITY; for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1; const global DATA_TYPE4* k_ptr = (const global DATA_TYPE4*)(k_base + k_row_offset); ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc = mad(q_priv[k], CONVERT_ACC4(k_ptr[k]), dot_acc); } ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale; if (mask_base != NULL) { const global DATA_TYPE* mask_ptr = (const global DATA_TYPE*)(mask_base); score += slope * (ACC_TYPE)mask_ptr[k_idx]; } if (logit_softcap > 0.0f) { score = logit_softcap * tanh(score / logit_softcap); } m_i = max(m_i, score); } __local ACC_TYPE local_m[Q1_WG_SIZE]; local_m[tid] = m_i; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_m[tid] = max(local_m[tid], local_m[tid + s]); barrier(CLK_LOCAL_MEM_FENCE); } const ACC_TYPE m_final = local_m[0]; ACC_TYPE4 o_acc[DV_VEC]; #pragma unroll for (int i = 0; i < DV_VEC; ++i) o_acc[i] = (ACC_TYPE4)(0.0f); ACC_TYPE l_i = 0.0f; for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1; const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + k_idx * v_nb1; const global DATA_TYPE4* k_ptr = (const global DATA_TYPE4*)(k_base + k_row_offset); const global DATA_TYPE4* v_ptr = (const global DATA_TYPE4*)(v_base + v_row_offset); ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc = mad(q_priv[k], CONVERT_ACC4(k_ptr[k]), dot_acc); } ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale; if (mask_base != NULL) { const global DATA_TYPE* mask_ptr = (const global DATA_TYPE*)(mask_base); score += slope * (ACC_TYPE)mask_ptr[k_idx]; } if (logit_softcap > 0.0f) { score = logit_softcap * tanh(score / logit_softcap); } const ACC_TYPE p = exp(score - m_final); l_i += p; #pragma unroll for (int i = 0; i < DV_VEC; i++) { o_acc[i] = mad(p, CONVERT_ACC4(v_ptr[i]), o_acc[i]); } } __local ACC_TYPE local_l[Q1_WG_SIZE]; __local ACC_TYPE4 local_o_comp[Q1_WG_SIZE]; local_l[tid] = l_i; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_l[tid] += local_l[tid + s]; barrier(CLK_LOCAL_MEM_FENCE); } const ulong o_row_offset = batch_idx * o_nb3 + head_idx * o_nb1; global DATA_TYPE4 *o_row = (global DATA_TYPE4 *)(o_base + o_row_offset); ACC_TYPE l_final = local_l[0]; if (sinks_ptr != NULL) { l_final += exp(sinks_ptr[head_idx] - m_final); } if (l_final > 0.0f) { const ACC_TYPE l_inv = 1.0f / l_final; for (int i = 0; i < DV_VEC; i++) { local_o_comp[tid] = o_acc[i]; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { 
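// Tree reduction: the number of active threads halves each step, so local_o_comp[0] ends up holding the workgroup sum for this output component.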
if (tid < s) local_o_comp[tid] += local_o_comp[tid + s]; barrier(CLK_LOCAL_MEM_FENCE); } if (tid == 0) { o_row[i] = CONVERT_DATA4(local_o_comp[0] * l_inv); } } } else if (tid == 0) { #pragma unroll for (int i = 0; i < DV_VEC; ++i) o_row[i] = (DATA_TYPE4)(0.0f); } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/flash_attn_f32.cl000066400000000000000000000330561512524704700241460ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define ACC_TYPE float #define ACC_TYPE4 float4 #define DATA_TYPE float #define DATA_TYPE4 float4 #define MASK_DATA_TYPE half #define CONVERT_ACC4(x) (x) #define CONVERT_DATA4(x) (x) #define DK_VEC (DK/4) #define DV_VEC (DV/4) #define WG_SIZE (BLOCK_M) #define Q1_WG_SIZE 64 inline float get_alibi_slope( const float max_bias, const uint h, const uint n_head_log2, const float m0, const float m1 ) { if (max_bias <= 0.0f) { return 1.0f; } const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; return pow(base, exph); } __kernel void flash_attn_f32( const global void * q_void, ulong q_offset, const global void * k_void, ulong k_offset, const global void * v_void, ulong v_offset, global void * o_void, ulong o_offset, const float scale, const int n_q, const int n_kv, const int is_causal, const int n_head, const ulong q_nb1, const ulong q_nb2, const ulong q_nb3, const ulong k_nb1, const ulong k_nb2, const ulong k_nb3, const ulong v_nb1, const ulong v_nb2, const ulong v_nb3, const ulong o_nb1, const ulong o_nb2, const ulong o_nb3, const float max_bias, const float m0, const float m1, const int n_head_log2, const float logit_softcap, const int n_head_kv, const global void* mask_void, const ulong mask_offset, const ulong mask_nb1, const ulong mask_nb2, const ulong mask_nb3, const int mask_ne2, const int mask_ne3, const global void* sinks_void, const ulong sinks_offset ) { const int tid = get_local_id(0); const int block_q_idx = get_group_id(0); const int head_batch_idx = get_global_id(1); const int my_query_row = block_q_idx * BLOCK_M + tid; const int batch_idx = head_batch_idx / n_head; const int head_idx = head_batch_idx % n_head; const int gqa_ratio = n_head / n_head_kv; const int head_kv_idx = head_idx / gqa_ratio; const global char* q_base = (const global char*)q_void + q_offset; const global char* k_base = (const global char*)k_void + k_offset; const global char* v_base = (const global char*)v_void + v_offset; global char* o_base = (global char*)o_void + o_offset; const global char* mask_base = NULL; if (mask_void != NULL) { const int mask_head_idx = head_idx % mask_ne2; const int mask_batch_idx = batch_idx % mask_ne3; mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2; } ACC_TYPE4 q_priv[DK_VEC]; if (my_query_row < n_q) { const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2 + my_query_row * q_nb1; const global DATA_TYPE4* q_ptr = (const global DATA_TYPE4*)(q_base + q_row_offset); #pragma unroll for (int i = 0; i < DK_VEC; ++i) { q_priv[i] = CONVERT_ACC4(q_ptr[i]); } } ACC_TYPE4 o_acc[DV_VEC]; #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] = (ACC_TYPE4)(0.0f); } ACC_TYPE m_i = -INFINITY; ACC_TYPE l_i = 0.0f; float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1); __local DATA_TYPE4 l_k[BLOCK_N][DK_VEC]; __local DATA_TYPE4 l_v[BLOCK_N][DV_VEC]; for (int k_start = 0; k_start < n_kv; k_start += BLOCK_N) { for (int i = tid; i < BLOCK_N * DK_VEC; i += WG_SIZE) { const int row = i / DK_VEC; const int 
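// Stage a BLOCK_N x DK tile of K (and a BLOCK_N x DV tile of V just below) into local memory, with threads cooperating at a WG_SIZE stride.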
col = i % DK_VEC; const int k_row_idx = k_start + row; if (k_row_idx < n_kv) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_row_idx * k_nb1; l_k[row][col] = ((__global DATA_TYPE4*)(k_base + k_row_offset))[col]; } } for (int i = tid; i < BLOCK_N * DV_VEC; i += WG_SIZE) { const int row = i / DV_VEC; const int col = i % DV_VEC; const int v_row_idx = k_start + row; if (v_row_idx < n_kv) { const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + v_row_idx * v_nb1; l_v[row][col] = ((__global DATA_TYPE4*)(v_base + v_row_offset))[col]; } } barrier(CLK_LOCAL_MEM_FENCE); if (my_query_row >= n_q) { continue; } for (int j = 0; j < BLOCK_N; j += 2) { const int k_row0 = k_start + j; const int k_row1 = k_start + j + 1; ACC_TYPE4 dot_acc0 = (ACC_TYPE4)(0.0f); ACC_TYPE4 dot_acc1 = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc0 = mad(q_priv[k], CONVERT_ACC4(l_k[j][k]), dot_acc0); dot_acc1 = mad(q_priv[k], CONVERT_ACC4(l_k[j+1][k]), dot_acc1); } ACC_TYPE score0 = (dot_acc0.s0 + dot_acc0.s1 + dot_acc0.s2 + dot_acc0.s3) * scale; ACC_TYPE score1 = (dot_acc1.s0 + dot_acc1.s1 + dot_acc1.s2 + dot_acc1.s3) * scale; if (is_causal) { if (k_row0 > (n_kv - n_q + my_query_row)) score0 = -INFINITY; if (k_row1 > (n_kv - n_q + my_query_row)) score1 = -INFINITY; } if (k_row0 >= n_kv) score0 = -INFINITY; if (k_row1 >= n_kv) score1 = -INFINITY; if (mask_base != NULL) { const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base + my_query_row * mask_nb1); if (k_row0 < n_kv) score0 += slope * (ACC_TYPE)mask_ptr[k_row0]; if (k_row1 < n_kv) score1 += slope * (ACC_TYPE)mask_ptr[k_row1]; } if (logit_softcap > 0.0f) { score0 = logit_softcap * tanh(score0 / logit_softcap); score1 = logit_softcap * tanh(score1 / logit_softcap); } const ACC_TYPE m_new = max(m_i, max(score0, score1)); const ACC_TYPE p0 = exp(score0 - m_new); const ACC_TYPE p1 = exp(score1 - m_new); const ACC_TYPE scale_prev = exp(m_i - m_new); #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] = o_acc[i] * scale_prev + p0 * CONVERT_ACC4(l_v[j][i]) + p1 * CONVERT_ACC4(l_v[j+1][i]); } l_i = l_i * scale_prev + p0 + p1; m_i = m_new; } } if (my_query_row < n_q) { if (sinks_void != NULL) { const global ACC_TYPE* sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset); const ACC_TYPE m_sink = sinks_ptr[head_idx]; const ACC_TYPE m_final = max(m_i, m_sink); const ACC_TYPE scale_o = exp(m_i - m_final); #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] *= scale_o; } l_i = l_i * exp(m_i - m_final) + exp(m_sink - m_final); } const ulong o_row_offset = batch_idx * o_nb3 + my_query_row * o_nb2 + head_idx * o_nb1; global DATA_TYPE4 *o_row = (global DATA_TYPE4 *)(o_base + o_row_offset); if (l_i > 0.0f) { const ACC_TYPE l_inv = 1.0f / l_i; #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_row[i] = CONVERT_DATA4(o_acc[i] * l_inv); } } else { #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_row[i] = (DATA_TYPE4)(0.0f); } } } } __kernel void flash_attn_f32_q1( const global void * q_void, ulong q_offset, const global void * k_void, ulong k_offset, const global void * v_void, ulong v_offset, global void * o_void, ulong o_offset, const float scale, const int n_q, const int n_kv, const int is_causal, const int n_head, const ulong q_nb1, const ulong q_nb2, const ulong q_nb3, const ulong k_nb1, const ulong k_nb2, const ulong k_nb3, const ulong v_nb1, const ulong v_nb2, const ulong v_nb3, const ulong o_nb1, const ulong o_nb2, const ulong o_nb3, const float 
max_bias, const float m0, const float m1, const int n_head_log2, const float logit_softcap, const int n_head_kv, const global void* mask_void, const ulong mask_offset, const ulong mask_nb1, const ulong mask_nb2, const ulong mask_nb3, const int mask_ne2, const int mask_ne3, const global void* sinks_void, const ulong sinks_offset ) { const int tid = get_local_id(0); const int head_batch_idx = get_global_id(1); const int batch_idx = head_batch_idx / n_head; const int head_idx = head_batch_idx % n_head; const int gqa_ratio = n_head / n_head_kv; const int head_kv_idx = head_idx / gqa_ratio; const global char* q_base = (const global char*)q_void + q_offset; const global char* k_base = (const global char*)k_void + k_offset; const global char* v_base = (const global char*)v_void + v_offset; global char* o_base = (global char*)o_void + o_offset; const global char* mask_base = NULL; if (mask_void != NULL) { const int mask_head_idx = head_idx % mask_ne2; const int mask_batch_idx = batch_idx % mask_ne3; mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2; } ACC_TYPE4 q_priv[DK_VEC]; const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2; const global DATA_TYPE4* q_ptr = (const global DATA_TYPE4*)(q_base + q_row_offset); #pragma unroll for (int i = 0; i < DK_VEC; ++i) { q_priv[i] = CONVERT_ACC4(q_ptr[i]); } float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1); const global ACC_TYPE* sinks_ptr = NULL; if (sinks_void != NULL) { sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset); } ACC_TYPE m_i = (sinks_ptr != NULL) ? sinks_ptr[head_idx] : -INFINITY; for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1; const global DATA_TYPE4* k_ptr = (const global DATA_TYPE4*)(k_base + k_row_offset); ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc = mad(q_priv[k], CONVERT_ACC4(k_ptr[k]), dot_acc); } ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale; if (mask_base != NULL) { const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base); score += slope * (ACC_TYPE)mask_ptr[k_idx]; } if (logit_softcap > 0.0f) { score = logit_softcap * tanh(score / logit_softcap); } m_i = max(m_i, score); } __local ACC_TYPE local_m[Q1_WG_SIZE]; local_m[tid] = m_i; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_m[tid] = max(local_m[tid], local_m[tid + s]); barrier(CLK_LOCAL_MEM_FENCE); } const ACC_TYPE m_final = local_m[0]; ACC_TYPE4 o_acc[DV_VEC]; #pragma unroll for (int i = 0; i < DV_VEC; ++i) o_acc[i] = (ACC_TYPE4)(0.0f); ACC_TYPE l_i = 0.0f; for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1; const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + k_idx * v_nb1; const global DATA_TYPE4* k_ptr = (const global DATA_TYPE4*)(k_base + k_row_offset); const global DATA_TYPE4* v_ptr = (const global DATA_TYPE4*)(v_base + v_row_offset); ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc = mad(q_priv[k], CONVERT_ACC4(k_ptr[k]), dot_acc); } ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale; if (mask_base != NULL) { const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base); score += slope * 
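// Optional logit softcap: when logit_softcap > 0 the score is squashed through logit_softcap * tanh(score / logit_softcap) before entering the max/softmax.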
(ACC_TYPE)mask_ptr[k_idx]; } if (logit_softcap > 0.0f) { score = logit_softcap * tanh(score / logit_softcap); } const ACC_TYPE p = exp(score - m_final); l_i += p; #pragma unroll for (int i = 0; i < DV_VEC; i++) { o_acc[i] = mad(p, CONVERT_ACC4(v_ptr[i]), o_acc[i]); } } __local ACC_TYPE local_l[Q1_WG_SIZE]; __local ACC_TYPE4 local_o_comp[Q1_WG_SIZE]; local_l[tid] = l_i; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_l[tid] += local_l[tid + s]; barrier(CLK_LOCAL_MEM_FENCE); } const ulong o_row_offset = batch_idx * o_nb3 + head_idx * o_nb1; global DATA_TYPE4 *o_row = (global DATA_TYPE4 *)(o_base + o_row_offset); ACC_TYPE l_final = local_l[0]; if (sinks_ptr != NULL) { l_final += exp(sinks_ptr[head_idx] - m_final); } if (l_final > 0.0f) { const ACC_TYPE l_inv = 1.0f / l_final; for (int i = 0; i < DV_VEC; i++) { local_o_comp[tid] = o_acc[i]; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_o_comp[tid] += local_o_comp[tid + s]; barrier(CLK_LOCAL_MEM_FENCE); } if (tid == 0) { o_row[i] = CONVERT_DATA4(local_o_comp[0] * l_inv); } } } else if (tid == 0) { #pragma unroll for (int i = 0; i < DV_VEC; ++i) o_row[i] = (DATA_TYPE4)(0.0f); } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/flash_attn_f32_f16.cl000066400000000000000000000333301512524704700246150ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define ACC_TYPE float #define ACC_TYPE4 float4 #define Q_DATA_TYPE4 float4 #define KV_DATA_TYPE4 half4 #define O_DATA_TYPE4 float4 #define MASK_DATA_TYPE half #define CONVERT_Q_ACC4(x) (x) #define CONVERT_KV_ACC4(x) convert_float4(x) #define CONVERT_O_DATA4(x) (x) #define DK_VEC (DK/4) #define DV_VEC (DV/4) #define WG_SIZE (BLOCK_M) #define Q1_WG_SIZE 64 inline float get_alibi_slope( const float max_bias, const uint h, const uint n_head_log2, const float m0, const float m1 ) { if (max_bias <= 0.0f) { return 1.0f; } const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? 
h + 1 : 2*(h - n_head_log2) + 1; return pow(base, exph); } __kernel void flash_attn_f32_f16( const global void * q_void, ulong q_offset, const global void * k_void, ulong k_offset, const global void * v_void, ulong v_offset, global void * o_void, ulong o_offset, const float scale, const int n_q, const int n_kv, const int is_causal, const int n_head, const ulong q_nb1, const ulong q_nb2, const ulong q_nb3, const ulong k_nb1, const ulong k_nb2, const ulong k_nb3, const ulong v_nb1, const ulong v_nb2, const ulong v_nb3, const ulong o_nb1, const ulong o_nb2, const ulong o_nb3, const float max_bias, const float m0, const float m1, const int n_head_log2, const float logit_softcap, const int n_head_kv, const global void* mask_void, const ulong mask_offset, const ulong mask_nb1, const ulong mask_nb2, const ulong mask_nb3, const int mask_ne2, const int mask_ne3, const global void* sinks_void, const ulong sinks_offset ) { const int tid = get_local_id(0); const int block_q_idx = get_group_id(0); const int head_batch_idx = get_global_id(1); const int my_query_row = block_q_idx * BLOCK_M + tid; const int batch_idx = head_batch_idx / n_head; const int head_idx = head_batch_idx % n_head; const int gqa_ratio = n_head / n_head_kv; const int head_kv_idx = head_idx / gqa_ratio; const global char* q_base = (const global char*)q_void + q_offset; const global char* k_base = (const global char*)k_void + k_offset; const global char* v_base = (const global char*)v_void + v_offset; global char* o_base = (global char*)o_void + o_offset; const global char* mask_base = NULL; if (mask_void != NULL) { const int mask_head_idx = head_idx % mask_ne2; const int mask_batch_idx = batch_idx % mask_ne3; mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2; } ACC_TYPE4 q_priv[DK_VEC]; if (my_query_row < n_q) { const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2 + my_query_row * q_nb1; const global Q_DATA_TYPE4* q_ptr = (const global Q_DATA_TYPE4*)(q_base + q_row_offset); #pragma unroll for (int i = 0; i < DK_VEC; ++i) { q_priv[i] = CONVERT_Q_ACC4(q_ptr[i]); } } ACC_TYPE4 o_acc[DV_VEC]; #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] = (ACC_TYPE4)(0.0f); } ACC_TYPE m_i = -INFINITY; ACC_TYPE l_i = 0.0f; float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1); __local KV_DATA_TYPE4 l_k[BLOCK_N][DK_VEC]; __local KV_DATA_TYPE4 l_v[BLOCK_N][DV_VEC]; for (int k_start = 0; k_start < n_kv; k_start += BLOCK_N) { for (int i = tid; i < BLOCK_N * DK_VEC; i += WG_SIZE) { const int row = i / DK_VEC; const int col = i % DK_VEC; const int k_row_idx = k_start + row; if (k_row_idx < n_kv) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_row_idx * k_nb1; l_k[row][col] = ((__global KV_DATA_TYPE4*)(k_base + k_row_offset))[col]; } } for (int i = tid; i < BLOCK_N * DV_VEC; i += WG_SIZE) { const int row = i / DV_VEC; const int col = i % DV_VEC; const int v_row_idx = k_start + row; if (v_row_idx < n_kv) { const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + v_row_idx * v_nb1; l_v[row][col] = ((__global KV_DATA_TYPE4*)(v_base + v_row_offset))[col]; } } barrier(CLK_LOCAL_MEM_FENCE); if (my_query_row >= n_q) { continue; } for (int j = 0; j < BLOCK_N; j += 2) { const int k_row0 = k_start + j; const int k_row1 = k_start + j + 1; ACC_TYPE4 dot_acc0 = (ACC_TYPE4)(0.0f); ACC_TYPE4 dot_acc1 = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc0 = mad(q_priv[k], CONVERT_KV_ACC4(l_k[j][k]), dot_acc0); 
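// Mixed-precision path: Q and the output stay in f32 while K/V are stored as f16; CONVERT_KV_ACC4 widens the f16 tiles to f32 before the mad accumulation.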
dot_acc1 = mad(q_priv[k], CONVERT_KV_ACC4(l_k[j+1][k]), dot_acc1); } ACC_TYPE score0 = (dot_acc0.s0 + dot_acc0.s1 + dot_acc0.s2 + dot_acc0.s3) * scale; ACC_TYPE score1 = (dot_acc1.s0 + dot_acc1.s1 + dot_acc1.s2 + dot_acc1.s3) * scale; if (is_causal) { if (k_row0 > (n_kv - n_q + my_query_row)) score0 = -INFINITY; if (k_row1 > (n_kv - n_q + my_query_row)) score1 = -INFINITY; } if (k_row0 >= n_kv) score0 = -INFINITY; if (k_row1 >= n_kv) score1 = -INFINITY; if (mask_base != NULL) { const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base + my_query_row * mask_nb1); if (k_row0 < n_kv) score0 += slope * (ACC_TYPE)mask_ptr[k_row0]; if (k_row1 < n_kv) score1 += slope * (ACC_TYPE)mask_ptr[k_row1]; } if (logit_softcap > 0.0f) { score0 = logit_softcap * tanh(score0 / logit_softcap); score1 = logit_softcap * tanh(score1 / logit_softcap); } const ACC_TYPE m_new = max(m_i, max(score0, score1)); const ACC_TYPE p0 = exp(score0 - m_new); const ACC_TYPE p1 = exp(score1 - m_new); const ACC_TYPE scale_prev = exp(m_i - m_new); #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] = o_acc[i] * scale_prev + p0 * CONVERT_KV_ACC4(l_v[j][i]) + p1 * CONVERT_KV_ACC4(l_v[j+1][i]); } l_i = l_i * scale_prev + p0 + p1; m_i = m_new; } } if (my_query_row < n_q) { if (sinks_void != NULL) { const global ACC_TYPE* sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset); const ACC_TYPE m_sink = sinks_ptr[head_idx]; const ACC_TYPE m_final = max(m_i, m_sink); const ACC_TYPE scale_o = exp(m_i - m_final); #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_acc[i] *= scale_o; } l_i = l_i * exp(m_i - m_final) + exp(m_sink - m_final); } const ulong o_row_offset = batch_idx * o_nb3 + my_query_row * o_nb2 + head_idx * o_nb1; global O_DATA_TYPE4 *o_row = (global O_DATA_TYPE4 *)(o_base + o_row_offset); if (l_i > 0.0f) { const ACC_TYPE l_inv = 1.0f / l_i; #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_row[i] = CONVERT_O_DATA4(o_acc[i] * l_inv); } } else { #pragma unroll for (int i = 0; i < DV_VEC; ++i) { o_row[i] = (O_DATA_TYPE4)(0.0f); } } } } __kernel void flash_attn_f32_f16_q1( const global void * q_void, ulong q_offset, const global void * k_void, ulong k_offset, const global void * v_void, ulong v_offset, global void * o_void, ulong o_offset, const float scale, const int n_q, const int n_kv, const int is_causal, const int n_head, const ulong q_nb1, const ulong q_nb2, const ulong q_nb3, const ulong k_nb1, const ulong k_nb2, const ulong k_nb3, const ulong v_nb1, const ulong v_nb2, const ulong v_nb3, const ulong o_nb1, const ulong o_nb2, const ulong o_nb3, const float max_bias, const float m0, const float m1, const int n_head_log2, const float logit_softcap, const int n_head_kv, const global void* mask_void, const ulong mask_offset, const ulong mask_nb1, const ulong mask_nb2, const ulong mask_nb3, const int mask_ne2, const int mask_ne3, const global void* sinks_void, const ulong sinks_offset ) { const int tid = get_local_id(0); const int head_batch_idx = get_global_id(1); const int batch_idx = head_batch_idx / n_head; const int head_idx = head_batch_idx % n_head; const int gqa_ratio = n_head / n_head_kv; const int head_kv_idx = head_idx / gqa_ratio; const global char* q_base = (const global char*)q_void + q_offset; const global char* k_base = (const global char*)k_void + k_offset; const global char* v_base = (const global char*)v_void + v_offset; global char* o_base = (global char*)o_void + o_offset; const global char* mask_base = NULL; if (mask_void != NULL) { const int 
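// Grouped-query attention: head_kv_idx = head_idx / gqa_ratio maps each query head onto its shared KV head; the mask head/batch indices wrap with % so a smaller mask broadcasts.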
mask_head_idx = head_idx % mask_ne2; const int mask_batch_idx = batch_idx % mask_ne3; mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2; } ACC_TYPE4 q_priv[DK_VEC]; const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2; const global Q_DATA_TYPE4* q_ptr = (const global Q_DATA_TYPE4*)(q_base + q_row_offset); #pragma unroll for (int i = 0; i < DK_VEC; ++i) { q_priv[i] = CONVERT_Q_ACC4(q_ptr[i]); } float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1); const global ACC_TYPE* sinks_ptr = NULL; if (sinks_void != NULL) { sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset); } ACC_TYPE m_i = (sinks_ptr != NULL) ? sinks_ptr[head_idx] : -INFINITY; for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1; const global KV_DATA_TYPE4* k_ptr = (const global KV_DATA_TYPE4*)(k_base + k_row_offset); ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc = mad(q_priv[k], CONVERT_KV_ACC4(k_ptr[k]), dot_acc); } ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale; if (mask_base != NULL) { const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base); score += slope * (ACC_TYPE)mask_ptr[k_idx]; } if (logit_softcap > 0.0f) { score = logit_softcap * tanh(score / logit_softcap); } m_i = max(m_i, score); } __local ACC_TYPE local_m[Q1_WG_SIZE]; local_m[tid] = m_i; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_m[tid] = max(local_m[tid], local_m[tid + s]); barrier(CLK_LOCAL_MEM_FENCE); } const ACC_TYPE m_final = local_m[0]; ACC_TYPE4 o_acc[DV_VEC]; #pragma unroll for (int i = 0; i < DV_VEC; ++i) o_acc[i] = (ACC_TYPE4)(0.0f); ACC_TYPE l_i = 0.0f; for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) { const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1; const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + k_idx * v_nb1; const global KV_DATA_TYPE4* k_ptr = (const global KV_DATA_TYPE4*)(k_base + k_row_offset); const global KV_DATA_TYPE4* v_ptr = (const global KV_DATA_TYPE4*)(v_base + v_row_offset); ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f); #pragma unroll for (int k = 0; k < DK_VEC; k++) { dot_acc = mad(q_priv[k], CONVERT_KV_ACC4(k_ptr[k]), dot_acc); } ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale; if (mask_base != NULL) { const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base); score += slope * (ACC_TYPE)mask_ptr[k_idx]; } if (logit_softcap > 0.0f) { score = logit_softcap * tanh(score / logit_softcap); } const ACC_TYPE p = exp(score - m_final); l_i += p; #pragma unroll for (int i = 0; i < DV_VEC; i++) { o_acc[i] = mad(p, CONVERT_KV_ACC4(v_ptr[i]), o_acc[i]); } } __local ACC_TYPE local_l[Q1_WG_SIZE]; __local ACC_TYPE4 local_o_comp[Q1_WG_SIZE]; local_l[tid] = l_i; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_l[tid] += local_l[tid + s]; barrier(CLK_LOCAL_MEM_FENCE); } const ulong o_row_offset = batch_idx * o_nb3 + head_idx * o_nb1; global O_DATA_TYPE4 *o_row = (global O_DATA_TYPE4 *)(o_base + o_row_offset); ACC_TYPE l_final = local_l[0]; if (sinks_ptr != NULL) { l_final += exp(sinks_ptr[head_idx] - m_final); } if (l_final > 0.0f) { const ACC_TYPE l_inv = 1.0f / l_final; for (int i = 0; i < DV_VEC; i++) { 
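// Attention sinks: the per-head sink logit seeds the running max and adds exp(sink - m_final) to the denominator l_final, without contributing to the V accumulation.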
local_o_comp[tid] = o_acc[i]; barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) { if (tid < s) local_o_comp[tid] += local_o_comp[tid + s]; barrier(CLK_LOCAL_MEM_FENCE); } if (tid == 0) { o_row[i] = CONVERT_O_DATA4(local_o_comp[0] * l_inv); } } } else if (tid == 0) { #pragma unroll for (int i = 0; i < DV_VEC; ++i) o_row[i] = (O_DATA_TYPE4)(0.0f); } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/gelu.cl000066400000000000000000000047301512524704700223020ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // gelu //------------------------------------------------------------------------------ #define GELU_COEF_A 0.044715f #define GELU_QUICK_COEF -1.702f #define SQRT_2_OVER_PI 0.79788456080286535587989211986876f #define SQRT_2_INV 0.70710678118654752440084436210484f kernel void kernel_gelu( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); float x = src0[get_global_id(0)]; dst[get_global_id(0)] = 0.5f*x*(1.0f + tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } kernel void kernel_gelu_4( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); float4 x = src0[get_global_id(0)]; dst[get_global_id(0)] = 0.5f*x*(1.0f + tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } kernel void kernel_gelu_erf( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); float x = src0[get_global_id(0)]; dst[get_global_id(0)] = 0.5f*x*(1.0f + erf(x*SQRT_2_INV)); } kernel void kernel_gelu_erf_4( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); float4 x = src0[get_global_id(0)]; dst[get_global_id(0)] = 0.5f*x*(1.0f + erf(x*SQRT_2_INV)); } kernel void kernel_gelu_quick( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); float x = src0[get_global_id(0)]; dst[get_global_id(0)] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); } kernel void kernel_gelu_quick_4( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); float4 x = src0[get_global_id(0)]; dst[get_global_id(0)] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl000066400000000000000000000142531512524704700247240ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #pragma OPENCL EXTENSION cl_khr_subgroups : enable #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define QK_MXFP4 32 #define N_SIMDGROUP 2 #define SIMDGROUP_WIDTH 64 static inline half8 mxfp4_to_fp16_packed8(ushort2 fp4x8) { //, ushort 0x0E00, ushort 0x8000) { ushort2 fp16_packed_a_0, fp16_packed_b_0, bias_a, bias_b, sign_a, sign_b; fp16_packed_a_0.lo = (fp4x8.s0 << 9) & 0x0E00; fp16_packed_a_0.hi = (fp4x8.s0 << 5) & 0x0E00; fp16_packed_b_0.lo = (fp4x8.s0 << 1) & 0x0E00; fp16_packed_b_0.hi = (fp4x8.s0 >> 3) & 
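// FP4 (E2M1) -> f16 expansion: the 3 magnitude bits of each nibble are shifted into the f16 exponent/mantissa field and re-biased by adding 0x3800; zero and the subnormal pattern are special-cased, and the sign bit is moved to bit 15.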
0x0E00; bias_a.lo = (fp16_packed_a_0.lo != 0) ? 0x3800 : 0x0; bias_a.hi = (fp16_packed_a_0.hi != 0) ? 0x3800 : 0x0; bias_b.lo = (fp16_packed_b_0.lo != 0) ? 0x3800 : 0x0; bias_b.hi = (fp16_packed_b_0.hi != 0) ? 0x3800 : 0x0; fp16_packed_a_0.lo = (fp16_packed_a_0.lo != 0x0200) ? fp16_packed_a_0.lo : 0x0; fp16_packed_a_0.hi = (fp16_packed_a_0.hi != 0x0200) ? fp16_packed_a_0.hi : 0x0; fp16_packed_b_0.lo = (fp16_packed_b_0.lo != 0x0200) ? fp16_packed_b_0.lo : 0x0; fp16_packed_b_0.hi = (fp16_packed_b_0.hi != 0x0200) ? fp16_packed_b_0.hi : 0x0; sign_a.lo = (fp4x8.s0 << 12) & 0x8000; sign_a.hi = (fp4x8.s0 << 8) & 0x8000; sign_b.lo = (fp4x8.s0 << 4) & 0x8000; sign_b.hi = fp4x8.s0 & 0x8000; fp16_packed_a_0 = sign_a + bias_a + fp16_packed_a_0; fp16_packed_b_0 = sign_b + bias_b + fp16_packed_b_0; ushort2 fp16_packed_a_1, fp16_packed_b_1; fp16_packed_a_1.lo = (fp4x8.s1 << 9) & 0x0E00; fp16_packed_a_1.hi = (fp4x8.s1 << 5) & 0x0E00; fp16_packed_b_1.lo = (fp4x8.s1 << 1) & 0x0E00; fp16_packed_b_1.hi = (fp4x8.s1 >> 3) & 0x0E00; bias_a.lo = (fp16_packed_a_1.lo != 0) ? 0x3800 : 0x0; bias_a.hi = (fp16_packed_a_1.hi != 0) ? 0x3800 : 0x0; bias_b.lo = (fp16_packed_b_1.lo != 0) ? 0x3800 : 0x0; bias_b.hi = (fp16_packed_b_1.hi != 0) ? 0x3800 : 0x0; fp16_packed_a_1.lo = (fp16_packed_a_1.lo != 0x0200) ? fp16_packed_a_1.lo : 0x0; fp16_packed_a_1.hi = (fp16_packed_a_1.hi != 0x0200) ? fp16_packed_a_1.hi : 0x0; fp16_packed_b_1.lo = (fp16_packed_b_1.lo != 0x0200) ? fp16_packed_b_1.lo : 0x0; fp16_packed_b_1.hi = (fp16_packed_b_1.hi != 0x0200) ? fp16_packed_b_1.hi : 0x0; sign_a.lo = (fp4x8.s1 << 12) & 0x8000; sign_a.hi = (fp4x8.s1 << 8) & 0x8000; sign_b.lo = (fp4x8.s1 << 4) & 0x8000; sign_b.hi = fp4x8.s1 & 0x8000; fp16_packed_a_1 = sign_a + bias_a + fp16_packed_a_1; fp16_packed_b_1 = sign_b + bias_b + fp16_packed_b_1; return as_half8((ushort8)(fp16_packed_a_0, fp16_packed_b_0, fp16_packed_a_1, fp16_packed_b_1)); } static inline float e8m0_to_fp32(uchar x) { int bits; bits = (x == 0) ? 
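// E8M0 block scale: the 8-bit biased exponent is placed straight into the f32 exponent field, i.e. 2^(x - 127); x == 0 is mapped to the subnormal 2^-127.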
0x00400000 : ((uint) x << 23); return as_float(bits); } __attribute__((qcom_reqd_sub_group_size("half"))) __kernel void kernel_gemm_moe_mxfp4_f32( __global uint4 * src0_q, __global uchar * src0_e, __read_only image1d_buffer_t src1, __global ushort4 * src2, __global float * dst, ulong offsetd, int ne00, int ne01, int tile_size ) { uint i01 = get_global_id(0); uint i20 = get_global_id(2); uint sgid = get_local_id(1); uint slid = get_sub_group_local_id(); ushort4 router = src2[i20]; ushort expert_id = router.x; ushort i11 = router.y; ushort i1 = router.z; ushort tile_id = router.w; if (tile_id * tile_size + i01 >= ne01) { // handle edge case when ne01 is not multiple of tile_size return; } uint expert_offset = expert_id * ne00 * ne01 / 32; uint tile_offset = expert_offset + tile_id * tile_size + i01; __private float sum = 0.0f; // each thread calculate partial sum of one output // loop along ne00 in block granularity, skip 4 blocks every iter for (uint ib00 = sgid; ib00 < (ne00 / QK_MXFP4); ib00 += N_SIMDGROUP) { // load one block of q uint4 regQ = src0_q[tile_offset + ib00 * ne01]; // convert 8 fp4 to fp16 half8 fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s0)); uint offset = i11 * ne00 / 4 + ib00 * 8; float4 shared_y4; shared_y4 = read_imagef(src1, (offset + 0)); float4 acc = shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 4)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s1)); shared_y4 = read_imagef(src1, (offset + 1)); acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 5)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s2)); shared_y4 = read_imagef(src1, (offset + 2)); acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 6)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s3)); shared_y4 = read_imagef(src1, (offset + 3)); acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 7)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); uchar regE = src0_e[tile_offset + ib00 * ne01]; sum += e8m0_to_fp32(regE) * ((acc.s0 + acc.s1) + (acc.s2 + acc.s3)); } // reduction in local memory, assumes #subgroups=4 __local float reduceLM[SIMDGROUP_WIDTH * (N_SIMDGROUP - 1)]; if (sgid == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = sum; // if (sgid == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = sum; // if (sgid == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = sum; barrier(CLK_LOCAL_MEM_FENCE); if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; // if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; // if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 2 + slid]; // 1 outputs per thread in subgroup 0 if (sgid == 0) { dst = dst + (offsetd >> 2); dst[i01 + tile_id * tile_size + i1 * ne01] = sum; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl000066400000000000000000000135311512524704700247330ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #pragma OPENCL EXTENSION cl_khr_subgroups : enable #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define QK_MXFP4 32 #define N_SIMDGROUP 4 #define SIMDGROUP_WIDTH 64 static inline half8 mxfp4_to_fp16_packed8(ushort2 fp4x8) { //, 
ushort 0x0E00, ushort 0x8000) { ushort2 fp16_packed_a_0, fp16_packed_b_0, bias_a, bias_b, sign_a, sign_b; fp16_packed_a_0.lo = (fp4x8.s0 << 9) & 0x0E00; fp16_packed_a_0.hi = (fp4x8.s0 << 5) & 0x0E00; fp16_packed_b_0.lo = (fp4x8.s0 << 1) & 0x0E00; fp16_packed_b_0.hi = (fp4x8.s0 >> 3) & 0x0E00; bias_a.lo = (fp16_packed_a_0.lo != 0) ? 0x3800 : 0x0; bias_a.hi = (fp16_packed_a_0.hi != 0) ? 0x3800 : 0x0; bias_b.lo = (fp16_packed_b_0.lo != 0) ? 0x3800 : 0x0; bias_b.hi = (fp16_packed_b_0.hi != 0) ? 0x3800 : 0x0; fp16_packed_a_0.lo = (fp16_packed_a_0.lo != 0x0200) ? fp16_packed_a_0.lo : 0x0; fp16_packed_a_0.hi = (fp16_packed_a_0.hi != 0x0200) ? fp16_packed_a_0.hi : 0x0; fp16_packed_b_0.lo = (fp16_packed_b_0.lo != 0x0200) ? fp16_packed_b_0.lo : 0x0; fp16_packed_b_0.hi = (fp16_packed_b_0.hi != 0x0200) ? fp16_packed_b_0.hi : 0x0; sign_a.lo = (fp4x8.s0 << 12) & 0x8000; sign_a.hi = (fp4x8.s0 << 8) & 0x8000; sign_b.lo = (fp4x8.s0 << 4) & 0x8000; sign_b.hi = fp4x8.s0 & 0x8000; fp16_packed_a_0 = sign_a + bias_a + fp16_packed_a_0; fp16_packed_b_0 = sign_b + bias_b + fp16_packed_b_0; ushort2 fp16_packed_a_1, fp16_packed_b_1; fp16_packed_a_1.lo = (fp4x8.s1 << 9) & 0x0E00; fp16_packed_a_1.hi = (fp4x8.s1 << 5) & 0x0E00; fp16_packed_b_1.lo = (fp4x8.s1 << 1) & 0x0E00; fp16_packed_b_1.hi = (fp4x8.s1 >> 3) & 0x0E00; bias_a.lo = (fp16_packed_a_1.lo != 0) ? 0x3800 : 0x0; bias_a.hi = (fp16_packed_a_1.hi != 0) ? 0x3800 : 0x0; bias_b.lo = (fp16_packed_b_1.lo != 0) ? 0x3800 : 0x0; bias_b.hi = (fp16_packed_b_1.hi != 0) ? 0x3800 : 0x0; fp16_packed_a_1.lo = (fp16_packed_a_1.lo != 0x0200) ? fp16_packed_a_1.lo : 0x0; fp16_packed_a_1.hi = (fp16_packed_a_1.hi != 0x0200) ? fp16_packed_a_1.hi : 0x0; fp16_packed_b_1.lo = (fp16_packed_b_1.lo != 0x0200) ? fp16_packed_b_1.lo : 0x0; fp16_packed_b_1.hi = (fp16_packed_b_1.hi != 0x0200) ? fp16_packed_b_1.hi : 0x0; sign_a.lo = (fp4x8.s1 << 12) & 0x8000; sign_a.hi = (fp4x8.s1 << 8) & 0x8000; sign_b.lo = (fp4x8.s1 << 4) & 0x8000; sign_b.hi = fp4x8.s1 & 0x8000; fp16_packed_a_1 = sign_a + bias_a + fp16_packed_a_1; fp16_packed_b_1 = sign_b + bias_b + fp16_packed_b_1; return as_half8((ushort8)(fp16_packed_a_0, fp16_packed_b_0, fp16_packed_a_1, fp16_packed_b_1)); } static inline float e8m0_to_fp32(uchar x) { int bits; bits = (x == 0) ? 
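// MoE routing (gemv): src2 supplies the expert id for each routed row; expert_offset jumps to that expert's weight slab, each of the N_SIMDGROUP subgroups strides over the MXFP4 blocks, and partial sums are reduced in local memory.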
0x00400000 : ((uint) x << 23); return as_float(bits); } __attribute__((qcom_reqd_sub_group_size("half"))) __kernel void kernel_gemv_moe_mxfp4_f32( __global uint4 * src0_q, __global uchar * src0_e, __read_only image1d_buffer_t src1, __global uint * src2, __global float * dst, ulong offsetd, int ne00, int ne01, int ne11 ) { uint i01 = get_global_id(0); uint i20 = get_global_id(2); uint sgid = get_local_id(1); uint slid = get_sub_group_local_id(); uint i11 = i20 % ne11; uint expert_id = src2[i20]; uint expert_offset = expert_id * ne00 * ne01 / 32; __private float sum = 0.0f; // each thread calculate partial sum of one output // loop along ne00 in block granularity, skip 4 blocks every iter for (uint ib00 = sgid; ib00 < (ne00 / QK_MXFP4); ib00 += N_SIMDGROUP) { // load one block of q uint4 regQ = src0_q[expert_offset + ib00 * ne01 + i01]; uint offset = i11 * ne00 / 4 + ib00 * 8; half8 fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s0)); float4 shared_y4; shared_y4 = read_imagef(src1, (offset + 0)); float4 acc = shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 4)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s1)); shared_y4 = read_imagef(src1, (offset + 1)); acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 5)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s2)); shared_y4 = read_imagef(src1, (offset + 2)); acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 6)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s3)); shared_y4 = read_imagef(src1, (offset + 3)); acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); shared_y4 = read_imagef(src1, (offset + 7)); acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); uchar regE = src0_e[ib00 * ne01 + i01 + expert_offset]; sum += e8m0_to_fp32(regE) * ((acc.s0 + acc.s1) + (acc.s2 + acc.s3)); } // reduction in local memory, assumes #subgroups=4 __local float reduceLM[SIMDGROUP_WIDTH * (N_SIMDGROUP - 1)]; if (sgid == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = sum; if (sgid == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = sum; if (sgid == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = sum; barrier(CLK_LOCAL_MEM_FENCE); if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 2 + slid]; // 1 outputs per thread in subgroup 0 if (sgid == 0) { dst = dst + (offsetd >> 2); dst[i01 + i20 * ne01] = sum; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/gemv_noshuffle.cl000066400000000000000000000371541512524704700243630ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #pragma OPENCL EXTENSION cl_khr_subgroups : enable #ifdef cl_qcom_reqd_sub_group_size #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #endif // assume #define QK4_0 32 #define N_SIMDGROUP 4 #define dequantizeBlockAccum_ns_sgbroadcast_1_hi(total_sums, bits4, scale, y) \ float shared_y; \ shared_y = sub_group_broadcast(y.s0, 0); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s1 & 
0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 0); \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 0); \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 0); \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 0); \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 0); \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 0); \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 0); \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s0, 1); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 1); \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 1); \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 1); \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 1); \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 1); \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 1); \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 1); \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ #define dequantizeBlockAccum_ns_sgbroadcast_1_lo(total_sums, bits4, scale, y) \ shared_y = sub_group_broadcast(y.s0, 2); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 2); \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 2); \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = 
sub_group_broadcast(y.s3, 2); \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 2); \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 2); \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 2); \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 2); \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s0, 3); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 3); \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 3); \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 3); \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 3); \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 3); \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 3); \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 3); \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ #define dequantizeBlockAccum_ns_sgbroadcast_8_hi(total_sums, bits4, scale, y) \ float8 shared_y; \ shared_y = sub_group_broadcast(y, 0); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s3 & 
0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ shared_y = sub_group_broadcast(y, 1); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ #define dequantizeBlockAccum_ns_sgbroadcast_8_lo(total_sums, bits4, scale, y) \ shared_y = sub_group_broadcast(y, 2); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ shared_y = sub_group_broadcast(y, 3); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += 
((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif __kernel void kernel_gemv_noshuffle( __read_only image1d_buffer_t src0_q, // quantized A global half2 * src0_d, // A scales __read_only image1d_buffer_t src1, // B ulong offset1, // offset to B (0) global float * dst, // C ulong offsetd, // offset to C (0) uint K, // K int ne01, // M int ne02, // 1 int ne10, // K int ne12, // 1 int ne0, // M int ne1, // N int r2, // 1 int r3) { uint groupId = get_local_id(1); uint gid = get_global_id(0); ushort slid = get_sub_group_local_id(); __private uint4 regA; __private half2 regS; __private float8 regB; __private float2 totalSum = (float2)(0.0f); // loop along K in block granularity, skip 4 blocks every iter for (uint k = groupId; k < (K / QK4_0); k += N_SIMDGROUP) { regS = src0_d[gid + k * LINE_STRIDE_A]; // each fiber loads scale of two rows // first 4 fibers in each wave load 8 B values to its private scope if (slid < 4) { regB.s0123 = read_imagef(src1, (slid * 2 + k * 8)); regB.s4567 = read_imagef(src1, (1 + slid * 2 + k * 8)); } // load half weights for two blocks in consecutive rows regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 0)).x; regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 1)).x; regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 2)).x; regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 3)).x; #ifdef VECTOR_SUB_GROUP_BROADCAT dequantizeBlockAccum_ns_sgbroadcast_8_hi(totalSum, as_ushort8(regA), regS, regB); #else dequantizeBlockAccum_ns_sgbroadcast_1_hi(totalSum, as_ushort8(regA), regS, regB); #endif // VECTOR_SUB_GROUP_BROADCAT regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 4)).x; regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 5)).x; regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 6)).x; regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 7)).x; #ifdef VECTOR_SUB_GROUP_BROADCAT dequantizeBlockAccum_ns_sgbroadcast_8_lo(totalSum, as_ushort8(regA), regS, regB); #else dequantizeBlockAccum_ns_sgbroadcast_1_lo(totalSum, as_ushort8(regA), regS, regB); #endif // VECTOR_SUB_GROUP_BROADCAT } // reduction in local memory, assumes #wave=4 __local float2 reduceLM[SIMDGROUP_WIDTH * 3]; if (groupId == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = totalSum; if (groupId == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = totalSum; if (groupId == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = totalSum; barrier(CLK_LOCAL_MEM_FENCE); if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 2 + slid]; // 2 outputs per fiber in wave 0 if (groupId == 0) { dst = (global float*)((global char*)dst + offsetd); vstore2(totalSum, 0, &(dst[gid * 2])); } } 
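// Q4_0 path: the dequant macros above expand each 4-bit quant as (q - 8) * scale against activations broadcast across the subgroup; four waves accumulate partial sums, merge them through local memory, and wave 0 writes two outputs per fiber.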
ggml-org-ggml-3678254/src/ggml-opencl/kernels/gemv_noshuffle_general.cl000066400000000000000000000373411512524704700260560ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #pragma OPENCL EXTENSION cl_khr_subgroups : enable #ifdef cl_qcom_reqd_sub_group_size #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #endif // assume #define QK4_0 32 #define N_SIMDGROUP 4 #define dequantizeBlockAccum_ns_sgbroadcast_1_hi(total_sums, bits4, scale, y) \ float shared_y; \ shared_y = sub_group_broadcast(y.s0, 0); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 0); \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 0); \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 0); \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 0); \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 0); \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 0); \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 0); \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s0, 1); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 1); \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 1); \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 1); \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 1); \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 1); \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 1); \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 1); \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * 
shared_y; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ #define dequantizeBlockAccum_ns_sgbroadcast_1_lo(total_sums, bits4, scale, y) \ shared_y = sub_group_broadcast(y.s0, 2); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 2); \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 2); \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 2); \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 2); \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 2); \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 2); \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 2); \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s0, 3); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s1, 3); \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s2, 3); \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s3, 3); \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s4, 3); \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s5, 3); \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s6, 3); \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ shared_y = sub_group_broadcast(y.s7, 3); \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ #define dequantizeBlockAccum_ns_sgbroadcast_8_hi(total_sums, bits4, scale, y) \ float8 shared_y; \ shared_y = sub_group_broadcast(y, 0); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * 
scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ shared_y = sub_group_broadcast(y, 1); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ #define dequantizeBlockAccum_ns_sgbroadcast_8_lo(total_sums, bits4, scale, y) \ shared_y = sub_group_broadcast(y, 2); \ total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ 
total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ shared_y = sub_group_broadcast(y, 3); \ total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif __kernel void kernel_gemv_noshuffle( __read_only image1d_buffer_t src0_q, // quantized A global half2 * src0_d, // A scales __read_only image1d_buffer_t src1, // B ulong offset1, // offset to B (0) global float * dst, // C ulong offsetd, // offset to C (0) int ne00, // K int ne01, // M int ne02, // 1 int ne10, // K int ne12, // 1 int ne0, // M int ne1, // N int r2, // 1 int r3) { uint groupId = get_local_id(1); uint gid = get_global_id(0); ushort slid = get_sub_group_local_id(); uint K = ne00; uint M = ne01; uint LINE_STRIDE_A = M / 2; uint BLOCK_STRIDE_A = N_SIMDGROUP * M; __private uint4 regA; __private half2 regS; __private float8 regB; __private float2 totalSum = (float2)(0.0f); // loop along K in block granularity, skip 4 blocks every iter for (uint k = groupId; k < (K / QK4_0); k += N_SIMDGROUP) { regS = src0_d[gid + k * LINE_STRIDE_A]; // each fiber loads scale of two rows // first 4 fibers in each wave load 8 B values to its private scope if (slid < 4) { regB.s0123 = read_imagef(src1, (slid * 2 + k * 8)); regB.s4567 = read_imagef(src1, (1 + slid * 2 + k * 8)); } // load half weights for two blocks in consecutive rows regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 0)).x; regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 1)).x; regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 2)).x; regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 3)).x; #ifdef VECTOR_SUB_GROUP_BROADCAT dequantizeBlockAccum_ns_sgbroadcast_8_hi(totalSum, as_ushort8(regA), regS, regB); #else dequantizeBlockAccum_ns_sgbroadcast_1_hi(totalSum, as_ushort8(regA), regS, regB); #endif // VECTOR_SUB_GROUP_BROADCAT regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 4)).x; regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 5)).x; regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 6)).x; regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 7)).x; #ifdef VECTOR_SUB_GROUP_BROADCAT dequantizeBlockAccum_ns_sgbroadcast_8_lo(totalSum, as_ushort8(regA), regS, regB); #else 
dequantizeBlockAccum_ns_sgbroadcast_1_lo(totalSum, as_ushort8(regA), regS, regB); #endif // VECTOR_SUB_GROUP_BROADCAT } // reduction in local memory, assumes #wave=4 __local float2 reduceLM[SIMDGROUP_WIDTH * 3]; if (groupId == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = totalSum; if (groupId == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = totalSum; if (groupId == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = totalSum; barrier(CLK_LOCAL_MEM_FENCE); if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 2 + slid]; // 2 outputs per fiber in wave 0 if (groupId == 0) { dst = (global float*)((global char*)dst + offsetd); vstore2(totalSum, 0, &(dst[gid * 2])); } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/get_rows.cl000066400000000000000000000122421512524704700231740ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; #define QK4_0 32 //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; //------------------------------------------------------------------------------ // dequantize_q4_0_f32, dequantize_q4_0_f16 //------------------------------------------------------------------------------ void dequantize_q4_0_f32(global struct block_q4_0 * xb, short il, float16 * reg) { global ushort * qs = ((global ushort *)xb + 1); float d1 = il ? (xb->d / 16.h) : xb->d; float d2 = d1 / 256.f; float md = -8.h * xb->d; ushort mask0 = il ? 
0x00F0 : 0x000F; ushort mask1 = mask0 << 8; reg->s0 = d1 * (qs[0] & mask0) + md; reg->s1 = d2 * (qs[0] & mask1) + md; reg->s2 = d1 * (qs[1] & mask0) + md; reg->s3 = d2 * (qs[1] & mask1) + md; reg->s4 = d1 * (qs[2] & mask0) + md; reg->s5 = d2 * (qs[2] & mask1) + md; reg->s6 = d1 * (qs[3] & mask0) + md; reg->s7 = d2 * (qs[3] & mask1) + md; reg->s8 = d1 * (qs[4] & mask0) + md; reg->s9 = d2 * (qs[4] & mask1) + md; reg->sa = d1 * (qs[5] & mask0) + md; reg->sb = d2 * (qs[5] & mask1) + md; reg->sc = d1 * (qs[6] & mask0) + md; reg->sd = d2 * (qs[6] & mask1) + md; reg->se = d1 * (qs[7] & mask0) + md; reg->sf = d2 * (qs[7] & mask1) + md; } //------------------------------------------------------------------------------ // get_rows //------------------------------------------------------------------------------ kernel void kernel_get_rows_f32( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne10, ulong nb10, ulong nb11, ulong nb12, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int i10 = get_group_id(0); int i11 = get_group_id(1); int i12 = get_group_id(2); int r = ((global int *) ((global char *) src1 + i12*nb12 + i11*nb11 + i10*nb10))[0]; int i02 = i11; int i03 = i12; for (int ind = get_local_id(0); ind < ne00; ind += get_local_size(0)) { if (ind >= ne00) { return; } ((global float *) ((global char *) dst + i12*nb3 + i11*nb2 + i10*nb1))[ind] = ((global float *) ((global char *) src0 + r*nb01 + i02*nb02 + i03*nb03))[ind]; } } kernel void kernel_get_rows_f16( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne10, ulong nb10, ulong nb11, ulong nb12, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int i10 = get_group_id(0); int i11 = get_group_id(1); int i12 = get_group_id(2); int r = ((global int32_t *) ((global char *) src1 + i12*nb12 + i11*nb11 + i10*nb10))[0]; int i02 = i11; int i03 = i12; for (int ind = get_local_id(0); ind < ne00; ind += get_local_size(0)) { if (ind >= ne00) { return; } ((global float *) ((global char *) dst + i12*nb3 + i11*nb2 + i10*nb1))[ind] = ((global half *) ((global char *) src0 + r*nb01 + i02*nb02 + i03*nb03))[ind]; } } kernel void kernel_get_rows_q4_0( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne10, ulong nb10, ulong nb11, ulong nb12, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); const int NL = 2; int i10 = get_group_id(0); int i11 = get_group_id(1); int i12 = get_group_id(2); int r = ((global int32_t *) ((global char *) src1 + i12*nb12 + i11*nb11 + i10*nb10))[0]; int i02 = i11; int i03 = i12; for (int ind = get_local_id(0); ind < ne00/16; ind += get_local_size(0)) { float16 temp; if (ind >= ne00) { return; } dequantize_q4_0_f32( ((global struct block_q4_0 *) ((global char *) src0 + r*nb01 + i02*nb02 + i03*nb03)) + ind/NL, ind%NL, &temp); *(((global float16 *) ((global char *) dst + i12*nb3 + i11*nb2 + i10*nb1)) + ind) = 
temp; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/glu.cl000066400000000000000000000300721512524704700221330ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define GELU_COEF_A 0.044715f #define GELU_QUICK_COEF -1.702f #define SQRT_2_OVER_PI 0.79788456080286535587989211986876f #define SQRT_2_INV 0.70710678118654752440084436210484f //------------------------------------------------------------------------------ // geglu //------------------------------------------------------------------------------ kernel void kernel_geglu( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global float * src0_row = (global float *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global float * src1_row = (global float *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global float * dst_row = (global float *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float gelu = 0.5f*x0*(1.0f + tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0))); dst_row[i0] = gelu*x1; } } kernel void kernel_geglu_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global half * src0_row = (global half *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global half * src1_row = (global half *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global half * dst_row = (global half *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const half x0 = src0_row[i0]; const half x1 = src1_row[i0]; const half gelu = 0.5f*x0*(1.0f + tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0))); dst_row[i0] = gelu*x1; } } //------------------------------------------------------------------------------ // reglu //------------------------------------------------------------------------------ kernel void kernel_reglu( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global float * src0_row = (global float *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global float * src1_row = (global float *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global float * dst_row = (global float *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; dst_row[i0] = x0*x1*(x0 > 0.0f); } } kernel void kernel_reglu_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global 
char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global half * src0_row = (global half *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global half * src1_row = (global half *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global half * dst_row = (global half *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const half x0 = src0_row[i0]; const half x1 = src1_row[i0]; dst_row[i0] = x0*x1*(x0 > 0.0f); } } //------------------------------------------------------------------------------ // swiglu //------------------------------------------------------------------------------ kernel void kernel_swiglu( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global float * src0_row = (global float *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global float * src1_row = (global float *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global float * dst_row = (global float *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float silu = x0 / (1.0f + exp(-x0)); dst_row[i0] = silu*x1; } } kernel void kernel_swiglu_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global half * src0_row = (global half *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global half * src1_row = (global half *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global half * dst_row = (global half *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const half x0 = src0_row[i0]; const half x1 = src1_row[i0]; const half silu = x0 / (1.0f + exp(-x0)); dst_row[i0] = silu*x1; } } //------------------------------------------------------------------------------ // swiglu_oai //------------------------------------------------------------------------------ kernel void kernel_swiglu_oai( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off, float limit, float alpha ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global float * src0_row = (global float *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global float * src1_row = (global float *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global float * dst_row = (global float *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { float x0 = src0_row[i0]; float x1 = src1_row[i0]; x0 = min(x0, limit); x1 = max(min(x1, limit), -limit); float out_glu = x0 / (1.0f + exp(-x0 * alpha)); out_glu = out_glu * (1.0f + 
x1); dst_row[i0] = out_glu; } } //------------------------------------------------------------------------------ // geglu_erf //------------------------------------------------------------------------------ kernel void kernel_geglu_erf( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global float * src0_row = (global float *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global float * src1_row = (global float *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global float * dst_row = (global float *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float gelu_erf = 0.5f*x0*(1.0f + erf(x0*SQRT_2_INV)); dst_row[i0] = gelu_erf*x1; } } kernel void kernel_geglu_erf_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global half * src0_row = (global half *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global half * src1_row = (global half *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global half * dst_row = (global half *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const half x0 = src0_row[i0]; const half x1 = src1_row[i0]; const half gelu_erf = 0.5f*x0*(1.0f + erf(x0*SQRT_2_INV)); dst_row[i0] = gelu_erf*x1; } } //------------------------------------------------------------------------------ // geglu_quick //------------------------------------------------------------------------------ kernel void kernel_geglu_quick( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global float * src0_row = (global float *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global float * src1_row = (global float *) ((global char *) src1 + get_group_id(0)*nb11) + ne10_off; global float * dst_row = (global float *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const float x0 = src0_row[i0]; const float x1 = src1_row[i0]; const float gelu_quick = x0*(1.0f/(1.0f + exp(GELU_QUICK_COEF*x0))); dst_row[i0] = gelu_quick*x1; } } kernel void kernel_geglu_quick_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb01, ulong nb11, int ne0, ulong nb1, int ne00_off, int ne10_off ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); global half * src0_row = (global half *) ((global char *) src0 + get_group_id(0)*nb01) + ne00_off; global half * src1_row = (global half *) ((global char *) src1 
+ get_group_id(0)*nb11) + ne10_off; global half * dst_row = (global half *) ((global char *) dst + get_group_id(0)*nb1); for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const half x0 = src0_row[i0]; const half x1 = src1_row[i0]; const half gelu_quick = x0*(1.0f/(1.0f + exp(GELU_QUICK_COEF*x0))); dst_row[i0] = gelu_quick*x1; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/group_norm.cl000066400000000000000000000065761512524704700235470ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif // Workgroup must be a subgroup #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_32 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_group_norm( global float * src0, ulong offset0, global float * dst, ulong offsetd, int ne, int group_size, float eps ) { src0 = (global float *)((global char *)src0 + offset0); dst = (global float *)((global char *)dst + offsetd); int start = get_group_id(0) * group_size; int end = start + group_size; start += get_local_id(0); if (end >= ne) { end = ne; } float tmp = 0.0f; for (int j = start; j < end; j += get_local_size(0)) { tmp += src0[j]; } tmp = sub_group_reduce_add(tmp); const float mean = tmp / group_size; tmp = 0.0f; for (int j = start; j < end; j += get_local_size(0)) { float xi = src0[j] - mean; dst[j] = xi; tmp += xi * xi; } tmp = sub_group_reduce_add(tmp); const float variance = tmp / group_size; const float scale = 1.0f/sqrt(variance + eps); for (int j = start; j < end; j += get_local_size(0)) { dst[j] *= scale; } } //------------------------------------------------------------------------------ // group_norm_mul_add //------------------------------------------------------------------------------ #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_32 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_group_norm_mul_add( global float * src0, ulong offset0, global float * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne, int group_size, float eps ) { src0 = (global float *)((global char *)src0 + offset0); src1 = (global float *)((global char *)src1 + offset1); src2 = (global float *)((global char *)src2 + offset2); dst = (global float *)((global char *)dst + offsetd); int start = get_group_id(0) * group_size; int end = start + group_size; if (end > ne) { end = ne; } float sum = 0.0f; float sum_sq = 0.0f; for (int j = start + get_local_id(0); j < end; j += get_local_size(0)) { float val = src0[j]; sum += val; sum_sq += val*val; } sum = sub_group_reduce_add(sum); sum_sq = sub_group_reduce_add(sum_sq); const float mean = sum / group_size; const float var = sum_sq / group_size - mean * mean; const float scale = rsqrt(var + eps); for (int j = start + get_local_id(0); j < end; j += get_local_size(0)) { dst[j] = ((src0[j] - mean) * scale) * src1[j] + src2[j]; 
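// i.e. dst = (src0 - mean) * rsqrt(variance + eps) * src1 + src2: group normalization with the
// following mul and add fused in; src1 and src2 typically hold the norm's weight and bias tensors.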
} } ggml-org-ggml-3678254/src/ggml-opencl/kernels/im2col_f16.cl000066400000000000000000000025301512524704700232030ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable kernel void kernel_im2col_f16( global float * src1, ulong offset1, global half * dst, ulong offsetd, ulong batch_offset, ulong delta_offset, long IW, long IH, long IC, long OW, long OH, long KW, long KH, long pelements, long CHW, int s0, int s1, int p0, int p1, int d0, int d1 ) { long i = get_global_id(0); if (i >= pelements) { return; } src1 = (global float*)((global char*)src1 + offset1); dst = (global half*)((global char*)dst + offsetd); long ksize = OW * KH; long kx = i / ksize; long kd = kx * ksize; long ky = (i - kd) / OW; long ix = i % OW; long oh = get_group_id(1); long batch = get_group_id(2) / IC; long ic = get_group_id(2) % IC; long iiw = ix * s0 + kx * d0 - p0; long iih = oh * s1 + ky * d1 - p1; long offset_dst = ((batch * OH + oh) * OW + ix) * CHW + (ic * (KW * KH) + ky * KW + kx); if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst[offset_dst] = 0.0f; } else { long offset_src = ic * delta_offset + batch * batch_offset; dst[offset_dst] = src1[offset_src + iih * IW + iiw]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/im2col_f32.cl000066400000000000000000000025311512524704700232020ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable kernel void kernel_im2col_f32( global float * src1, ulong offset1, global float * dst, ulong offsetd, ulong batch_offset, ulong delta_offset, long IW, long IH, long IC, long OW, long OH, long KW, long KH, long pelements, long CHW, int s0, int s1, int p0, int p1, int d0, int d1 ) { long i = get_global_id(0); if (i >= pelements) { return; } src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); long ksize = OW * KH; long kx = i / ksize; long kd = kx * ksize; long ky = (i - kd) / OW; long ix = i % OW; long oh = get_group_id(1); long batch = get_group_id(2) / IC; long ic = get_group_id(2) % IC; long iiw = ix * s0 + kx * d0 - p0; long iih = oh * s1 + ky * d1 - p1; long offset_dst = ((batch * OH + oh) * OW + ix) * CHW + (ic * (KW * KH) + ky * KW + kx); if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst[offset_dst] = 0.0f; } else { long offset_src = ic * delta_offset + batch * batch_offset; dst[offset_dst] = src1[offset_src + iih * IW + iiw]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mean.cl000066400000000000000000000020011512524704700222530ustar00rootroot00000000000000 kernel void kernel_mean_f32( global float * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb01, ulong nb02, ulong nb03, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global float *)((global char *)src0 + offset0); dst = (global float *)((global char *)dst + offsetd); int i3 = get_global_id(2); int i2 = get_global_id(1); int i1 = get_global_id(0); if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { return; } global float * src_row = (global float *) ((global char *) src0 + i1*nb01 + i2*nb02 + i3*nb03); global float * dst_row = (global float *) ((global char *) dst + i1*nb1 + i2*nb2 + i3*nb3); float row_sum = 0; for (int i0 = 0; i0 < ne00; i0++) { row_sum += src_row[i0]; } dst_row[0] = row_sum / ne00; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul.cl000066400000000000000000000077251512524704700221520ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // mul 
//------------------------------------------------------------------------------ kernel void kernel_mul( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global float *)(dst_ptr + i0*nb0)) = *((global float *)(src0_ptr + i0*nb00)) * *((global float *)(src1_ptr + i10*nb10)); } } // assumption: src1 is a row // broadcast src1 into src0 kernel void kernel_mul_row( global float4 * src0, ulong offset0, global float4 * src1, ulong offset1, global float4 * dst, ulong offsetd, int ne ) { src0 = (global float4*)((global char*)src0 + offset0); src1 = (global float4*)((global char*)src1 + offset1); dst = (global float4*)((global char*)dst + offsetd); // This performs better than using %. uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] * src1[idx1]; } kernel void kernel_mul_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global half *)(dst_ptr + i0*nb0)) = *((global half *)(src0_ptr + i0*nb00)) * *((global half *)(src1_ptr + i10*nb10)); } } kernel void kernel_mul_row_f16( global half4 * src0, ulong offset0, global half4 * src1, ulong offset1, global half4 * dst, ulong offsetd, int ne ) { src0 = (global half4*)((global char*)src0 + offset0); src1 = (global half4*)((global char*)src1 + offset1); dst = (global half4*)((global char*)dst + offsetd); // This performs better than using %. 
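// Note: idx1 = gid - (gid/ne)*ne below is exactly gid % ne for unsigned gid; the divide/multiply/
// subtract form simply avoids the integer modulo that the comment above reports as slower here.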
uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] * src1[idx1]; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl000066400000000000000000000137671512524704700245330ustar00rootroot00000000000000// src0_q, src0_d, src1 are transposed as a preprocessing step // 4-bit weights are transposed in groups of 4 (unsigned short int) // consider weights originally "next to each other", now "on top of each other" // each fiber computes a 8x4 tile of output elements // using unshuffled weights #pragma OPENCL EXTENSION cl_khr_fp16 : enable #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #ifdef cl_qcom_reqd_sub_group_size #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_128 #endif kernel void kernel_mul_mat_Ab_Bi_8x4( global const ushort * src0_q, // quantized A global const half * src0_d, // A scales __read_only image1d_buffer_t src1, // B (1d image) global float * dst, // C int m, // M int n, // N with padding int k, // K int n_no_padding // N without padding ) { int m_4 = m >> 2; int n_4 = n >> 2; int gy = get_global_id(0); int gx = get_global_id(1); int gx_2 = gx << 2; half8 c0 = 0, c1 = 0, c2 = 0, c3 = 0; // 8x4 output elements half8 B; // registers for activations half4 dequantized_weights; // registers for dequantized weights __global const ushort* weight_ptr = src0_q + gx_2; // pointer for weights __global const half* scale_ptr = src0_d + gx_2; // pointer for scales for(int i=0; i<k; i+=4){ // loop over K, 4 nibble columns per iteration B.s0123 = read_imageh(src1, gy*2 + (i)*(n_4)); B.s4567 = read_imageh(src1, gy*2 + (i)*(n_4)+1); // load 4 consecutive groups of 4 packed 4-bit weights ushort4 bits4 = vload4(0, weight_ptr + (i/4)*(m)); // load 4 consecutive scales (1 scale per 32 elements) half4 scale = vload4(0, scale_ptr + (i/32)*(m)); // j=0 dequantized_weights.s0 = ((bits4.s0 & (0x000F)) - 8) * scale.s0; // dequantize a row of the 16 weights dequantized_weights.s1 = ((bits4.s1 & (0x000F)) - 8) * scale.s1; dequantized_weights.s2 = ((bits4.s2 & (0x000F)) - 8) * scale.s2; dequantized_weights.s3 = ((bits4.s3 & (0x000F)) - 8) * scale.s3; c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate c1 += B * dequantized_weights.s1; c2 += B * dequantized_weights.s2; c3 += B * dequantized_weights.s3; // j=1 B.s0123 = read_imageh(src1, gy*2 + (i+1)*(n_4)); B.s4567 = read_imageh(src1, gy*2 + (i+1)*(n_4)+1); dequantized_weights.s0 = (((bits4.s0 & (0x00F0)) >> 4) - 8) * scale.s0; // dequantize a row of the 16 weights dequantized_weights.s1 = (((bits4.s1 & (0x00F0)) >> 4) - 8) * scale.s1; dequantized_weights.s2 = (((bits4.s2 & (0x00F0)) >> 4) - 8) * scale.s2; dequantized_weights.s3 = (((bits4.s3 & (0x00F0)) >> 4) - 8) * scale.s3; c0 += B * dequantized_weights.s0; //vector-scalar multiplication to accumulate c1 += B * dequantized_weights.s1; c2 += B * dequantized_weights.s2; c3 += B * dequantized_weights.s3; // j=2 B.s0123 = read_imageh(src1, gy*2 + (i+2)*(n_4)); B.s4567 = read_imageh(src1, gy*2 + (i+2)*(n_4)+1); dequantized_weights.s0 = (((bits4.s0 & (0x0F00)) >> 8) - 8) * scale.s0; // dequantize a row of the 16 weights dequantized_weights.s1 = (((bits4.s1 & (0x0F00)) >> 8) - 8) * scale.s1; dequantized_weights.s2 = (((bits4.s2 & (0x0F00)) >> 8) - 8) * scale.s2; dequantized_weights.s3 = (((bits4.s3 & (0x0F00)) >> 8) - 8) * scale.s3; c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate c1 += B * dequantized_weights.s1; c2 += B * dequantized_weights.s2; c3 += B * dequantized_weights.s3; // j=3 B.s0123 = read_imageh(src1, gy*2 + (i+3)*(n_4)); B.s4567 = read_imageh(src1, gy*2 + (i+3)*(n_4)+1); dequantized_weights.s0 = (((bits4.s0 & (0xF000)) >> 12) - 8) * scale.s0; // dequantize a row of the 16 weights dequantized_weights.s1 = (((bits4.s1 & (0xF000)) >> 12) - 8) * scale.s1; dequantized_weights.s2 = (((bits4.s2 & (0xF000)) >> 12) - 8) * scale.s2; dequantized_weights.s3 = (((bits4.s3 & (0xF000)) >> 12) - 8) * scale.s3; c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate c1 += B * dequantized_weights.s1; c2 += B * dequantized_weights.s2; c3 += B * dequantized_weights.s3; } int idx = (gy<<3)*m + (gx<<2); // vectorized store 16 elements // conditional check if store is to a valid location. 
Required when N is not a multiple of 8 // if statements allow registers to be reused for each store // provides a performance boost due to reduced register footprint, which increases number of concurrent waves if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s0, c1.s0, c2.s0, c3.s0), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s1, c1.s1, c2.s1, c3.s1), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s2, c1.s2, c2.s2, c3.s2), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s3, c1.s3, c2.s3, c3.s3), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s4, c1.s4, c2.s4, c3.s4), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s5, c1.s5, c2.s5, c3.s5), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s6, c1.s6, c2.s6, c3.s6), 0, dst + idx); idx += m; } if(idx+3 < m*n_no_padding){ vstore4((float4)(c0.s7, c1.s7, c2.s7, c3.s7), 0, dst + idx); } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mat_f16_f32.cl000066400000000000000000000107411512524704700241310ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #if defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #else #define REQD_SUBGROUP_SIZE_128 #endif #define OPWM 64 #define OPWN 64 #define CPWK 8 #define OPTM 4 #define OPTN 8 #define WG_M (OPWM / OPTM) #define WG_N (OPWN / OPTN) #define VEC_K (CPWK / 4) REQD_SUBGROUP_SIZE_128 __kernel void mul_mat_f16_f32( const int M, const int N, const int K, __global const void* A_void, ulong A_offset, __global const void* B_void, ulong B_offset, __global void* C_void, ulong C_offset) { __global const half* A = (__global const half* )((__global const char*)A_void + A_offset); __global const float* B = (__global const float*)((__global const char*)B_void + B_offset); __global float* C = (__global float*)((__global char*)C_void + C_offset); const int lidm = get_local_id(0); const int lidn = get_local_id(1); const int lid = lidn * WG_M + lidm; const int offsetM = get_group_id(0) * OPWM; const int offsetN = get_group_id(1) * OPWN; __local half4 Alocal[OPWM][VEC_K]; __local float4 Blocal[OPWN][VEC_K]; float sum[OPTM][OPTN]; for (int wm = 0; wm < OPTM; wm++) { for (int wn = 0; wn < OPTN; wn++) { sum[wm][wn] = 0.0f; } } const int numTiles = (K + CPWK - 1) / CPWK; const int load_row_a = lid % OPWM; const int load_vec_k_a = lid / OPWM; const int global_row_a = offsetM + load_row_a; const int load_row_b = lid % OPWN; const int load_vec_k_b = lid / OPWN; const int global_row_b = offsetN + load_row_b; for (int t = 0; t < numTiles; t++) { const int k_start = t * CPWK; const int k_vec_start_a = k_start + load_vec_k_a * 4; const int k_vec_start_b = k_start + load_vec_k_b * 4; if (global_row_a < M && k_vec_start_a < K) { if (k_vec_start_a + 3 < K) { Alocal[load_row_a][load_vec_k_a] = vload4(0, A + global_row_a * K + k_vec_start_a); } else { half4 tempA = (half4)(0.0h); if (k_vec_start_a < K) tempA.s0 = A[global_row_a * K + k_vec_start_a]; if (k_vec_start_a + 1 < K) tempA.s1 = A[global_row_a * K + k_vec_start_a + 1]; if (k_vec_start_a + 2 < K) tempA.s2 = A[global_row_a * K + k_vec_start_a + 2]; Alocal[load_row_a][load_vec_k_a] = tempA; } } else { Alocal[load_row_a][load_vec_k_a] = (half4)(0.0h); } if (global_row_b < N && k_vec_start_b < K) { if (k_vec_start_b + 3 < K) { Blocal[load_row_b][load_vec_k_b] = vload4(0, B 
+ global_row_b * K + k_vec_start_b); } else { float4 tempB = (float4)(0.0f); if (k_vec_start_b < K) tempB.s0 = B[global_row_b * K + k_vec_start_b]; if (k_vec_start_b + 1 < K) tempB.s1 = B[global_row_b * K + k_vec_start_b + 1]; if (k_vec_start_b + 2 < K) tempB.s2 = B[global_row_b * K + k_vec_start_b + 2]; Blocal[load_row_b][load_vec_k_b] = tempB; } } else { Blocal[load_row_b][load_vec_k_b] = (float4)(0.0f); } barrier(CLK_LOCAL_MEM_FENCE); #pragma unroll for (int k_vec = 0; k_vec < VEC_K; k_vec++) { float4 a_fvecs[OPTM]; int current_row_a = lidm; for (int wm = 0; wm < OPTM; wm++) { a_fvecs[wm] = convert_float4(Alocal[current_row_a][k_vec]); current_row_a += WG_M; } float4 b_fvecs[OPTN]; int current_row_b = lidn; for (int wn = 0; wn < OPTN; wn++) { b_fvecs[wn] = Blocal[current_row_b][k_vec]; current_row_b += WG_N; } for (int wm = 0; wm < OPTM; wm++) { for (int wn = 0; wn < OPTN; wn++) { sum[wm][wn] += dot(a_fvecs[wm], b_fvecs[wn]); } } } barrier(CLK_LOCAL_MEM_FENCE); } for (int wm = 0; wm < OPTM; wm++) { int globalRow = offsetM + lidm + wm * WG_M; if (globalRow < M) { for (int wn = 0; wn < OPTN; wn++) { int globalCol = offsetN + lidn + wn * WG_N; if (globalCol < N) { C[globalCol * M + globalRow] = sum[wm][wn]; } } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl000066400000000000000000000257431512524704700253450ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #pragma OPENCL EXTENSION cl_khr_subgroups : enable #define LM_FIRST_256B 0 #define LM_SECOND_256B 64 #define LM_THIRD_256B 128 #define LM_FOURTH_256B 192 inline float16 mm_load_a( image1d_buffer_t matrix_A, uint subMatrixAStartInElements, int nb01, int line_stride_matrix_A_in_bytes ) { __private float8 regA; size_t sub_block_id_m = get_local_id(0); #ifdef KQV uint a_texCoord = subMatrixAStartInElements/2 + (sub_block_id_m * nb01/4); #else // KQ uint a_texCoord = subMatrixAStartInElements/2 + (sub_block_id_m * line_stride_matrix_A_in_bytes/4); #endif regA.s0123 = read_imagef(matrix_A, a_texCoord/4); regA.s4567 = read_imagef(matrix_A, (a_texCoord+4)/4); return convert_float16(as_half16(regA)); } inline float4 alu_32( float16 regA, __local float4* matrix_B_vec ) { __private float4 rC = 0; int i = get_sub_group_id() * 64; rC += regA.s0 * matrix_B_vec[i]; rC += regA.s1 * matrix_B_vec[i + 16]; rC += regA.s4 * matrix_B_vec[i + 1]; rC += regA.s5 * matrix_B_vec[i + 17]; rC += regA.s8 * matrix_B_vec[i + 2]; rC += regA.s9 * matrix_B_vec[i + 18]; rC += regA.sc * matrix_B_vec[i + 3]; rC += regA.sd * matrix_B_vec[i + 19]; i += 32; rC += regA.s2 * matrix_B_vec[i]; rC += regA.s3 * matrix_B_vec[i + 16]; rC += regA.s6 * matrix_B_vec[i + 1]; rC += regA.s7 * matrix_B_vec[i + 17]; rC += regA.sa * matrix_B_vec[i + 2]; rC += regA.sb * matrix_B_vec[i + 18]; rC += regA.se * matrix_B_vec[i + 3]; rC += regA.sf * matrix_B_vec[i + 19]; return rC; } inline float16 alu_16( float16 regA, __local float* matrix_B_local ) { float16 out; __local float4* matrix_B_vec = (__local float4*)matrix_B_local; out.s0123 = alu_32(regA, matrix_B_vec); out.s4567 = alu_32(regA, matrix_B_vec + 4); out.s89ab = alu_32(regA, matrix_B_vec + 8); out.scdef = alu_32(regA, matrix_B_vec + 12); return out; } inline void mm_mad( __local float* matrix_B_local, float16 regA, float8 regB, uint b_localOffsetInWords, float16* regC0_ptr, float16* regC1_ptr ) { int offset = b_localOffsetInWords + get_sub_group_id() * 256; matrix_B_local[offset + LM_FIRST_256B] = regB.s0; matrix_B_local[offset + LM_SECOND_256B] = regB.s1; matrix_B_local[offset + 
LM_THIRD_256B] = regB.s2; matrix_B_local[offset + LM_FOURTH_256B] = regB.s3; float16 add0 = alu_16(regA, matrix_B_local); *regC0_ptr += add0; matrix_B_local[offset + LM_FIRST_256B] = regB.s4; matrix_B_local[offset + LM_SECOND_256B] = regB.s5; matrix_B_local[offset + LM_THIRD_256B] = regB.s6; matrix_B_local[offset + LM_FOURTH_256B] = regB.s7; float16 add1 = alu_16(regA, matrix_B_local); *regC1_ptr += add1; } inline void mm_store_c_N( __write_only image1d_buffer_t matrix_C, float16 regC0, float16 regC1, uint subMatrixCStartInElements, int line_stride_matrix_C_in_bytes, int mask ) { size_t sub_block_id_m = get_local_id(0); uint strideInWords = line_stride_matrix_C_in_bytes/4; uint c_coordInWords_0 = (subMatrixCStartInElements + sub_block_id_m); uint c_coordInWords_1 = c_coordInWords_0 + 1 * strideInWords; uint c_coordInWords_2 = c_coordInWords_0 + 2 * strideInWords; uint c_coordInWords_3 = c_coordInWords_0 + 3 * strideInWords; uint c_coordInWords_4 = c_coordInWords_0 + 4 * strideInWords; uint c_coordInWords_5 = c_coordInWords_0 + 5 * strideInWords; uint c_coordInWords_6 = c_coordInWords_0 + 6 * strideInWords; uint c_coordInWords_7 = c_coordInWords_0 + 7 * strideInWords; uint c_coordInWords_8 = c_coordInWords_0 + 8 * strideInWords; uint c_coordInWords_9 = c_coordInWords_0 + 9 * strideInWords; uint c_coordInWords_10 = c_coordInWords_0 + 10 * strideInWords; uint c_coordInWords_11 = c_coordInWords_0 + 11 * strideInWords; uint c_coordInWords_12 = c_coordInWords_0 + 12 * strideInWords; uint c_coordInWords_13 = c_coordInWords_0 + 13 * strideInWords; uint c_coordInWords_14 = c_coordInWords_0 + 14 * strideInWords; uint c_coordInWords_15 = c_coordInWords_0 + 15 * strideInWords; uint c_coordInWords_16 = c_coordInWords_0 + 16 * strideInWords; uint c_coordInWords_17 = c_coordInWords_0 + 17 * strideInWords; uint c_coordInWords_18 = c_coordInWords_0 + 18 * strideInWords; uint c_coordInWords_19 = c_coordInWords_0 + 19 * strideInWords; uint c_coordInWords_20 = c_coordInWords_0 + 20 * strideInWords; uint c_coordInWords_21 = c_coordInWords_0 + 21 * strideInWords; uint c_coordInWords_22 = c_coordInWords_0 + 22 * strideInWords; uint c_coordInWords_23 = c_coordInWords_0 + 23 * strideInWords; uint c_coordInWords_24 = c_coordInWords_0 + 24 * strideInWords; uint c_coordInWords_25 = c_coordInWords_0 + 25 * strideInWords; uint c_coordInWords_26 = c_coordInWords_0 + 26 * strideInWords; uint c_coordInWords_27 = c_coordInWords_0 + 27 * strideInWords; uint c_coordInWords_28 = c_coordInWords_0 + 28 * strideInWords; uint c_coordInWords_29 = c_coordInWords_0 + 29 * strideInWords; uint c_coordInWords_30 = c_coordInWords_0 + 30 * strideInWords; uint c_coordInWords_31 = c_coordInWords_0 + 31 * strideInWords; if (mask > 0) { write_imagef(matrix_C, c_coordInWords_0, regC0.s0); } if (mask > 1) { write_imagef(matrix_C, c_coordInWords_1, regC0.s1); } if (mask > 2) { write_imagef(matrix_C, c_coordInWords_2, regC0.s2); } if (mask > 3) { write_imagef(matrix_C, c_coordInWords_3, regC0.s3); } if (mask > 4) { write_imagef(matrix_C, c_coordInWords_4, regC0.s4); } if (mask > 5) { write_imagef(matrix_C, c_coordInWords_5, regC0.s5); } if (mask > 6) { write_imagef(matrix_C, c_coordInWords_6, regC0.s6); } if (mask > 7) { write_imagef(matrix_C, c_coordInWords_7, regC0.s7); } if (mask > 8) { write_imagef(matrix_C, c_coordInWords_8, regC0.s8); } if (mask > 9) { write_imagef(matrix_C, c_coordInWords_9, regC0.s9); } if (mask > 10) { write_imagef(matrix_C, c_coordInWords_10, regC0.sa); } if (mask > 11) { write_imagef(matrix_C, c_coordInWords_11, 
regC0.sb); } if (mask > 12) { write_imagef(matrix_C, c_coordInWords_12, regC0.sc); } if (mask > 13) { write_imagef(matrix_C, c_coordInWords_13, regC0.sd); } if (mask > 14) { write_imagef(matrix_C, c_coordInWords_14, regC0.se); } if (mask > 15) { write_imagef(matrix_C, c_coordInWords_15, regC0.sf); } if (mask > 16) { write_imagef(matrix_C, c_coordInWords_16, regC1.s0); } if (mask > 17) { write_imagef(matrix_C, c_coordInWords_17, regC1.s1); } if (mask > 18) { write_imagef(matrix_C, c_coordInWords_18, regC1.s2); } if (mask > 19) { write_imagef(matrix_C, c_coordInWords_19, regC1.s3); } if (mask > 20) { write_imagef(matrix_C, c_coordInWords_20, regC1.s4); } if (mask > 21) { write_imagef(matrix_C, c_coordInWords_21, regC1.s5); } if (mask > 22) { write_imagef(matrix_C, c_coordInWords_22, regC1.s6); } if (mask > 23) { write_imagef(matrix_C, c_coordInWords_23, regC1.s7); } if (mask > 24) { write_imagef(matrix_C, c_coordInWords_24, regC1.s8); } if (mask > 25) { write_imagef(matrix_C, c_coordInWords_25, regC1.s9); } if (mask > 26) { write_imagef(matrix_C, c_coordInWords_26, regC1.sa); } if (mask > 27) { write_imagef(matrix_C, c_coordInWords_27, regC1.sb); } if (mask > 28) { write_imagef(matrix_C, c_coordInWords_28, regC1.sc); } if (mask > 29) { write_imagef(matrix_C, c_coordInWords_29, regC1.sd); } if (mask > 30) { write_imagef(matrix_C, c_coordInWords_30, regC1.se); } if (mask > 31) { write_imagef(matrix_C, c_coordInWords_31, regC1.sf); } } #define TILESIZE_K 16 #define TILESIZE_M 64 #define TILESIZE_N 32 #ifdef KQV __kernel void mul_mm_f16_f32_kqv( #else __kernel void mul_mm_f16_f32_kq( #endif __read_only image1d_buffer_t matrix_A, int offset0, __global float* matrix_B, int offset1, __write_only image1d_buffer_t matrix_C, int offsetd, int M, int K, int N, int D_A, int D_B, int nb01 ) { uint block_id_m = get_global_id(1); uint block_id_n = get_global_id(2) % ((N+TILESIZE_N-1)/TILESIZE_N); uint block_id_d = get_global_id(2) / ((N+TILESIZE_N-1)/TILESIZE_N); __private float16 regA; __private float8 regB; __private float16 regC0; __private float16 regC1; const uint col = block_id_m * TILESIZE_M; const uint row = block_id_n * TILESIZE_N; const uint depth_A = block_id_d / (D_B/D_A); const uint depth_B = block_id_d; #ifdef KQV int line_stride_matrix_A_in_bytes = nb01 * M; int line_stride_matrix_B_in_bytes = K * N * 4; #else int line_stride_matrix_A_in_bytes = K * D_A * 2; int line_stride_matrix_B_in_bytes = K * D_B * 4; #endif int line_stride_matrix_C_in_bytes = M * 4; const uint strideAinElements = line_stride_matrix_A_in_bytes / 2; const uint strideBinElements = line_stride_matrix_B_in_bytes / 4; size_t sub_block_id_m = get_local_id(0); uint b_localOffsetInWords = (sub_block_id_m/16)*16 + ((((sub_block_id_m)>>0)&1)<<2) + ((((sub_block_id_m)>>1)&1)<<3) + ((((sub_block_id_m)>>2)&1)<<0) + ((((sub_block_id_m)>>3)&1)<<1); uint2 b_globalOffsetInWords_xy = {((sub_block_id_m%4)*4), (sub_block_id_m>>2)}; uint b_globalOffsetInWords00, b_globalOffsetInWords16; #ifdef KQV b_globalOffsetInWords00 = b_globalOffsetInWords_xy.x + b_globalOffsetInWords_xy.y*K; b_globalOffsetInWords16 = b_globalOffsetInWords00 + (16 * K); uint subMatrixAStartInElements = depth_A * strideAinElements + col * nb01 / 2; uint subMatrixBStartInElements = depth_B * strideBinElements + row * K; #else b_globalOffsetInWords00 = b_globalOffsetInWords_xy.x + b_globalOffsetInWords_xy.y*line_stride_matrix_B_in_bytes/4; b_globalOffsetInWords16 = b_globalOffsetInWords00 + (16 * line_stride_matrix_B_in_bytes/4); uint subMatrixAStartInElements = col * 
strideAinElements + depth_A * K; uint subMatrixBStartInElements = row * strideBinElements + depth_B * K; #endif __local float matrix_B_local[1024]; for (uint step=0; step < K; step+=TILESIZE_K) { size_t sub_block_id_m = get_local_id(0); regA = mm_load_a(matrix_A, subMatrixAStartInElements, nb01, line_stride_matrix_A_in_bytes); uint b_coordInWords00 = subMatrixBStartInElements + b_globalOffsetInWords00; uint b_coordInWords16 = subMatrixBStartInElements + b_globalOffsetInWords16; regB.s0123 = vload4(b_coordInWords00/4, matrix_B); regB.s4567 = vload4(b_coordInWords16/4, matrix_B); mm_mad(matrix_B_local, regA, regB, b_localOffsetInWords, &regC0, &regC1); subMatrixAStartInElements += TILESIZE_K; subMatrixBStartInElements += TILESIZE_K; } uint subMatrixCStartInElements = depth_B * N * M + row * M + col; mm_store_c_N(matrix_C, regC0, regC1, subMatrixCStartInElements, line_stride_matrix_C_in_bytes, (N-block_id_n*32)); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl000066400000000000000000000113271512524704700250510ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define LOAD_VEC_A 4 #define LOAD_VEC_B 4 #define BM 64 #define BN 64 #define BK 16 #define TM 4 #define TN 8 kernel void kernel_mul_mm_f16_f32_l4_lm( global half4 * src0, ulong offset0, global float4 * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne11, int ne12, int stride_a, int stride_b, int stride_d, int batch_stride_a, int batch_stride_b, int batch_stride_d, int r2, int r3 ) { src0 = (global half4*)((global char*)src0 + offset0); src1 = (global float4*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); local half buf_a[BM * BK]; local float buf_b[BN * BK]; const int batch_idx = get_global_id(2); const int i13 = batch_idx / ne12; const int i12 = batch_idx % ne12; const int i03 = i13 / r3; const int i02 = i12 / r2; const int batch_idx_a = i03 * ne02 + i02; const int ir = get_group_id(0); const int ic = get_group_id(1); const int tid = get_local_id(0); const int th_r = tid % (BM / TM); const int th_c = tid / (BM / TM); const int loadr_a = get_local_id(0) % (BK / LOAD_VEC_A); const int loadc_a = get_local_id(0) / (BK / LOAD_VEC_A); const int loadr_b = get_local_id(0) % (BK / LOAD_VEC_B); const int loadc_b = get_local_id(0) / (BK / LOAD_VEC_B); const int loadstride_a = get_local_size(0) * LOAD_VEC_A / BK; const int loadstride_b = get_local_size(0) * LOAD_VEC_B / BK; int pos_a = (batch_idx_a * batch_stride_a + ir * BM * stride_a) / LOAD_VEC_A; int pos_b = (batch_idx * batch_stride_b + ic * BN * stride_b) / LOAD_VEC_B; float sums[TM * TN]; half cache_a[TM]; float cache_b[TN]; for (int i = 0; i < TM * TN; i++) { sums[i] = 0.0f; } for (int block = 0; block < ne00; block += BK) { for (int l = 0; l < BM; l += loadstride_a) { if (ir*BM + loadc_a + l < ne01) { const int idx = pos_a + (loadc_a + l) * stride_a / LOAD_VEC_A + loadr_a; buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = src0[idx].s0; buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a + l] = src0[idx].s1; buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = src0[idx].s2; buf_a[(loadr_a * LOAD_VEC_A + 3) * BM + loadc_a + l] = src0[idx].s3; } else { buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = 0.0h; buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a + l] = 0.0h; buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = 0.0h; buf_a[(loadr_a * LOAD_VEC_A + 3) * BM + loadc_a + l] = 0.0h; } } for (int l = 0; l < BN; l += loadstride_b) { if (ic*BN + loadc_b + l < 
ne11) { const int idx = pos_b + (loadc_b + l) * stride_b / LOAD_VEC_B + loadr_b; buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = src1[idx].s0; buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = src1[idx].s1; buf_b[(loadr_b * LOAD_VEC_B + 2) * BN + loadc_b + l] = src1[idx].s2; buf_b[(loadr_b * LOAD_VEC_B + 3) * BN + loadc_b + l] = src1[idx].s3; } else { buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = 0.0h; buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = 0.0h; buf_b[(loadr_b * LOAD_VEC_B + 2) * BN + loadc_b + l] = 0.0h; buf_b[(loadr_b * LOAD_VEC_B + 3) * BN + loadc_b + l] = 0.0h; } } barrier(CLK_LOCAL_MEM_FENCE); pos_a += BK / LOAD_VEC_A; pos_b += BK / LOAD_VEC_B; for (int i = 0; i < BK; i++) { for (int j = 0; j < TM; j++) { cache_a[j] = buf_a[(i) * BM + th_r * TM + j]; } for (int j = 0; j < TN; j++) { cache_b[j] = buf_b[(i) * BN + th_c * TN + j]; } for (int cc = 0; cc < TN; cc++) { for (int cr = 0; cr < TM; cr++) { const int sums_idx = cc*TM + cr; sums[sums_idx] = mad(convert_float(cache_a[cr]), cache_b[cc], sums[sums_idx]); } } } barrier(CLK_LOCAL_MEM_FENCE); } const int dr = ir * BM + th_r * TM; const int dc = ic * BN + th_c * TN; const int offsets = batch_idx * batch_stride_d; for (int cc = 0; cc < TN; cc++) { for (int cr = 0; cr < TM; cr++) { if (dr + cr < ne01 && dc + cc < ne11) { dst[offsets + (dc + cc) * stride_d + dr + cr] = sums[cc * TM + cr]; } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl000066400000000000000000000113131512524704700250420ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define LOAD_VEC_A 4 #define LOAD_VEC_B 4 #define BM 64 #define BN 64 #define BK 16 #define TM 4 #define TN 8 kernel void kernel_mul_mm_f32_f32_l4_lm( global float4 * src0, ulong offset0, global float4 * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne11, int ne12, int stride_a, int stride_b, int stride_d, int batch_stride_a, int batch_stride_b, int batch_stride_d, int r2, int r3 ) { src0 = (global float4*)((global char*)src0 + offset0); src1 = (global float4*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); local float buf_a[BM * BK]; local float buf_b[BN * BK]; const int batch_idx = get_global_id(2); const int i13 = batch_idx / ne12; const int i12 = batch_idx % ne12; const int i03 = i13 / r3; const int i02 = i12 / r2; const int batch_idx_a = i03 * ne02 + i02; const int ir = get_group_id(0); const int ic = get_group_id(1); const int tid = get_local_id(0); const int th_r = tid % (BM / TM); const int th_c = tid / (BM / TM); const int loadr_a = get_local_id(0) % (BK / LOAD_VEC_A); const int loadc_a = get_local_id(0) / (BK / LOAD_VEC_A); const int loadr_b = get_local_id(0) % (BK / LOAD_VEC_B); const int loadc_b = get_local_id(0) / (BK / LOAD_VEC_B); const int loadstride_a = get_local_size(0) * LOAD_VEC_A / BK; const int loadstride_b = get_local_size(0) * LOAD_VEC_B / BK; int pos_a = (batch_idx_a * batch_stride_a + ir * BM * stride_a) / LOAD_VEC_A; int pos_b = (batch_idx * batch_stride_b + ic * BN * stride_b) / LOAD_VEC_B; float sums[TM * TN]; float cache_a[TM]; float cache_b[TN]; for (int i = 0; i < TM * TN; i++) { sums[i] = 0.0f; } for (int block = 0; block < ne00; block += BK) { for (int l = 0; l < BM; l += loadstride_a) { if (ir*BM + loadc_a + l < ne01) { const int idx = pos_a + (loadc_a + l) * stride_a / LOAD_VEC_A + loadr_a; buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = src0[idx].s0; buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a 
+ l] = src0[idx].s1; buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = src0[idx].s2; buf_a[(loadr_a * LOAD_VEC_A + 3) * BM + loadc_a + l] = src0[idx].s3; } else { buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = 0.0f; buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a + l] = 0.0f; buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = 0.0f; buf_a[(loadr_a * LOAD_VEC_A + 3) * BM + loadc_a + l] = 0.0f; } } for (int l = 0; l < BN; l += loadstride_b) { if (ic*BN + loadc_b + l < ne11) { const int idx = pos_b + (loadc_b + l) * stride_b / LOAD_VEC_B + loadr_b; buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = src1[idx].s0; buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = src1[idx].s1; buf_b[(loadr_b * LOAD_VEC_B + 2) * BN + loadc_b + l] = src1[idx].s2; buf_b[(loadr_b * LOAD_VEC_B + 3) * BN + loadc_b + l] = src1[idx].s3; } else { buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = 0.0f; buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = 0.0f; buf_b[(loadr_b * LOAD_VEC_B + 2) * BN + loadc_b + l] = 0.0f; buf_b[(loadr_b * LOAD_VEC_B + 3) * BN + loadc_b + l] = 0.0f; } } barrier(CLK_LOCAL_MEM_FENCE); pos_a += BK / LOAD_VEC_A; pos_b += BK / LOAD_VEC_B; for (int i = 0; i < BK; i++) { for (int j = 0; j < TM; j++) { cache_a[j] = buf_a[(i) * BM + th_r * TM + j]; } for (int j = 0; j < TN; j++) { cache_b[j] = buf_b[(i) * BN + th_c * TN + j]; } for (int cc = 0; cc < TN; cc++) { for (int cr = 0; cr < TM; cr++) { const int sums_idx = cc*TM + cr; sums[sums_idx] = mad(cache_a[cr], cache_b[cc], sums[sums_idx]); } } } barrier(CLK_LOCAL_MEM_FENCE); } const int dr = ir * BM + th_r * TM; const int dc = ic * BN + th_c * TN; const int offsets = batch_idx * batch_stride_d; for (int cc = 0; cc < TN; cc++) { for (int cr = 0; cr < TM; cr++) { if (dr + cr < ne01 && dc + cc < ne11) { dst[offsets + (dc + cc) * stride_d + dr + cr] = sums[cc * TM + cr]; } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mm_q8_0_f32_l4_lm.cl000066400000000000000000000115611512524704700252240ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #define LOAD_VEC_A 4 #define LOAD_VEC_B 4 #define BM 64 #define BN 64 #define BK 32 #define TM 4 #define TN 8 kernel void kernel_mul_mm_q8_0_f32_l4_lm( global char4 * src0_q, global half * src0_d, global float4 * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne11, int ne12, int stride_a, int stride_b, int stride_d, int batch_stride_a, int batch_stride_b, int batch_stride_d, int r2, int r3 ) { src1 = (global float4*)((global char*)src1 + offset1); dst = (global float *)((global char*)dst + offsetd); local float buf_a[BM * BK]; local float buf_b[BN * BK]; const int batch_idx = get_global_id(2); const int i13 = batch_idx / ne12; const int i12 = batch_idx % ne12; const int i03 = i13 / r3; const int i02 = i12 / r2; const int batch_idx_a = i03 * ne02 + i02; const int ir = get_group_id(0); const int ic = get_group_id(1); const int tid = get_local_id(0); const int th_r = tid % (BM / TM); const int th_c = tid / (BM / TM); const int loadr_a = get_local_id(0) % (BK / LOAD_VEC_A); const int loadc_a = get_local_id(0) / (BK / LOAD_VEC_A); const int loadr_b = get_local_id(0) % (BK / LOAD_VEC_B); const int loadc_b = get_local_id(0) / (BK / LOAD_VEC_B); const int loadstride_a = get_local_size(0) * LOAD_VEC_A / BK; const int loadstride_b = get_local_size(0) * LOAD_VEC_B / BK; int pos_a = (batch_idx_a * batch_stride_a + ir * BM * stride_a) / LOAD_VEC_A; int pos_b = (batch_idx * batch_stride_b + ic * BN * stride_b) / LOAD_VEC_B; 
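/* Illustrative note on the flat q8_0 layout consumed by the A-tile load in the main loop below: the quants come from src0_q as char4 vectors and the per-block scales from src0_d as halves. One q8_0 block of 32 quants spans 8 char4 vectors, so a linear char4 index idx maps to block ib = idx/8 and vector iqs = idx%8 within that block, and the values are dequantized on the fly as v = convert_float4(q) * d. For example, idx = 13 selects block 1, vector 5, i.e. quants 52..55. */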
float sums[TM * TN]; float cache_a[TM]; float cache_b[TN]; for (int i = 0; i < TM * TN; i++) { sums[i] = 0.0f; } for (int block = 0; block < ne00; block += BK) { for (int l = 0; l < BM; l += loadstride_a) { if (ir*BM + loadc_a + l < ne01) { int idx = pos_a + (loadc_a + l) * stride_a / LOAD_VEC_A + loadr_a; int ib = idx / 8; int iqs = idx % 8; float d = (float)src0_d[ib]; global char4 * qs = src0_q + ib*8 + iqs; char4 q = *qs; float4 v = convert_float4(q)*d; buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = v.s0; buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a + l] = v.s1; buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = v.s2; buf_a[(loadr_a * LOAD_VEC_A + 3) * BM + loadc_a + l] = v.s3; } else { buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = 0.0f; buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a + l] = 0.0f; buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = 0.0f; buf_a[(loadr_a * LOAD_VEC_A + 3) * BM + loadc_a + l] = 0.0f; } } for (int l = 0; l < BN; l += loadstride_b) { if (ic*BN + loadc_b + l < ne11) { int idx = pos_b + (loadc_b + l) * stride_b / LOAD_VEC_B + loadr_b; buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = src1[idx].s0; buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = src1[idx].s1; buf_b[(loadr_b * LOAD_VEC_B + 2) * BN + loadc_b + l] = src1[idx].s2; buf_b[(loadr_b * LOAD_VEC_B + 3) * BN + loadc_b + l] = src1[idx].s3; } else { buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = 0.0f; buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = 0.0f; buf_b[(loadr_b * LOAD_VEC_B + 2) * BN + loadc_b + l] = 0.0f; buf_b[(loadr_b * LOAD_VEC_B + 3) * BN + loadc_b + l] = 0.0f; } } barrier(CLK_LOCAL_MEM_FENCE); pos_a += BK / LOAD_VEC_A; pos_b += BK / LOAD_VEC_B; for (int i = 0; i < BK; i++) { for (int j = 0; j < TM; j++) { cache_a[j] = buf_a[(i) * BM + th_r * TM + j]; } for (int j = 0; j < TN; j++) { cache_b[j] = buf_b[(i) * BN + th_c * TN + j]; } for (int cc = 0; cc < TN; cc++) { for (int cr = 0; cr < TM; cr++) { const int sums_idx = cc*TM + cr; sums[sums_idx] = mad(cache_a[cr], cache_b[cc], sums[sums_idx]); } } } barrier(CLK_LOCAL_MEM_FENCE); } const int dr = ir * BM + th_r * TM; const int dc = ic * BN + th_c * TN; const int offsets = batch_idx * batch_stride_d; for (int cc = 0; cc < TN; cc++) { for (int cr = 0; cr < TM; cr++) { if (dr + cr < ne01 && dc + cc < ne11) { dst[offsets + (dc + cc) * stride_d + dr + cr] = sums[cc * TM + cr]; } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_f16_f16.cl000066400000000000000000000067641512524704700240060ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define N_F16_F16 4 #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_f16_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, 
int ne02, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int r0 = get_group_id(0); int rb = get_group_id(1)*N_F16_F16; int im = get_group_id(2); int i12 = im%ne12; int i13 = im/ne12; ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; global half * x = (global half *) (src0 + offset_src0); if (ne00 < 128) { for (int row = 0; row < N_F16_F16; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global half * y = (global half *) (src1 + offset_src1); float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { sumf += (half) x[i] * (half) y[i]; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { global half4 * x4 = (global half4 *)x; for (int row = 0; row < N_F16_F16; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global half * y = (global half *) (src1 + offset_src1); global half4 * y4 = (global half4 *) y; float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { sumf += (half) x4[i].s0 * y4[i].s0; sumf += (half) x4[i].s1 * y4[i].s1; sumf += (half) x4[i].s2 * y4[i].s2; sumf += (half) x4[i].s3 * y4[i].s3; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) { all_sum += (half) x[i] * y[i]; } dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_f16_f32.cl000066400000000000000000000070351512524704700237740ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define N_F16_F32 4 #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_f16_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int r0 = get_group_id(0); int rb = get_group_id(1)*N_F16_F32; int im = get_group_id(2); int i12 = im%ne12; int i13 = im/ne12; ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; global half * x = (global half *) (src0 + offset_src0); if (ne00 < 128) { for (int row = 0; row < 
N_F16_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global float * y = (global float *) (src1 + offset_src1); float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { sumf += convert_float(x[i]) * y[i]; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { global half4 * x4 = (global half4 *)x; for (int row = 0; row < N_F16_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global float * y = (global float *) (src1 + offset_src1); global float4 * y4 = (global float4 *) y; float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { sumf += convert_float(x4[i].s0) * y4[i].s0; sumf += convert_float(x4[i].s1) * y4[i].s1; sumf += convert_float(x4[i].s2) * y4[i].s2; sumf += convert_float(x4[i].s3) * y4[i].s3; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) { all_sum += (float) x[i] * y[i]; } dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl000066400000000000000000000056141512524704700247450ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_f16_f32_1row( global char * src0, ulong offset0, global char * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); int i12 = im%ne12; int i13 = im/ne12; ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global half * x = (global half *) (src0 + offset_src0); global float * y = (global float *) (src1 + offset_src1); float sumf = 0; if (ne00 < 128) { for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { sumf += (float) x[i] * (float) y[i]; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } else { global half4 * x4 = (global half4 *) x; global float4 * y4 = (global float4 *) y; for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { sumf += (float) x4[i].s0 * y4[i].s0; sumf += (float) x4[i].s1 * y4[i].s1; 
sumf += (float) x4[i].s2 * y4[i].s2; sumf += (float) x4[i].s3 * y4[i].s3; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) { all_sum += (float) x[i] * y[i]; } dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl000066400000000000000000000047271512524704700244000ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif // Assumes row size (ne00) is a multiple of 4 #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_f16_f32_l4( global char * src0, ulong offset0, global char * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int nrows = ne11; int r0 = get_group_id(0); int im = get_group_id(2); int i12 = im%ne12; int i13 = im/ne12; ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; global half4 * x4 = (global half4 *) (src0 + offset_src0); for (int r1 = 0; r1 < nrows; ++r1) { ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global float4 * y4 = (global float4 *) (src1 + offset_src1); float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { sumf += convert_float(x4[i].s0) * y4[i].s0; sumf += convert_float(x4[i].s1) * y4[i].s1; sumf += convert_float(x4[i].s2) * y4[i].s2; sumf += convert_float(x4[i].s3) * y4[i].s3; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_f32_f32.cl000066400000000000000000000070061512524704700237700ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define N_F32_F32 4 #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void 
kernel_mul_mat_f32_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); int r0 = get_group_id(0); int rb = get_group_id(1)*N_F32_F32; int im = get_group_id(2); int i12 = im%ne12; int i13 = im/ne12; ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; global float * x = (global float *) (src0 + offset_src0); if (ne00 < 128) { for (int row = 0; row < N_F32_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global float * y = (global float *) (src1 + offset_src1); float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { sumf += (float) x[i] * (float) y[i]; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { global float4 * x4 = (global float4 *)x; for (int row = 0; row < N_F32_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global float * y = (global float *) (src1 + offset_src1); global float4 * y4 = (global float4 *) y; float sumf = 0; for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { sumf += (float) x4[i].s0 * y4[i].s0; sumf += (float) x4[i].s1 * y4[i].s1; sumf += (float) x4[i].s2 * y4[i].s2; sumf += (float) x4[i].s3 * y4[i].s3; } float all_sum = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) { all_sum += (float) x[i] * y[i]; } dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl000066400000000000000000000131251512524704700251070ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK_MXFP4 32 typedef struct { uchar e; // E8M0 uchar qs[QK_MXFP4/2]; } block_mxfp4; constant static float kvalues_mxfp4_f[16] = { 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f }; static inline float e8m0_to_fp32(uchar x) { int bits; if (x == 0) { bits = 0x00400000; } else { bits = (uint) x << 23; } return as_float(bits); } #ifdef INTEL_GPU #define N_R0_MXFP4 2 // number of rows each subgroup works on #define N_SG_MXFP4 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_MXFP4 2 #define N_SG_MXFP4 2 #define N_SIMDWIDTH 64 #endif inline void mul_mv_mxfp4_f32( 
global char * src0, global char * src1, global char * dst, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3, local char * shmem ) { local float * shmem_f32 = (local float *) shmem; int nb = ne00/QK_MXFP4; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = 0; int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4; uint i12 = im%ne12; uint i13 = im/ne12; ulong offset_src0 = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global block_mxfp4 * x = (global block_mxfp4 *) (src0 + offset_src0); global float * y = (global float *) (src1 + offset_src1); const short ix = get_sub_group_local_id()/2; // 0...15 const short it = get_sub_group_local_id()%2; // 0 or 1 shmem_f32[get_sub_group_local_id()] = kvalues_mxfp4_f[get_sub_group_local_id()%16]; barrier(CLK_LOCAL_MEM_FENCE); float4 yl[4]; float sumf[N_R0_MXFP4] = {0.f}; global float * yb = y + ix * QK_MXFP4 + it * 8; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { global float4 * y4 = (global float4 *)yb; yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; for (short row = 0; row < N_R0_MXFP4; row++) { global block_mxfp4 * xb = x + row*nb + ib; global uchar * q2 = (global uchar *)(xb->qs + 8*it); float4 acc1 = yl[0]*(float4)(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); float4 acc2 = yl[1]*(float4)(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); float4 acc3 = yl[2]*(float4)(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); float4 acc4 = yl[3]*(float4)(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); acc1 = (acc1 + acc3) + (acc2 + acc4); sumf[row] += e8m0_to_fp32(xb->e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3)); } yb += (N_SIMDWIDTH/2) * QK_MXFP4; } global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) { float sum_all = sub_group_reduce_add(sumf[row]); if (get_sub_group_local_id() == 0) { dst_f32[first_row + row] = sum_all; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_id_mxfp4_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne11, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne20, int ne21, ulong nb21, int ne0, int ne1, int r2, int r3, local char * shmem ) { src0 = (global char *)((global char *)src0 + offset0); src1 = (global char *)((global char *)src1 + offset1); src2 = (global char *)((global char *)src2 + offset2); dst = (global char *)((global char *)dst + offsetd); const int iid1 = get_group_id(2)/ne20; const int idx = get_group_id(2)%ne20; int i02 = ((global int *) (src2 + iid1*nb21))[idx]; int i11 = idx % ne11; int i12 = iid1; int i1 = idx; int i2 = i12; global char * src0_cur = src0 + i02*nb02; global char * src1_cur = src1 + i11*nb11 + i12*nb12; global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float); mul_mv_mxfp4_f32(src0_cur, src1_cur, dst_cur, ne00, nb01, nb02, nb03, ne12, nb11, nb12, nb13, ne0, ne1, r2, r3, shmem); } 
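/* Sketch of the MXFP4 decode used by mul_mv_mxfp4_f32 above, derived from the code itself: each block carries one E8M0 scale byte e plus QK_MXFP4/2 = 16 bytes of packed 4-bit indices into kvalues_mxfp4_f. The low nibble of qs[j] is element j of the block and the high nibble is element j + 16, which is why yl pairs the y offsets 0/16 and 4/20 with the two nibbles of each byte. e8m0_to_fp32() reconstructs the scale 2^(e-127) by placing e directly in the float32 exponent field: e.g. e = 127 yields bits 0x3F800000 (1.0f) and e = 128 yields 0x40000000 (2.0f); the e == 0 special case returns the subnormal 0x00400000, i.e. 2^-127, which has no normal float32 encoding. */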
ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32_flat.cl000066400000000000000000000132761512524704700261240ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK_MXFP4 32 static inline half4 mxfp4_to_fp16_packed(ushort fp4x4) { ushort2 fp16_packed_a, fp16_packed_b, bias_a, bias_b, sign_a, sign_b; fp16_packed_a.lo = (fp4x4 << 9) & 0x0E00; fp16_packed_a.hi = (fp4x4 << 5) & 0x0E00; fp16_packed_b.lo = (fp4x4 << 1) & 0x0E00; fp16_packed_b.hi = (fp4x4 >> 3) & 0x0E00; bias_a.lo = (fp16_packed_a.lo == 0) ? 0x0 : 0x3800; bias_a.hi = (fp16_packed_a.hi == 0) ? 0x0 : 0x3800; bias_b.lo = (fp16_packed_b.lo == 0) ? 0x0 : 0x3800; bias_b.hi = (fp16_packed_b.hi == 0) ? 0x0 : 0x3800; fp16_packed_a.lo = (fp16_packed_a.lo == 0x0200) ? 0x0 : fp16_packed_a.lo; fp16_packed_a.hi = (fp16_packed_a.hi == 0x0200) ? 0x0 : fp16_packed_a.hi; fp16_packed_b.lo = (fp16_packed_b.lo == 0x0200) ? 0x0 : fp16_packed_b.lo; fp16_packed_b.hi = (fp16_packed_b.hi == 0x0200) ? 0x0 : fp16_packed_b.hi; sign_a.lo = (fp4x4 << 12) & 0x8000; sign_a.hi = (fp4x4 << 8) & 0x8000; sign_b.lo = (fp4x4 << 4) & 0x8000; sign_b.hi = fp4x4 & 0x8000; fp16_packed_a = sign_a + bias_a + fp16_packed_a; fp16_packed_b = sign_b + bias_b + fp16_packed_b; return as_half4((ushort4)(fp16_packed_a, fp16_packed_b)); } static inline float e8m0_to_fp32(uchar x) { int bits; bits = (x == 0) ? 
0x00400000 : ((uint) x << 23); return as_float(bits); } #ifdef INTEL_GPU #define N_R0_MXFP4 2 // number of rows each subgroup works on #define N_SG_MXFP4 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_MXFP4 4 #define N_SG_MXFP4 1 #define N_SIMDWIDTH 64 #define SRC0Q_IMG #endif kernel void kernel_mul_mv_id_mxfp4_f32_flat( #ifdef SRC0Q_IMG __read_only image1d_buffer_t src0_q, #else global uchar * src0_q, #endif global uchar * src0_e, global uchar * src1, ulong offset1, global uchar * src2, ulong offset2, global uchar * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne11, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne20, int ne21, ulong nb21, int ne0, int ne1, int r2, int r3 ) { dst = dst + offsetd; const int iid1 = get_group_id(2) / ne20; const int idx = get_group_id(2) % ne20; uint i02 = ((global uint *) (src2 + offset2 + iid1 * nb21))[idx]; int i11 = idx % ne11; int nb = ne00 / QK_MXFP4; uint src0_off = i02*nb02; src0_off /= 17; // 17 = sizeof(block_mxfp4) src0_e = src0_e + src0_off; dst = dst + (idx * ne0 + iid1 * ne1 * ne0) * sizeof(float); int r0 = get_group_id(0); int r1 = get_group_id(1); int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4; uint offset_src0 = first_row*nb01; offset_src0 /= 17; // 17 = sizeof(block_mxfp4) #ifdef SRC0Q_IMG ulong offset_q = src0_off + offset_src0; #else src0_q = src0_q + src0_off*16; global uchar16 * x_q = (global uchar16 *)(src0_q) + offset_src0; #endif global uchar * x_e = src0_e + offset_src0; const short ix = get_sub_group_local_id() >> 1; const short it = get_sub_group_local_id() & 1; float sumf[N_R0_MXFP4] = {0.f}; src1 = src1 + offset1 + i11 * nb11 + iid1 * nb12; global float * y = (global float *) (src1 + r1 * nb11); global float * yb = y + ix * QK_MXFP4 + it * 8; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH / 2) { global float4 * y4 = (global float4 *)yb; #pragma unroll for (short row = 0; row < N_R0_MXFP4; row++) { uchar xb_e = x_e[row * nb + ib]; #ifdef SRC0Q_IMG ushort4 xb_q = as_ushort4(read_imageui(src0_q, (offset_q + row * nb + ib) * 2 + it).xy); #else ushort4 xb_q = vload4(0, (global ushort *)((global uchar *)(x_q + row * nb + ib) + 8 * it)); #endif half4 fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s0); half4 fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s1); float4 acc1 = y4[0] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2); acc1 += y4[4] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3); fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s2); fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s3); acc1 += y4[1] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2); acc1 += y4[5] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3); sumf[row] += e8m0_to_fp32(xb_e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3)); } yb += (N_SIMDWIDTH / 2) * QK_MXFP4; } global float * dst_f32 = (global float *)dst + (ulong)r1 * ne0; for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) { float sum_all = sub_group_reduce_add(sumf[row]); if (get_sub_group_local_id() == 0) { dst_f32[first_row + row] = sum_all; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl000066400000000000000000000206731512524704700262470ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION 
cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; // This function requires the original shuffled weights. // As a reminder, the original weights are shuffled so that (q[0], q[16]) are // packed together in a byte, so are (q[1], q[17]) and so on. inline float block_q_4_0_dot_y_flat( global uchar * x, global half * dh, float sumy, float16 yl, int il ) { float d = *dh; global ushort * qs = ((global ushort *)x + il/2); float acc = 0.f; acc += yl.s0 * (qs[0] & 0x000F); acc += yl.s1 * (qs[0] & 0x0F00); acc += yl.s8 * (qs[0] & 0x00F0); acc += yl.s9 * (qs[0] & 0xF000); acc += yl.s2 * (qs[1] & 0x000F); acc += yl.s3 * (qs[1] & 0x0F00); acc += yl.sa * (qs[1] & 0x00F0); acc += yl.sb * (qs[1] & 0xF000); acc += yl.s4 * (qs[2] & 0x000F); acc += yl.s5 * (qs[2] & 0x0F00); acc += yl.sc * (qs[2] & 0x00F0); acc += yl.sd * (qs[2] & 0xF000); acc += yl.s6 * (qs[3] & 0x000F); acc += yl.s7 * (qs[3] & 0x0F00); acc += yl.se * (qs[3] & 0x00F0); acc += yl.sf * (qs[3] & 0xF000); return d * (sumy * -8.f + acc); } // // This variant outputs 8 values. // #undef N_DST #undef N_SIMDGROUP #undef N_SIMDWIDTH #ifdef INTEL_GPU #define N_DST 8 // each SIMD group works on 8 rows #define N_SIMDGROUP 1 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_DST 8 #define N_SIMDGROUP 1 #define N_SIMDWIDTH 64 #endif inline void mul_vec_q_n_f32_8x_flat( global char * src0_q, global half * src0_d, global float * src1, global float * dst, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { const ulong nb = ne00/QK4_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = 0; int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; int i12 = im%ne12; int i13 = im/ne12; // The number of scales is the same as the number of blocks. ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); // Each block contains QK4_0/2 uchars, hence offset for qs is as follows. 
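/* (QK4_0/2 = 16, so offset0_q below is a byte offset into src0_q while offset0_d above is a block offset into src0_d. Here im == 0, so i12 = i13 = 0 and the group starting at row first_row begins at scale index first_row*nb and at quant byte first_row*nb*16.) */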
ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; global uchar * x = (global uchar *) src0_q + offset0_q; global half * d = (global half *) src0_d + offset0_d; global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; float16 yl; float8 sumf = 0.f; int ix = get_sub_group_local_id()/2; int il = 8*(get_sub_group_local_id()%2); global float * yb = y + ix*QK4_0 + il; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { float sumy = 0.f; sumy += yb[0]; sumy += yb[1]; sumy += yb[2]; sumy += yb[3]; sumy += yb[4]; sumy += yb[5]; sumy += yb[6]; sumy += yb[7]; sumy += yb[16]; sumy += yb[17]; sumy += yb[18]; sumy += yb[19]; sumy += yb[20]; sumy += yb[21]; sumy += yb[22]; sumy += yb[23]; yl.s0 = yb[0]; yl.s1 = yb[1]/256.f; yl.s2 = yb[2]; yl.s3 = yb[3]/256.f; yl.s4 = yb[4]; yl.s5 = yb[5]/256.f; yl.s6 = yb[6]; yl.s7 = yb[7]/256.f; yl.s8 = yb[16]/16.f; yl.s9 = yb[17]/4096.f; yl.sa = yb[18]/16.f; yl.sb = yb[19]/4096.f; yl.sc = yb[20]/16.f; yl.sd = yb[21]/4096.f; yl.se = yb[22]/16.f; yl.sf = yb[23]/4096.f; sumf.s0 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); sumf.s1 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); sumf.s2 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); sumf.s3 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); sumf.s4 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il); sumf.s5 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il); sumf.s6 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); sumf.s7 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); yb += QK4_0 * (N_SIMDWIDTH/2); } float8 tot = (float8)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; } if (first_row + 2 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; } if (first_row + 3 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; } if (first_row + 4 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; } if (first_row + 5 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; } if (first_row + 6 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; } if (first_row + 7 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_id_q4_0_f32_8x_flat( global char * src0_q, global half * src0_d, global float * src1, ulong offset1, global char * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, ulong nb00, ulong nb02, int ne10, int ne11, int ne12, ulong nb11, ulong nb12, int ne20, int ne21, ulong nb21, int ne0, int ne1, int r2, int r3 ) { src1 = (global float *)((global char *)src1 + offset1); src2 = (global char *)((global char *)src2 + offset2); dst = (global float *)((global char *)dst + offsetd); const int iid1 = get_group_id(2)/ne20; const int idx = get_group_id(2)%ne20; const int i02 = ((global int 
*)(src2 + iid1*nb21))[idx]; const int i11 = idx%ne11; const int i12 = iid1; const int i1 = idx; const int i2 = i12; global char * src0_q_cur = src0_q + (i02*nb02/nb00)*(QK4_0/2); global half * src0_d_cur = src0_d + (i02*nb02/nb00); global float * src1_cur = (global float *)((global char *) src1 + i11*nb11 + i12*nb12); global float * dst_cur = dst + i1*ne0 + i2*ne1*ne0; mul_vec_q_n_f32_8x_flat(src0_q_cur, src0_d_cur, src1_cur, dst_cur, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32.cl000066400000000000000000000076701512524704700246300ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK8_0 32 typedef struct { half d; // delta char qs[QK8_0]; // quants } block_q8_0; #define NB_Q8_0 8 #ifdef INTEL_GPU #define N_R0_Q8_0 4 // number of rows each subgroup works on #define N_SG_Q8_0 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_Q8_0 4 #define N_SG_Q8_0 2 #define N_SIMDWIDTH 64 #endif #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_id_q8_0_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, int ne01, ulong nb01, ulong nb02, int ne11, int ne12, ulong nb11, ulong nb12, int ne20, int ne21, ulong nb21, int ne0, int ne1 ) { src0 = (global char *)((global char *)src0 + offset0); src1 = (global char *)((global char *)src1 + offset1); src2 = (global char *)((global char *)src2 + offset2); dst = (global char *)((global char *)dst + offsetd); int iid1 = get_group_id(2)/ne20; int idx = get_group_id(2)%ne20; int i02 = ((global int *) (src2 + iid1*nb21))[idx]; int i11_ = idx % ne11; int i12_ = iid1; int i1 = idx; int i2 = i12_; global char * src0_cur = src0 + i02*nb02; global char * src1_cur = src1 + i11_*nb11 + i12_*nb12; global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float); int nb = ne00/QK8_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0; ulong offset_src1 = r1*nb11; global float * y = (global float *) (src1_cur + offset_src1); // pointers to src0 rows global block_q8_0 * ax[N_R0_Q8_0]; for (int row = 0; row < N_R0_Q8_0; ++row) { ulong offset_src0 = (first_row + row)*nb01; ax[row] = (global block_q8_0 *) ((global char *) src0_cur + offset_src0); } float yl[NB_Q8_0]; float sumf[N_R0_Q8_0] = { 0.f }; const short ix = get_sub_group_local_id()/4; const short il = get_sub_group_local_id()%4; global float * yb = y + ix*QK8_0 + il*NB_Q8_0; // each thread handles NB_Q8_0 quants at a time for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) { for (short i = 0; i < NB_Q8_0; ++i) { yl[i] = 
yb[i]; } for (short row = 0; row < N_R0_Q8_0; row++) { global char * qs = ax[row][ib].qs + il*NB_Q8_0; float sumq = 0.f; for (short iq = 0; iq < NB_Q8_0; ++iq) { sumq += qs[iq] * yl[iq]; } sumf[row] += sumq*ax[row][ib].d; } yb += N_SIMDWIDTH*NB_Q8_0; } global float * dst_f32 = (global float *) dst_cur + (ulong)r1*ne0; for (int row = 0; row < N_R0_Q8_0; ++row) { float tot = sub_group_reduce_add(sumf[row]); if (get_sub_group_local_id() == 0 && first_row + row < ne01) { dst_f32[first_row + row] = tot; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32_flat.cl000066400000000000000000000147721512524704700256370ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK8_0 32 typedef struct { half d; // delta char qs[QK8_0]; // quants } block_q8_0; #define NB_Q8_0 8 #ifdef INTEL_GPU #define N_R0_Q8_0 4 // number of rows each subgroup works on #define N_SG_Q8_0 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_Q8_0 4 #define N_SG_Q8_0 2 #define N_SIMDWIDTH 64 #endif #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_id_q8_0_f32_flat( global char * src0_q, global half * src0_d, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, int ne01, ulong nb01, ulong nb02, int ne11, int ne12, ulong nb11, ulong nb12, int ne20, int ne21, ulong nb21, int ne0, int ne1 ) { src1 = (global char *)((global char *)src1 + offset1); src2 = (global char *)((global char *)src2 + offset2); dst = (global char *)((global char *)dst + offsetd); int iid1 = (int)get_group_id(2)/ne20; int idx = (int)get_group_id(2)%ne20; int i02 = ((global int *) (src2 + iid1*nb21))[idx]; int i11_ = idx % ne11; int i12_ = iid1; int i1 = idx; int i2 = i12_; // 34 == sizeof(block_q8_0) uint src0_off = i02*nb02; src0_off /= 34; global char * src0_q_cur = src0_q + src0_off*sizeof(char)*QK8_0; global half * src0_d_cur = src0_d + src0_off; global char * src1_cur = src1 + i11_*nb11 + i12_*nb12; global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float); int nb = ne00/QK8_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0; ulong offset_src1 = r1*nb11; global float * y = (global float *) (src1_cur + offset_src1); // pointers to src0 rows uint offset_src0_base = first_row*nb01; global char * ax0, * ax1, * ax2, * ax3; global half * ad0, * ad1, * ad2, * ad3; uint offset_src0; offset_src0 = offset_src0_base + 0*nb01; offset_src0 = offset_src0/34; ax0 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0); ad0 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half)); offset_src0 = offset_src0_base 
+ 1*nb01; offset_src0 = offset_src0/34; ax1 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0); ad1 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half)); offset_src0 = offset_src0_base + 2*nb01; offset_src0 = offset_src0/34; ax2 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0); ad2 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half)); offset_src0 = offset_src0_base + 3*nb01; offset_src0 = offset_src0/34; ax3 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0); ad3 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half)); const short ix = get_sub_group_local_id()/4; const short il = get_sub_group_local_id()%4; global float * yb = y + ix*QK8_0 + il*NB_Q8_0; float8 yl; float8 qv; float4 sumf = 0.f; float sumq = 0.f; global char * qs; // each thread handles NB_Q8_0 quants at a time for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) { yl = vload8(0, yb); qs = ax0 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s0 += sumq*ad0[ib]; qs = ax1 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s1 += sumq*ad1[ib]; qs = ax2 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s2 += sumq*ad2[ib]; qs = ax3 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s3 += sumq*ad3[ib]; yb += N_SIMDWIDTH*NB_Q8_0; } global float * dst_f32 = (global float *) dst_cur + (ulong)r1*ne0; float4 tot = (float4)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst_f32[first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst_f32[first_row + 1] = tot.s1; } if (first_row + 2 < ne01) { dst_f32[first_row + 2] = tot.s2; } if (first_row + 3 < ne01) { dst_f32[first_row + 3] = tot.s3; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl000066400000000000000000000107261512524704700244370ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 
__attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK_MXFP4 32 typedef struct { uchar e; // E8M0 uchar qs[QK_MXFP4/2]; } block_mxfp4; constant static float kvalues_mxfp4_f[16] = { 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f }; static inline float e8m0_to_fp32(uchar x) { int bits; if (x == 0) { bits = 0x00400000; } else { bits = (uint) x << 23; } return as_float(bits); } #ifdef INTEL_GPU #define N_R0_MXFP4 2 // number of rows each subgroup works on #define N_SG_MXFP4 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_MXFP4 2 #define N_SG_MXFP4 2 #define N_SIMDWIDTH 64 #endif #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_mxfp4_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3, local char * shmem ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); local float * shmem_f32 = (local float *) shmem; int nb = ne00/QK_MXFP4; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4; uint i12 = im%ne12; uint i13 = im/ne12; ulong offset_src0 = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; global block_mxfp4 * x = (global block_mxfp4 *) (src0 + offset_src0); global float * y = (global float *) (src1 + offset_src1); const short ix = get_sub_group_local_id()/2; // 0...15 const short it = get_sub_group_local_id()%2; // 0 or 1 shmem_f32[get_sub_group_local_id()] = kvalues_mxfp4_f[get_sub_group_local_id()%16]; barrier(CLK_LOCAL_MEM_FENCE); float4 yl[4]; float sumf[N_R0_MXFP4] = {0.f}; global float * yb = y + ix * QK_MXFP4 + it * 8; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { global float4 * y4 = (global float4 *)yb; yl[0] = y4[0]; yl[1] = y4[4]; yl[2] = y4[1]; yl[3] = y4[5]; for (short row = 0; row < N_R0_MXFP4; row++) { global block_mxfp4 * xb = x + row*nb + ib; global uchar * q2 = (global uchar *)(xb->qs + 8*it); float4 acc1 = yl[0]*(float4)(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); float4 acc2 = yl[1]*(float4)(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); float4 acc3 = yl[2]*(float4)(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); float4 acc4 = yl[3]*(float4)(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); acc1 = (acc1 + acc3) + (acc2 + acc4); sumf[row] += e8m0_to_fp32(xb->e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3)); } yb += (N_SIMDWIDTH/2) * QK_MXFP4; } global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) { float sum_all = sub_group_reduce_add(sumf[row]); if (get_sub_group_local_id() == 0) { dst_f32[first_row + row] = sum_all; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_mxfp4_f32_flat.cl000066400000000000000000000124741512524704700254470ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma 
OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK_MXFP4 32 static inline half4 mxfp4_to_fp16_packed(ushort fp4x4) { ushort2 fp16_packed_a, fp16_packed_b, bias_a, bias_b, sign_a, sign_b; fp16_packed_a.lo = (fp4x4 << 9) & 0x0E00; fp16_packed_a.hi = (fp4x4 << 5) & 0x0E00; fp16_packed_b.lo = (fp4x4 << 1) & 0x0E00; fp16_packed_b.hi = (fp4x4 >> 3) & 0x0E00; bias_a.lo = (fp16_packed_a.lo == 0) ? 0x0 : 0x3800; bias_a.hi = (fp16_packed_a.hi == 0) ? 0x0 : 0x3800; bias_b.lo = (fp16_packed_b.lo == 0) ? 0x0 : 0x3800; bias_b.hi = (fp16_packed_b.hi == 0) ? 0x0 : 0x3800; fp16_packed_a.lo = (fp16_packed_a.lo == 0x0200) ? 0x0 : fp16_packed_a.lo; fp16_packed_a.hi = (fp16_packed_a.hi == 0x0200) ? 0x0 : fp16_packed_a.hi; fp16_packed_b.lo = (fp16_packed_b.lo == 0x0200) ? 0x0 : fp16_packed_b.lo; fp16_packed_b.hi = (fp16_packed_b.hi == 0x0200) ? 0x0 : fp16_packed_b.hi; sign_a.lo = (fp4x4 << 12) & 0x8000; sign_a.hi = (fp4x4 << 8) & 0x8000; sign_b.lo = (fp4x4 << 4) & 0x8000; sign_b.hi = fp4x4 & 0x8000; fp16_packed_a = sign_a + bias_a + fp16_packed_a; fp16_packed_b = sign_b + bias_b + fp16_packed_b; return as_half4((ushort4)(fp16_packed_a, fp16_packed_b)); } static inline float e8m0_to_fp32(uchar x) { int bits; bits = (x == 0) ? 
0x00400000 : ((uint) x << 23); return as_float(bits); } #ifdef INTEL_GPU #define N_R0_MXFP4 2 // number of rows each subgroup works on #define N_SG_MXFP4 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_MXFP4 2 #define N_SG_MXFP4 2 #define N_SIMDWIDTH 64 #define SRC0Q_IMG #endif #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_mxfp4_f32_flat( #ifdef SRC0Q_IMG __read_only image1d_buffer_t src0_q, #else global uchar * src0_q, #endif global uchar * src0_e, global uchar * src1, ulong offset1, global uchar * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src1 = src1 + offset1; dst = dst + offsetd; int nb = ne00 / QK_MXFP4; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4; uint i12 = im % ne12; uint i13 = im / ne12; uint offset_src0 = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; // 17 = sizeof(block_mxfp4) offset_src0 /= 17; #ifdef SRC0Q_IMG ulong offset_q = offset_src0; #else global uchar16 * x_q = (global uchar16 *)(src0_q) + offset_src0; #endif global uchar * x_e = src0_e + offset_src0; ulong offset_src1 = r1 * nb11 + i12 * nb12 + i13 * nb13; global float * y = (global float *)(src1 + offset_src1); const short ix = get_sub_group_local_id() >> 1; // 0...15 const short it = get_sub_group_local_id() & 1; // 0 or 1 float sumf[N_R0_MXFP4] = {0.f}; global float * yb = y + ix * QK_MXFP4 + it * 8; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { global float4 * y4 = (global float4 *)yb; #pragma unroll for (short row = 0; row < N_R0_MXFP4; row++) { uchar xb_e = x_e[row * nb + ib]; #ifdef SRC0Q_IMG ushort4 xb_q = as_ushort4(read_imageui(src0_q, (offset_q + row * nb + ib) * 2 + it).xy); #else ushort4 xb_q = vload4(0, (global ushort *)((global uchar *)(x_q + row * nb + ib) + 8 * it)); #endif half4 fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s0); half4 fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s1); float4 acc1 = y4[0] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2); acc1 += y4[4] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3); fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s2); fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s3); acc1 += y4[1] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2); acc1 += y4[5] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3); sumf[row] += e8m0_to_fp32(xb_e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3)); } yb += (N_SIMDWIDTH/2) * QK_MXFP4; } global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) { float sum_all = sub_group_reduce_add(sumf[row]); if (get_sub_group_local_id() == 0) { dst_f32[first_row + row] = sum_all; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl000066400000000000000000000143571512524704700241500ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 
__attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; //------------------------------------------------------------------------------ // mul_vec_q_n_f32 //------------------------------------------------------------------------------ // function for calculate inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_4_0_dot_y( global struct block_q4_0 * qb_curr, float sumy, private float * yl, int il ) { float d = qb_curr->d; float2 acc = 0.f; global ushort * qs = ((global ushort *)qb_curr + 1 + il/2); for (int i = 0; i < 8; i+=2) { acc.s0 += yl[i + 0] * (qs[i / 2] & 0x000F) + yl[i + 1] * (qs[i / 2] & 0x0F00); acc.s1 += yl[i + 8] * (qs[i / 2] & 0x00F0) + yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (sumy * -8.f + acc.s0 + acc.s1); } #ifdef INTEL_GPU #define N_DST 4 // each SIMD group works on 4 rows #define N_SIMDGROUP 1 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // assuming SIMD group size is 16 #elif defined (ADRENO_GPU) #define N_DST 4 #define N_SIMDGROUP 1 #define N_SIMDWIDTH 64 #endif inline void mul_vec_q_n_f32( global void * src0, global float * src1, global float * dst, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { const ulong nb = ne00/QK4_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); // (r0 * N_SIMDGROUP + get_sub_group_id()) is essenatially the linear global // id of a SIMD group in the grid. int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; int i12 = im%ne12; int i13 = im/ne12; ulong offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); global struct block_q4_0 * x = (global struct block_q4_0 *) src0 + offset0; global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; float yl[16]; // src1 vector cache float sumf[N_DST]={0.f}; int ix = get_sub_group_local_id()/2; int il = 8*(get_sub_group_local_id()%2); global float * yb = y + ix * QK4_0 + il; // each thread in a SIMD group deals with half a block. 
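// Worked example (illustrative): in block_q_4_0_dot_y above, (q & 0x000F) is a nibble value itself, while (q & 0x0F00), (q & 0x00F0) and (q & 0xF000) equal their nibbles times 256, 16 and 4096; the loop below therefore pre-divides the matching activations by 256, 16 and 4096 so the masked words can be multiplied directly, and the implicit -8 offset of q4_0 is applied once through the sumy * -8.f term.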
for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { float sumy = 0; for (int i = 0; i < 8; i += 2) { sumy += yb[i] + yb[i+1]; yl[i+0] = yb[i+ 0]; yl[i+1] = yb[i+ 1]/256.f; sumy += yb[i+16] + yb[i+17]; yl[i+8] = yb[i+16]/16.f; yl[i+9] = yb[i+17]/4096.f; } for (int row = 0; row < N_DST; row++) { sumf[row] += block_q_4_0_dot_y(x+ib+row*nb, sumy, yl, il); } // One thread in a SIMD group (i.e., subgroup) handles a half block, // hence then entire SIMD group handles SIMDWIDTH/2 blocks. // y points to the activation matrix (of type float). Therefore for // one thread, the # of blocks y should advance is SIMDWIDTH/2 (because // SIMDWIDTH/2 blocks are processed by a SIMD group) - in terms of // floats, it is QK4_0 * (SIMDWIDTH/2), where QK4_0 is the block size. yb += QK4_0 * (N_SIMDWIDTH/2); } // The above does not work for Adreno - it produces incorrect results for // row = 1, 2, 3 and only row = 0 gives the correct result. // If N_DST is changed, the below array must be initialized accordingly. // This also seems to perform better on Intel. float tot[N_DST] = { sub_group_reduce_add(sumf[0]), sub_group_reduce_add(sumf[1]), sub_group_reduce_add(sumf[2]), sub_group_reduce_add(sumf[3])}; for (int row = 0; row < N_DST; ++row) { if (get_sub_group_local_id() == 0 && first_row + row < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot[row]; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_q4_0_f32( global void * src0, ulong offset0, global float * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); mul_vec_q_n_f32(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl000066400000000000000000000242211512524704700262270ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; inline float mm_block_q_4_0_dot_y_flat( global uchar * x, global half * dh, float sumy, float16 yl, int il ) { float d = *dh; 
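// Descriptive note: the accumulation below mirrors block_q_4_0_dot_y in mul_mv_q4_0_f32.cl -- yl.s0..s7 carry the activations paired with the 0x000F/0x0F00 nibbles, yl.s8..sf the ones paired with the 0x00F0/0xF000 nibbles, and the 1/256, 1/16 and 1/4096 bit-shift factors are already folded into yl by the caller.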
global ushort * qs = ((global ushort *)x + il/2); float acc = 0.f; acc += yl.s0 * (qs[0] & 0x000F); acc += yl.s1 * (qs[0] & 0x0F00); acc += yl.s8 * (qs[0] & 0x00F0); acc += yl.s9 * (qs[0] & 0xF000); acc += yl.s2 * (qs[1] & 0x000F); acc += yl.s3 * (qs[1] & 0x0F00); acc += yl.sa * (qs[1] & 0x00F0); acc += yl.sb * (qs[1] & 0xF000); acc += yl.s4 * (qs[2] & 0x000F); acc += yl.s5 * (qs[2] & 0x0F00); acc += yl.sc * (qs[2] & 0x00F0); acc += yl.sd * (qs[2] & 0xF000); acc += yl.s6 * (qs[3] & 0x000F); acc += yl.s7 * (qs[3] & 0x0F00); acc += yl.se * (qs[3] & 0x00F0); acc += yl.sf * (qs[3] & 0xF000); return d * (sumy * -8.f + acc); } #ifdef INTEL_GPU #define N_DST 16 // each SIMD group works on 16 rows (in weights matrix) #define N_SIMDGROUP 1 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // assuming SIMD group size is 16 #elif defined (ADRENO_GPU) #define N_DST 16 #define N_SIMDGROUP 1 #define N_SIMDWIDTH 64 #endif // // This variant performs 1d blocking with 16x output. // Each simdgroup outputs 16 values on `n0` dim (row in the output matrix). // inline void mul_mat_q_n_f32_1d_16x_flat( global uchar * src0_q, global half * src0_d, global float * src1, global float * dst, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { const int nb = ne00/QK4_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of // a SIMD group in the grid. Each SIMD group produces N_DST values in the // result, hence uses nb blocks, i.e., the offset becomes first_row*nb. // Currently with llama2 7B, im is always 0. // TODO: how to handle im/gqa*(nb*ne0)? int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; int i12 = im%ne12; int i13 = im/ne12; // The number of scales is the same as the number of blocks. ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); // Each block contains QK4_0/2 uchars, hence offset for qs is as follows.
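// (Illustrative numbers: with ne00 = 4096 there are nb = 128 blocks per row, so the scales of row r start at element r*128 of src0_d while its quants start at byte r*128*16 of src0_q, since each block contributes QK4_0/2 = 16 quant bytes.)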
ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; global uchar * x = (global uchar *) src0_q + offset0_q; global half * d = (global half *) src0_d + offset0_d; global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; float16 yl; float16 sumf = (float16)(0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f); int ix = get_sub_group_local_id()/2; int il = 8*(get_sub_group_local_id()%2); global float * yb = y + ix*QK4_0 + il; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { float sumy = 0.f; sumy += yb[0]; sumy += yb[1]; sumy += yb[2]; sumy += yb[3]; sumy += yb[4]; sumy += yb[5]; sumy += yb[6]; sumy += yb[7]; sumy += yb[16]; sumy += yb[17]; sumy += yb[18]; sumy += yb[19]; sumy += yb[20]; sumy += yb[21]; sumy += yb[22]; sumy += yb[23]; yl.s0 = yb[0]; yl.s1 = yb[1]/256.f; yl.s2 = yb[2]; yl.s3 = yb[3]/256.f; yl.s4 = yb[4]; yl.s5 = yb[5]/256.f; yl.s6 = yb[6]; yl.s7 = yb[7]/256.f; yl.s8 = yb[16]/16.f; yl.s9 = yb[17]/4096.f; yl.sa = yb[18]/16.f; yl.sb = yb[19]/4096.f; yl.sc = yb[20]/16.f; yl.sd = yb[21]/4096.f; yl.se = yb[22]/16.f; yl.sf = yb[23]/4096.f; sumf.s0 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); sumf.s1 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); sumf.s2 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); sumf.s3 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); sumf.s4 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il); sumf.s5 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il); sumf.s6 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); sumf.s7 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); sumf.s8 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 8*nb*QK4_0/2, d + ib + 8*nb, sumy, yl, il); sumf.s9 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 9*nb*QK4_0/2, d + ib + 9*nb, sumy, yl, il); sumf.sa += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 10*nb*QK4_0/2, d + ib + 10*nb, sumy, yl, il); sumf.sb += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 11*nb*QK4_0/2, d + ib + 11*nb, sumy, yl, il); sumf.sc += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 12*nb*QK4_0/2, d + ib + 12*nb, sumy, yl, il); sumf.sd += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 13*nb*QK4_0/2, d + ib + 13*nb, sumy, yl, il); sumf.se += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 14*nb*QK4_0/2, d + ib + 14*nb, sumy, yl, il); sumf.sf += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 15*nb*QK4_0/2, d + ib + 15*nb, sumy, yl, il); yb += QK4_0 * (N_SIMDWIDTH/2); } float16 tot = (float16)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7), sub_group_reduce_add(sumf.s8), sub_group_reduce_add(sumf.s9), sub_group_reduce_add(sumf.sa), sub_group_reduce_add(sumf.sb), sub_group_reduce_add(sumf.sc), sub_group_reduce_add(sumf.sd), sub_group_reduce_add(sumf.se), sub_group_reduce_add(sumf.sf) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; } if (first_row + 2 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; 
} if (first_row + 3 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; } if (first_row + 4 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; } if (first_row + 5 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; } if (first_row + 6 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; } if (first_row + 7 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; } if (first_row + 8 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 8] = tot.s8; } if (first_row + 9 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 9] = tot.s9; } if (first_row + 10 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 10] = tot.sa; } if (first_row + 11 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 11] = tot.sb; } if (first_row + 12 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 12] = tot.sc; } if (first_row + 13 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 13] = tot.sd; } if (first_row + 14 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 14] = tot.se; } if (first_row + 15 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 15] = tot.sf; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_q4_0_f32_1d_16x_flat( global uchar * src0_q, global half * src0_d, global float * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); mul_mat_q_n_f32_1d_16x_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl000066400000000000000000000201001512524704700261400ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; inline float mm_block_q_4_0_dot_y_flat( global uchar * x, global half * dh, float sumy, float16 yl, int il ) { float d = *dh; global ushort * qs = ((global ushort *)x + il/2); float acc = 0.f; acc += yl.s0 * (qs[0] & 0x000F); acc += yl.s1 * (qs[0] & 0x0F00); acc += yl.s8 * (qs[0] & 0x00F0); acc += yl.s9 * (qs[0] & 0xF000); acc += yl.s2 * (qs[1] & 0x000F); acc += yl.s3 * (qs[1] & 0x0F00); acc += yl.sa * (qs[1] & 0x00F0); 
acc += yl.sb * (qs[1] & 0xF000); acc += yl.s4 * (qs[2] & 0x000F); acc += yl.s5 * (qs[2] & 0x0F00); acc += yl.sc * (qs[2] & 0x00F0); acc += yl.sd * (qs[2] & 0xF000); acc += yl.s6 * (qs[3] & 0x000F); acc += yl.s7 * (qs[3] & 0x0F00); acc += yl.se * (qs[3] & 0x00F0); acc += yl.sf * (qs[3] & 0xF000); return d * (sumy * -8.f + acc); } #ifdef INTEL_GPU #define N_DST 8 // each SIMD group works on 8 rows (in weights matrix) #define N_SIMDGROUP 1 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // assuming SIMD group size is 16 #elif defined (ADRENO_GPU) #define N_DST 8 #define N_SIMDGROUP 1 #define N_SIMDWIDTH 64 #endif // // This variant performs 1d blocking with 8x output. // Eeach simdgroup outputs 8 values on `n0` dim (row in the output matrix). // inline void mul_mat_q_n_f32_1d_8x_flat( global uchar * src0_q, global half * src0_d, global float * src1, global float * dst, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { const int nb = ne00/QK4_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of // a SIMD group in the grid. Each SIMD group produces N_DST values in the // result, hence uses nb blocks, i.e., the offset becomes first_row*nb. // Currently with llama2 7B, im is always 0. // TODO: how to handle im/gqa*(nb*ne0)? int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; int i12 = im%ne12; int i13 = im/ne12; // The number of scales is the same as the number of blocks. ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); // Each block contains QK4_0/2 uchars, hence offset for qs is as follows. ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; global uchar * x = (global uchar *) src0_q + offset0_q; global half * d = (global half *) src0_d + offset0_d; global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; float16 yl; float8 sumf = (float8)(0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f); int ix = get_sub_group_local_id()/2; int il = 8*(get_sub_group_local_id()%2); global float * yb = y + ix*QK4_0 + il; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { float sumy = 0.f; sumy += yb[0]; sumy += yb[1]; sumy += yb[2]; sumy += yb[3]; sumy += yb[4]; sumy += yb[5]; sumy += yb[6]; sumy += yb[7]; sumy += yb[16]; sumy += yb[17]; sumy += yb[18]; sumy += yb[19]; sumy += yb[20]; sumy += yb[21]; sumy += yb[22]; sumy += yb[23]; yl.s0 = yb[0]; yl.s1 = yb[1]/256.f; yl.s2 = yb[2]; yl.s3 = yb[3]/256.f; yl.s4 = yb[4]; yl.s5 = yb[5]/256.f; yl.s6 = yb[6]; yl.s7 = yb[7]/256.f; yl.s8 = yb[16]/16.f; yl.s9 = yb[17]/4096.f; yl.sa = yb[18]/16.f; yl.sb = yb[19]/4096.f; yl.sc = yb[20]/16.f; yl.sd = yb[21]/4096.f; yl.se = yb[22]/16.f; yl.sf = yb[23]/4096.f; sumf.s0 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); sumf.s1 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); sumf.s2 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); sumf.s3 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); sumf.s4 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il); sumf.s5 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il); sumf.s6 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); sumf.s7 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 
7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); yb += QK4_0 * (N_SIMDWIDTH/2); } float8 tot = (float8)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; } if (first_row + 2 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; } if (first_row + 3 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; } if (first_row + 4 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; } if (first_row + 5 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; } if (first_row + 6 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; } if (first_row + 7 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_q4_0_f32_1d_8x_flat( global uchar * src0_q, global half * src0_d, global float * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); mul_mat_q_n_f32_1d_8x_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl000066400000000000000000000201701512524704700255630ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; // This function requires the original shuffled weights. // As a reminder, the original weights are shuffled so that (q[0], q[16]) are // packed together in a byte, so are (q[1], q[17]) and so on. 
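// For example (illustrative): byte 0 of a block holds q[0] in its low nibble and q[16] in its high nibble, so a stored byte of 0x4C encodes q[0] = 12 and q[16] = 4; the -8 offset that turns these codes into signed weights is applied by block_q_4_0_dot_y_flat through its sumy * -8.f term.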
inline float block_q_4_0_dot_y_flat( global uchar * x, global half * dh, float sumy, float16 yl, int il ) { float d = *dh; global ushort * qs = ((global ushort *)x + il/2); float acc = 0.f; acc += yl.s0 * (qs[0] & 0x000F); acc += yl.s1 * (qs[0] & 0x0F00); acc += yl.s8 * (qs[0] & 0x00F0); acc += yl.s9 * (qs[0] & 0xF000); acc += yl.s2 * (qs[1] & 0x000F); acc += yl.s3 * (qs[1] & 0x0F00); acc += yl.sa * (qs[1] & 0x00F0); acc += yl.sb * (qs[1] & 0xF000); acc += yl.s4 * (qs[2] & 0x000F); acc += yl.s5 * (qs[2] & 0x0F00); acc += yl.sc * (qs[2] & 0x00F0); acc += yl.sd * (qs[2] & 0xF000); acc += yl.s6 * (qs[3] & 0x000F); acc += yl.s7 * (qs[3] & 0x0F00); acc += yl.se * (qs[3] & 0x00F0); acc += yl.sf * (qs[3] & 0xF000); return d * (sumy * -8.f + acc); } // // This variant outputs 8 values. // #undef N_DST #undef N_SIMDGROUP #undef N_SIMDWIDTH #ifdef INTEL_GPU #define N_DST 8 // each SIMD group works on 8 rows #define N_SIMDGROUP 1 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // assuming SIMD group size is 16 #elif defined (ADRENO_GPU) #define N_DST 8 #define N_SIMDGROUP 1 #define N_SIMDWIDTH 64 #endif inline void mul_vec_q_n_f32_8x_flat( global uchar * src0_q, global half * src0_d, global float * src1, global float * dst, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { const ulong nb = ne00/QK4_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of // a SIMD group in the grid. Each SIMD group produces N_DST values in the // result, hence uses nb blocks, i.e., the offset becomes first_row*nb. // Currently with llama2 7B, im is always 0. // TODO: how to handle im/gqa*(nb*ne0)? int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; int i12 = im%ne12; int i13 = im/ne12; // The number of scales is the same as the number of blocks. ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); // Each block contains QK4_0/2 uchars, hence offset for qs is as follows.
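// Descriptive note: in the flat layout each block_q4_0 is split across two buffers -- src0_q holds the QK4_0/2 quant bytes of every block and src0_d holds the per-block half scales -- which is why offset0_q below is offset0_d scaled by QK4_0/2 bytes per block.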
ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; global uchar * x = (global uchar *) src0_q + offset0_q; global half * d = (global half *) src0_d + offset0_d; global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; float16 yl; float8 sumf = 0.f; int ix = get_sub_group_local_id()/2; int il = 8*(get_sub_group_local_id()%2); global float * yb = y + ix*QK4_0 + il; for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { float sumy = 0.f; sumy += yb[0]; sumy += yb[1]; sumy += yb[2]; sumy += yb[3]; sumy += yb[4]; sumy += yb[5]; sumy += yb[6]; sumy += yb[7]; sumy += yb[16]; sumy += yb[17]; sumy += yb[18]; sumy += yb[19]; sumy += yb[20]; sumy += yb[21]; sumy += yb[22]; sumy += yb[23]; yl.s0 = yb[0]; yl.s1 = yb[1]/256.f; yl.s2 = yb[2]; yl.s3 = yb[3]/256.f; yl.s4 = yb[4]; yl.s5 = yb[5]/256.f; yl.s6 = yb[6]; yl.s7 = yb[7]/256.f; yl.s8 = yb[16]/16.f; yl.s9 = yb[17]/4096.f; yl.sa = yb[18]/16.f; yl.sb = yb[19]/4096.f; yl.sc = yb[20]/16.f; yl.sd = yb[21]/4096.f; yl.se = yb[22]/16.f; yl.sf = yb[23]/4096.f; sumf.s0 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); sumf.s1 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); sumf.s2 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); sumf.s3 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); sumf.s4 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il); sumf.s5 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il); sumf.s6 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); sumf.s7 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); yb += QK4_0 * (N_SIMDWIDTH/2); } float8 tot = (float8)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; } if (first_row + 2 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; } if (first_row + 3 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; } if (first_row + 4 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; } if (first_row + 5 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; } if (first_row + 6 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; } if (first_row + 7 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_q4_0_f32_8x_flat( global uchar * src0_q, global half * src0_d, global float * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); mul_vec_q_n_f32_8x_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl000066400000000000000000000163761512524704700245000ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable 
#ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q4_0 //------------------------------------------------------------------------------ struct block_q4_0 { half d; uint8_t qs[QK4_0 / 2]; }; // // This variant unrolls the loops and uses vector types instead of pointers. // It improves performance on Adreno but not so much on Intel. // inline float block_q_4_0_dot_y_v( global struct block_q4_0 * qb_curr, float sumy, float16 yl, int il ) { float d = qb_curr->d; float acc = 0.f; global ushort * qs = ((global ushort *)qb_curr + 1 + il/2); acc += yl.s0 * (qs[0] & 0x000F); acc += yl.s1 * (qs[0] & 0x0F00); acc += yl.s8 * (qs[0] & 0x00F0); acc += yl.s9 * (qs[0] & 0xF000); acc += yl.s2 * (qs[1] & 0x000F); acc += yl.s3 * (qs[1] & 0x0F00); acc += yl.sa * (qs[1] & 0x00F0); acc += yl.sb * (qs[1] & 0xF000); acc += yl.s4 * (qs[2] & 0x000F); acc += yl.s5 * (qs[2] & 0x0F00); acc += yl.sc * (qs[2] & 0x00F0); acc += yl.sd * (qs[2] & 0xF000); acc += yl.s6 * (qs[3] & 0x000F); acc += yl.s7 * (qs[3] & 0x0F00); acc += yl.se * (qs[3] & 0x00F0); acc += yl.sf * (qs[3] & 0xF000); return d * (sumy * -8.f + acc); } #undef N_DST #undef N_SIMDGROUP #undef N_SIMDWIDTH #ifdef INTEL_GPU #define N_DST 4 // each SIMD group works on 4 rows #define N_SIMDGROUP 1 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // assuming SIMD group size is 16 #elif defined (ADRENO_GPU) #define N_DST 4 #define N_SIMDGROUP 1 #define N_SIMDWIDTH 64 #endif inline void mul_vec_q_n_f32_v( global void * src0, global float * src1, global float * dst, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { const ulong nb = ne00/QK4_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); // (r0 * N_SIMDGROUP + get_sub_group_id()) is essenatially the linear global // id of a SIMD group in the grid. int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; int i12 = im%ne12; int i13 = im/ne12; ulong offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); global struct block_q4_0 * x = (global struct block_q4_0 *) src0 + offset0; global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; float16 yl; // src1 vector cache float4 sumf = (float4)(0.f, 0.f, 0.f, 0.f); int ix = get_sub_group_local_id()/2; int il = 8*(get_sub_group_local_id()%2); global float * yb = y + ix * QK4_0 + il; // each thread in a SIMD group deals with half a block. 
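// Descriptive note: sumy below is the plain sum of the 16 activations handled by this thread per iteration; block_q_4_0_dot_y_v uses it to apply the implicit -8 offset of the q4_0 codes in a single step, as d * (sumy * -8.f + acc), instead of subtracting 8 from every nibble.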
for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { float sumy = 0; sumy += yb[0]; sumy += yb[1]; sumy += yb[2]; sumy += yb[3]; sumy += yb[4]; sumy += yb[5]; sumy += yb[6]; sumy += yb[7]; sumy += yb[16]; sumy += yb[17]; sumy += yb[18]; sumy += yb[19]; sumy += yb[20]; sumy += yb[21]; sumy += yb[22]; sumy += yb[23]; yl.s0 = yb[0]; yl.s1 = yb[1]/256.f; yl.s2 = yb[2]; yl.s3 = yb[3]/256.f; yl.s4 = yb[4]; yl.s5 = yb[5]/256.f; yl.s6 = yb[6]; yl.s7 = yb[7]/256.f; yl.s8 = yb[16]/16.f; yl.s9 = yb[17]/4096.f; yl.sa = yb[18]/16.f; yl.sb = yb[19]/4096.f; yl.sc = yb[20]/16.f; yl.sd = yb[21]/4096.f; yl.se = yb[22]/16.f; yl.sf = yb[23]/4096.f; sumf.s0 += block_q_4_0_dot_y_v(x+ib+0*nb, sumy, yl, il); sumf.s1 += block_q_4_0_dot_y_v(x+ib+1*nb, sumy, yl, il); sumf.s2 += block_q_4_0_dot_y_v(x+ib+2*nb, sumy, yl, il); sumf.s3 += block_q_4_0_dot_y_v(x+ib+3*nb, sumy, yl, il); // One thread in a SIMD group (i.e., subgroup) handles a half block, // hence then entire SIMD group handles SIMDWIDTH/2 blocks. // y points to the activation matrix (of type float). Therefore for // one thread, the # of blocks y should advance is SIMDWIDTH/2 (because // SIMDWIDTH/2 blocks are processed by a SIMD group) - in terms of // floats, it is QK4_0 * (SIMDWIDTH/2), where QK4_0 is the block size. yb += QK4_0 * (N_SIMDWIDTH/2); } // The above does not work for Adreno - it produces incorrect results for // row = 1, 2, 3 and only row = 0 gives the correct result. // If N_DST is changed, the below array must be initialized accordingly. // This also seems to perform better on Intel. float4 tot = (float4)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; } if (first_row + 2 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; } if (first_row + 3 < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; } } } #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mat_q4_0_f32_v( global void * src0, ulong offset0, global float * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); mul_vec_q_n_f32_v(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q6_k.cl000066400000000000000000000162011512524704700235610ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define 
QK4_0 32 #define QR4_0 2 #define QK4_1 32 #define QR4_1 2 #define QK5_0 32 #define QR5_0 2 #define QK5_1 32 #define QR5_1 2 #define QK8_0 32 #define QR8_0 1 #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 typedef char int8_t; typedef uchar uint8_t; typedef short int16_t; typedef ushort uint16_t; typedef int int32_t; typedef uint uint32_t; //------------------------------------------------------------------------------ // block_q6_K //------------------------------------------------------------------------------ // 6-bit quantization // weight is represented as x = a * q // 16 blocks of 16 elements each // Effectively 6.5625 bits per weight typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales, quantized with 8 bits half d; // super-block scale } block_q6_K; //------------------------------------------------------------------------------ // kernel_mul_mv_q6_K_f32 //------------------------------------------------------------------------------ #undef N_DST #undef N_SIMDGROUP #undef N_SIMDWIDTH #ifdef INTEL_GPU #define N_DST 1 // number of rows each SIMD group works on #define N_SIMDGROUP 2 // number of SIMD groups in a thread group #define N_SIMDWIDTH 16 // SIMD group size #elif defined (ADRENO_GPU) #define N_DST 1 #define N_SIMDGROUP 2 #define N_SIMDWIDTH 64 #endif #define BLOCK_STRIDE (N_SIMDWIDTH/16) // number of blocks each subgroup processes #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_q6_K_f32( global void * src0, ulong offset0, global float * src1, ulong offset1, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne10, int ne12, int ne0, int ne1, int r2, int r3 ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global float*)((global char*)src1 + offset1); dst = (global float*)((global char*)dst + offsetd); uchar kmask1 = 0x03; uchar kmask2 = 0x0C; uchar kmask3 = 0x30; uchar kmask4 = 0xC0; int nb = ne00/QK_K; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); int row = N_SIMDGROUP * r0 + get_sub_group_id(); int i12 = im%ne12; int i13 = im/ne12; ulong offset_src0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); global block_q6_K * x = (global block_q6_K *) src0 + row*nb + offset_src0; global float * yy = (global float *) src1 + r1*ne10 + im*ne00*ne1; float sumf = 0; // For Q6_K quantization, 16 values form a subblock, and 16 subblocks form a // block. Values in a subblock share a scale that is quantized with 8 bits; // the entire block shares a single floating point scale. // For work distribution, each thread processes 16 weights (a subblock's worth), hence // 16 threads process a (super) block -- a subgroup thus handles SIMDWIDTH/16 // (super) blocks -- this is the block stride. // The 16 threads that process a (super) block are split into 2 portions, each with // 8 threads; each portion works on 8 subblocks. // For a subgroup of 16 threads, the entire subgroup works on a single (super) block // before moving to the next (super) block. Thread0 - thread7 work on the // first 8 subblocks; thread8 - thread15 work on the last 8 subblocks. // Thread0 - thread3 work on subblocks 0, 2, 4, 6; thread4 - thread7 work on // subblocks 1, 3, 5, 7. Each thread does not work on an entire subblock, but // works on a total of 16 weight values.
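// Worked example (illustrative, for a 16-wide subgroup where BLOCK_STRIDE is 1): lane 5 gets tid = 5 and ix = 0 below, hence ip = 0, il = 5, l0 = 20, is = 1, and within each super block it reads activations y[20..23], y[52..55], y[84..87] and y[116..119] together with the matching low and high quant bits.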
int tid = get_sub_group_local_id()/BLOCK_STRIDE; // first block_stride groups have tid=0 int ix = get_sub_group_local_id()%BLOCK_STRIDE; // first block is 0..block_stride-1 int ip = tid/8; // first or second half of (super) block (0 or 1) int il = tid%8; // each half has 8 parts, one per scale int n = 4; // 4 scales at a time (and 4 sums) int l0 = n*il; // offset into half-block, 0..28 int is = 8*ip + l0/16; // 0, 1, 8, 9 int y_offset = 128*ip + l0; int q_offset_l = 64*ip + l0; int q_offset_h = 32*ip + l0; for (int i = ix; i < nb; i += BLOCK_STRIDE) { global uint8_t * q1 = x[i].ql + q_offset_l; global uint8_t * q2 = q1 + QK_K/8; global uint8_t * qh = x[i].qh + q_offset_h; global int8_t * sc = x[i].scales + is; global float * y = yy + i * QK_K + y_offset; float dall = x[i].d; float4 sums = {0.f, 0.f, 0.f, 0.f}; sums.s0 += y[0+ 0] * ((float)((q1[0] & 0xF) | ((qh[0] & kmask1) << 4)) - 32.f); sums.s1 += y[0+32] * ((float)((q2[0] & 0xF) | ((qh[0] & kmask2) << 2)) - 32.f); sums.s2 += y[0+64] * ((float)((q1[0] >> 4) | ((qh[0] & kmask3) << 0)) - 32.f); sums.s3 += y[0+96] * ((float)((q2[0] >> 4) | ((qh[0] & kmask4) >> 2)) - 32.f); sums.s0 += y[1+ 0] * ((float)((q1[1] & 0xF) | ((qh[1] & kmask1) << 4)) - 32.f); sums.s1 += y[1+32] * ((float)((q2[1] & 0xF) | ((qh[1] & kmask2) << 2)) - 32.f); sums.s2 += y[1+64] * ((float)((q1[1] >> 4) | ((qh[1] & kmask3) << 0)) - 32.f); sums.s3 += y[1+96] * ((float)((q2[1] >> 4) | ((qh[1] & kmask4) >> 2)) - 32.f); sums.s0 += y[2+ 0] * ((float)((q1[2] & 0xF) | ((qh[2] & kmask1) << 4)) - 32.f); sums.s1 += y[2+32] * ((float)((q2[2] & 0xF) | ((qh[2] & kmask2) << 2)) - 32.f); sums.s2 += y[2+64] * ((float)((q1[2] >> 4) | ((qh[2] & kmask3) << 0)) - 32.f); sums.s3 += y[2+96] * ((float)((q2[2] >> 4) | ((qh[2] & kmask4) >> 2)) - 32.f); sums.s0 += y[3+ 0] * ((float)((q1[3] & 0xF) | ((qh[3] & kmask1) << 4)) - 32.f); sums.s1 += y[3+32] * ((float)((q2[3] & 0xF) | ((qh[3] & kmask2) << 2)) - 32.f); sums.s2 += y[3+64] * ((float)((q1[3] >> 4) | ((qh[3] & kmask3) << 0)) - 32.f); sums.s3 += y[3+96] * ((float)((q2[3] >> 4) | ((qh[3] & kmask4) >> 2)) - 32.f); sumf += dall * (sums.s0 * sc[0] + sums.s1 * sc[2] + sums.s2 * sc[4] + sums.s3 * sc[6]); } float tot = sub_group_reduce_add(sumf); if (get_sub_group_local_id() == 0) { dst[r1*ne0 + im*ne0*ne1 + row] = tot; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q8_0_f32.cl000066400000000000000000000070751512524704700241530ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK8_0 32 typedef struct { half d; // delta char qs[QK8_0]; // quants } block_q8_0; #define NB_Q8_0 8 #ifdef INTEL_GPU #define N_R0_Q8_0 4 // number of rows each subgroup works on #define N_SG_Q8_0 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_Q8_0 4 #define 
N_SG_Q8_0 2 #define N_SIMDWIDTH 64 #endif #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_q8_0_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int ne01, ulong nb01, ulong nb02, ulong nb03, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src0 = (global char*)((global char*)src0 + offset0); src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); int nb = ne00/QK8_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0; uint i12 = im%ne12; uint i13 = im/ne12; ulong offset_src1 = r1*nb11 + i12*nb12 + i13*nb13; global float * y = (global float *) (src1 + offset_src1); // pointers to src0 rows global block_q8_0 * ax[N_R0_Q8_0]; for (int row = 0; row < N_R0_Q8_0; ++row) { ulong offset_src0 = (first_row + row)*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; ax[row] = (global block_q8_0 *) ((global char *) src0 + offset_src0); } float yl[NB_Q8_0]; float sumf[N_R0_Q8_0] = { 0.f }; const short ix = get_sub_group_local_id()/4; const short il = get_sub_group_local_id()%4; global float * yb = y + ix*QK8_0 + il*NB_Q8_0; // each thread handles NB_Q8_0 quants at a time for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) { for (short i = 0; i < NB_Q8_0; ++i) { yl[i] = yb[i]; } for (short row = 0; row < N_R0_Q8_0; row++) { global char * qs = ax[row][ib].qs + il*NB_Q8_0; float sumq = 0.f; for (short iq = 0; iq < NB_Q8_0; ++iq) { sumq += qs[iq] * yl[iq]; } sumf[row] += sumq*ax[row][ib].d; } yb += N_SIMDWIDTH*NB_Q8_0; } global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; for (int row = 0; row < N_R0_Q8_0; ++row) { float tot = sub_group_reduce_add(sumf[row]); if (get_sub_group_local_id() == 0 && first_row + row < ne01) { dst_f32[first_row + row] = tot; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/mul_mv_q8_0_f32_flat.cl000066400000000000000000000136751512524704700251640ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #define QK8_0 32 typedef struct { half d; // delta char qs[QK8_0]; // quants } block_q8_0; #define NB_Q8_0 8 #ifdef INTEL_GPU #define N_R0_Q8_0 4 // number of rows each subgroup works on #define N_SG_Q8_0 2 // number of subgroups in a work group #define N_SIMDWIDTH 16 // subgroup size #elif defined (ADRENO_GPU) #define N_R0_Q8_0 4 #define N_SG_Q8_0 2 #define N_SIMDWIDTH 64 #endif #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_16 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_mul_mv_q8_0_f32_flat( global char * src0_q, global half * src0_d, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int 
ne01, ulong nb01, ulong nb02, ulong nb03, int ne12, ulong nb11, ulong nb12, ulong nb13, int ne0, int ne1, int r2, int r3 ) { src1 = (global char*)((global char*)src1 + offset1); dst = (global char*)((global char*)dst + offsetd); int nb = ne00/QK8_0; int r0 = get_group_id(0); int r1 = get_group_id(1); int im = get_group_id(2); int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0; uint i12 = im%ne12; uint i13 = im/ne12; ulong offset_src1 = r1*nb11 + i12*nb12 + i13*nb13; global float * y = (global float *) (src1 + offset_src1); // pointers to src0 rows uint offset_src0_base = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; global char * ax0, * ax1, * ax2, * ax3; global half * ad0, * ad1, * ad2, * ad3; uint offset_src0; offset_src0 = offset_src0_base + 0*nb01; offset_src0 = offset_src0/34; ax0 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0); ad0 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half)); offset_src0 = offset_src0_base + 1*nb01; offset_src0 = offset_src0/34; ax1 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0); ad1 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half)); offset_src0 = offset_src0_base + 2*nb01; offset_src0 = offset_src0/34; ax2 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0); ad2 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half)); offset_src0 = offset_src0_base + 3*nb01; offset_src0 = offset_src0/34; ax3 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0); ad3 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half)); const short ix = get_sub_group_local_id()/4; const short il = get_sub_group_local_id()%4; global float * yb = y + ix*QK8_0 + il*NB_Q8_0; float8 yl; float8 qv; float4 sumf = 0.f; float sumq = 0.f; global char * qs; // each thread handles NB_Q8_0 quants at a time for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) { yl = vload8(0, yb); qs = ax0 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s0 += sumq*ad0[ib]; qs = ax1 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s1 += sumq*ad1[ib]; qs = ax2 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s2 += sumq*ad2[ib]; qs = ax3 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0; qv = convert_float8(vload8(0, qs)); sumq = 0; sumq += qv.s0*yl.s0; sumq += qv.s1*yl.s1; sumq += qv.s2*yl.s2; sumq += qv.s3*yl.s3; sumq += qv.s4*yl.s4; sumq += qv.s5*yl.s5; sumq += qv.s6*yl.s6; sumq += qv.s7*yl.s7; sumf.s3 += sumq*ad3[ib]; yb += N_SIMDWIDTH*NB_Q8_0; } global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; float4 tot = (float4)( sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3) ); if (get_sub_group_local_id() == 0) { if (first_row + 0 < ne01) { dst_f32[first_row + 0] = tot.s0; } if (first_row + 1 < ne01) { dst_f32[first_row + 1] = tot.s1; } if 
(first_row + 2 < ne01) { dst_f32[first_row + 2] = tot.s2; } if (first_row + 3 < ne01) { dst_f32[first_row + 3] = tot.s3; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/norm.cl000066400000000000000000000127411512524704700223220ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif //------------------------------------------------------------------------------ // norm //------------------------------------------------------------------------------ kernel void kernel_norm( global void * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb01, ulong nb02, ulong nb03, float eps, local float * sum ) { src0 = (global void*)((global char*)src0 + offset0); dst = (global void*)((global char*)dst + offsetd); int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); global float * x = (global float *) ((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01); // MEAN // parallel sum sum[get_local_id(0)] = 0.0f; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { sum[get_local_id(0)] += x[i00]; } // reduce barrier(CLK_LOCAL_MEM_FENCE); for (uint i = get_local_size(0)/2; i > 0; i /= 2) { if (get_local_id(0) < i) { sum[get_local_id(0)] += sum[get_local_id(0) + i]; } barrier(CLK_LOCAL_MEM_FENCE); } float mean = sum[0] / ne00; // recenter and VARIANCE barrier(CLK_LOCAL_MEM_FENCE); global float * y = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; sum[get_local_id(0)] = 0.0f; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { y[i00] = x[i00] - mean; sum[get_local_id(0)] += y[i00] * y[i00]; } // reduce barrier(CLK_LOCAL_MEM_FENCE); for (uint i = get_local_size(0)/2; i > 0; i /= 2) { if (get_local_id(0) < i) { sum[get_local_id(0)] += sum[get_local_id(0) + i]; } barrier(CLK_LOCAL_MEM_FENCE); } float variance = sum[0] / ne00; float scale = 1.0f/sqrt(variance + eps); for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { y[i00] = y[i00] * scale; } } //------------------------------------------------------------------------------ // norm_mul_add //------------------------------------------------------------------------------ #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_32 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_norm_mul_add( global char * src0_ptr, ulong src0_offset, global char * src1_ptr, ulong src1_offset, global char * src2_ptr, ulong src2_offset, global char * dst_ptr, ulong dst_offset, int ne00, int ne01, int ne02, int ne03, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb11, ulong nb12, ulong nb13, int ne20, int ne21, int ne22, int ne23, ulong nb21, ulong nb22, ulong nb23, ulong nbd1, ulong nbd2, ulong nbd3, float eps, local float2 * sums ) { const int i03 = get_group_id(2); const int i02 = get_group_id(1); const int i01 = get_group_id(0); global float4 * x = (global float4 *)(src0_ptr + src0_offset + i01*nb01 + i02*nb02 + 
i03*nb03); global float4 * w = (global float4 *)(src1_ptr + src1_offset + (i01%ne11)*nb11 + (i02%ne12)*nb12 + (i03%ne13)*nb13); global float4 * b = (global float4 *)(src2_ptr + src2_offset + (i01%ne21)*nb21 + (i02%ne22)*nb22 + (i03%ne23)*nb23); global float4 * y = (global float4 *)(dst_ptr + dst_offset + i01*nbd1 + i02*nbd2 + i03*nbd3); float p_sum = 0.0f; float p_sum_sq = 0.0f; const int n_chunks = ne00 / 4; for (int i00 = get_local_id(0); i00 < n_chunks; i00 += get_local_size(0)) { float4 val = x[i00]; p_sum += val.x + val.y + val.z + val.w; p_sum_sq += dot(val, val); } p_sum = sub_group_reduce_add(p_sum); p_sum_sq = sub_group_reduce_add(p_sum_sq); if (get_sub_group_local_id() == 0) { sums[get_sub_group_id()] = (float2)(p_sum, p_sum_sq); } barrier(CLK_LOCAL_MEM_FENCE); if (get_local_id(0) == 0) { float sum = 0.0f; float sum_sq = 0.0f; for (uint i = 0; i < get_num_sub_groups(); ++i) { float2 s = sums[i]; sum += s.x; sum_sq += s.y; } const float inv_ne00 = 1.0f / (float)ne00; const float mean = sum * inv_ne00; const float variance = mad(-mean, mean, sum_sq * inv_ne00); sums[0] = (float2)(mean, rsqrt(variance + eps)); } barrier(CLK_LOCAL_MEM_FENCE); const float2 mean_scale = sums[0]; const float mean = mean_scale.x; const float scale = mean_scale.y; const float neg_mean_scale = -mean * scale; for (int i00 = get_local_id(0); i00 < n_chunks; i00 += get_local_size(0)) { const int w_idx = ne10 > 1 ? i00 : 0; const int b_idx = ne20 > 1 ? i00 : 0; const float4 norm_x = mad(x[i00], (float4)scale, (float4)neg_mean_scale); y[i00] = mad(norm_x, w[w_idx], b[b_idx]); } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/pad.cl000066400000000000000000000025561512524704700221160ustar00rootroot00000000000000kernel void kernel_pad( global void * src0, ulong offset0, global void * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int lp0, int rp0, int lp1, int rp1, int lp2, int rp2, int lp3, int rp3 ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); int i0 = get_global_id(0); int i1 = get_group_id(1); int i2 = get_group_id(2) % ne2; int i3 = get_group_id(2) / ne2; if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { return; } uint src0_idx = (i3 - lp3)*nb03 + (i2 - lp2)*nb02 + (i1 - lp1)*nb01 + (i0 - lp0)*nb00; uint dst_idx = i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0; global float * src0_ptr = (global float *)((global char *)src0 + src0_idx); global float * dst_ptr = (global float *)((global char *)dst + dst_idx); bool in_src_bounds = (i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3); *dst_ptr = in_src_bounds ? 
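// keep the source element when (i0,i1,i2,i3) falls inside the un-padded region,
// otherwise write the zero padding value: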
*src0_ptr : 0.0f; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/relu.cl000066400000000000000000000010211512524704700223030ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // relu //------------------------------------------------------------------------------ kernel void kernel_relu( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); dst[get_global_id(0)] = fmax(0.0f, src0[get_global_id(0)]); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/repeat.cl000066400000000000000000000027401512524704700226250ustar00rootroot00000000000000kernel void kernel_repeat( global const char * src0_data_in, global char * dst_data_in, ulong src0_offset, ulong dst_offset, int src0_ne0, int src0_ne1, int src0_ne2, int src0_ne3, ulong src0_nb0, ulong src0_nb1, ulong src0_nb2, ulong src0_nb3, int dst_ne0, int dst_ne1, int dst_ne2, int dst_ne3, ulong dst_nb0, ulong dst_nb1, ulong dst_nb2, ulong dst_nb3 ) { global const char * src0_data = src0_data_in + src0_offset; global char * dst_data = dst_data_in + dst_offset; const int d3 = get_global_id(2); const int d2 = get_global_id(1); const int d1 = get_global_id(0); if (d3 >= dst_ne3 || d2 >= dst_ne2 || d1 >= dst_ne1) { return; } const int s3 = d3 % src0_ne3; const int s2 = d2 % src0_ne2; const int s1 = d1 % src0_ne1; const global char * p_src0_slice = src0_data + (ulong)s3*src0_nb3 + (ulong)s2*src0_nb2 + (ulong)s1*src0_nb1; global char * p_dst_slice = dst_data + (ulong)d3*dst_nb3 + (ulong)d2*dst_nb2 + (ulong)d1*dst_nb1; for (int d0 = 0; d0 < dst_ne0; ++d0) { // Determine source index for dimension 0 based on tiling/broadcasting. const int s0 = d0 % src0_ne0; const global char * restrict current_src_el_ptr = p_src0_slice + (ulong)s0*src0_nb0; global char * restrict current_dst_el_ptr = p_dst_slice + (ulong)d0*dst_nb0; for (int k = 0; k < src0_nb0; ++k) { current_dst_el_ptr[k] = current_src_el_ptr[k]; } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/rms_norm.cl000066400000000000000000000132211512524704700231750ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif //------------------------------------------------------------------------------ // rms_norm //------------------------------------------------------------------------------ // This kernel depends on subgroup size. 
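// Each subgroup deposits one partial sum into the local `sum` buffer, so the host
// presumably sizes it as (work-group size / subgroup size) floats; e.g. a 1024-item
// work-group with subgroup size 32 needs 32 floats (128 bytes) of local scratch
// for the reduction.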
#ifdef INTEL_GPU REQD_SUBGROUP_SIZE_32 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_rms_norm( global void * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb01, ulong nb02, ulong nb03, float eps, local float * sum // Note, the size depends on number of subgroups ) { src0 = (global void*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); global float4 * x = (global float4 *) ((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01); global float * x_scalar = (global float *) x; float4 sumf = 0; float all_sum = 0; // parallel sum for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { sumf += x[i00] * x[i00]; } all_sum = sumf.s0 + sumf.s1 + sumf.s2 + sumf.s3; all_sum = sub_group_reduce_add(all_sum); if (get_sub_group_local_id() == 0) { sum[get_sub_group_id()] = all_sum; } barrier(CLK_LOCAL_MEM_FENCE); // broadcast for (uint i = get_local_size(0) / get_max_sub_group_size() / 2; i > 0; i /= 2) { if (get_local_id(0) < i) { sum[get_local_id(0)] += sum[get_local_id(0) + i]; } } if (get_local_id(0) == 0) { for (int i = 4 * (ne00 / 4); i < ne00; i++) { sum[0] += x_scalar[i]; } sum[0] /= ne00; } barrier(CLK_LOCAL_MEM_FENCE); const float mean = sum[0]; const float scale = 1.0f/sqrt(mean + eps); global float4 * y = (global float4 *) (dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); global float * y_scalar = (global float *) y; for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { y[i00] = x[i00] * scale; } if (get_local_id(0) == 0) { for (int i00 = 4 * (ne00 / 4); i00 < ne00; i00++) { y_scalar[i00] = x_scalar[i00] * scale; } } } //------------------------------------------------------------------------------ // rms_norm_mul //------------------------------------------------------------------------------ #ifdef INTEL_GPU REQD_SUBGROUP_SIZE_32 #elif defined (ADRENO_GPU) REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_rms_norm_mul( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb11, ulong nb12, ulong nb13, ulong nb1, ulong nb2, ulong nb3, float eps, local float * sum ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; // The size of sum is sizeof(float)*subgroup_size. // Each subgroup writes its partial sum to this array. // So the number of subgroups per workgroup for this kernel cannot exceed the subgroup size. // This is generally true - // for subgroup size 64, workgroup size should be less than 4096 (the max is usually 1024). 
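// Worked example: with subgroup size 32 the array holds 32 partial sums, so a
// work-group may span at most 32 subgroups, i.e. 32*32 = 1024 work-items; for
// subgroup size 64 the corresponding bound is 64*64 = 4096, as noted above.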
if (get_sub_group_id() == 0) { sum[get_sub_group_local_id()] = 0.0f; } int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); global float4 * x = (global float4 *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); global float4 * f = (global float4 *) (src1 + (i03%ne13)*nb13 + (i02%ne12)*nb12 + (i01%ne11)*nb11); float sumf = 0; // parallel sum for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { sumf += dot(x[i00], x[i00]); } sumf = sub_group_reduce_add(sumf); barrier(CLK_LOCAL_MEM_FENCE); if (get_sub_group_local_id() == 0) { sum[get_sub_group_id()] = sumf; } barrier(CLK_LOCAL_MEM_FENCE); //for (uint i = get_local_size(0) / get_max_sub_group_size() / 2; i > 0; i /= 2) { // if (get_local_id(0) < i) { // sum[get_local_id(0)] += sum[get_local_id(0) + i]; // } //} //if (get_local_id(0) == 0) { // sum[0] /= ne00; //} //barrier(CLK_LOCAL_MEM_FENCE); sumf = sum[get_sub_group_local_id()]; sumf = sub_group_reduce_add(sumf); float mean = sumf / ne00; float scale = 1.0f/sqrt(mean + eps); global float4 * y = (global float4 *) (dst + i03*nb3 + i02*nb2 + i01*nb1); for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { y[i00] = (x[i00] * scale) * f[i00%(ne10/4)]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/rope.cl000066400000000000000000000575441512524704700223260ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // kernel_rope //------------------------------------------------------------------------------ float rope_yarn_ramp(float low, float high, int i0) { const float y = (i0 / 2 - low) / max(0.001f, high - low); return 1.0f - min(1.0f, max(0.0f, y)); } // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
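// Reading of the helpers below: rope_yarn_ramp() is 1 for rotary pairs (i0/2) at
// or below corr_dims.s0, 0 at or above corr_dims.s1, and linear in between.
// rope_yarn() blends the interpolated angle (freq_scale * theta_extrap) toward the
// raw extrapolated angle by ramp * ext_factor and, when ext_factor != 0,
// compensates the magnitude with mscale *= 1.0f + 0.1f*log(1.0f/freq_scale).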
float2 rope_yarn( float theta_extrap, float freq_scale, float2 corr_dims, int i0, float ext_factor, float mscale ) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims.s0, corr_dims.s1, i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); } return (float2)(cos(theta) * mscale, sin(theta) * mscale); } // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get // `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) { return n_dims * log(n_ctx_orig / (n_rot * 2 * M_PI_F)) / (2 * log(base)); } float2 rope_yarn_corr_dims( int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow ) { // start and end correction dims return (float2)( max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base))), min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base))) ); } kernel void kernel_rope_norm_f32( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; float theta_base = (float) pos[i2]; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { if (i0 < n_dims) { int ic = i0/2; float theta = theta_base * pow(freq_base, inv_ndims*i0); float freq_factor = src2 != src0 ? 
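// src2 carries optional per-dimension frequency factors; the host presumably
// rebinds src0 when they are absent, so src2 == src0 means "no freq_factors":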
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); float x0 = src[0]; float x1 = src[1]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[1] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } else { global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } kernel void kernel_rope_norm_f16( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; float theta_base = (float) pos[i2]; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { if (i0 < n_dims) { int ic = i0/2; float theta = theta_base * pow(freq_base, inv_ndims*i0); float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); float x0 = src[0]; float x1 = src[1]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[1] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } else { global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } kernel void kernel_rope_neox_f32( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; float theta_base = (float) pos[i2]; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { if (i0 < n_dims) { int ic = i0/2; const float theta = theta_base * pow(freq_base, inv_ndims*i0); const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); const float x0 = src[0]; const float x1 = src[n_dims/2]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[n_dims/2] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } else { global float * const src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } kernel void kernel_rope_neox_f16( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; float theta_base = (float) pos[i2]; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { if (i0 < n_dims) { int ic = i0/2; const float theta = theta_base * pow(freq_base, inv_ndims*i0); const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); const float x0 = src[0]; const float x1 = src[n_dims/2]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[n_dims/2] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } else { global half * const src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } kernel void kernel_rope_multi_f32( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow, int4 sections, int is_imrope ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; const int sect_dims = sections.s0 + sections.s1 + sections.s2 + sections.s3; const int sec_w = sections.s1 + sections.s0; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { if (i0 < n_dims) { int ic = i0/2; const int sector = (i0 / 2) % sect_dims; float theta_base = 0.0f; if (is_imrope) { if (sector % 3 == 1 && sector < 3 * sections.s1) { // h theta_base = (float) pos[i2 + ne02 * 1]; } else if (sector % 3 == 2 && sector < 3 * sections.s2) { // w theta_base = (float) pos[i2 + ne02 * 2]; } else if (sector % 3 == 0 && sector < 3 * sections.s0) { // t theta_base = (float) pos[i2 + ne02 * 0]; } else { // e theta_base = (float) pos[i2 + ne02 * 3]; } } else { if (sector < sections.s0) { theta_base = pos[i2]; } else if (sector >= sections.s0 && sector < sec_w) { theta_base = pos[i2 + ne2 * 1]; } else if (sector >= sec_w && sector < sec_w + sections.s2) { theta_base = pos[i2 + ne2 * 2]; } else if (sector >= sec_w + sections.s2) { theta_base = pos[i2 + ne2 * 3]; } } const float theta = theta_base * pow(freq_base, inv_ndims*i0); const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); const float x0 = src[0]; const float x1 = src[n_dims/2]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[n_dims/2] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } else { global float * const src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } kernel void kernel_rope_multi_f16( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global half * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow, int4 sections, int is_imrope ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; const int sect_dims = sections.s0 + sections.s1 + sections.s2 + sections.s3; const int sec_w = sections.s1 + sections.s0; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { if (i0 < n_dims) { int ic = i0/2; const int sector = (i0 / 2) % sect_dims; float theta_base = 0.0f; if (is_imrope) { if (sector % 3 == 1 && sector < 3 * sections.s1) { // h theta_base = (float) pos[i2 + ne02 * 1]; } else if (sector % 3 == 2 && sector < 3 * sections.s2) { // w theta_base = (float) pos[i2 + ne02 * 2]; } else if (sector % 3 == 0 && sector < 3 * sections.s0) { // t theta_base = (float) pos[i2 + ne02 * 0]; } else { // e theta_base = (float) pos[i2 + ne02 * 3]; } } else { if (sector < sections.s0) { theta_base = pos[i2]; } else if (sector >= sections.s0 && sector < sec_w) { theta_base = pos[i2 + ne2 * 1]; } else if (sector >= sec_w && sector < sec_w + sections.s2) { theta_base = pos[i2 + ne2 * 2]; } else if (sector >= sec_w + sections.s2) { theta_base = pos[i2 + ne2 * 3]; } } const float theta = theta_base * pow(freq_base, inv_ndims*i0); const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); const float x0 = src[0]; const float x1 = src[n_dims/2]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[n_dims/2] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } else { global half * const src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } kernel void kernel_rope_vision_f32( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow, int4 sections ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; const int sect_dims = sections.s0 + sections.s1; const int sec_w = sections.s1 + sections.s0; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { int ic = i0/2; const int sector = (i0/2) % sect_dims; float theta_base = 0.0f; if (sector < sections.s0) { const int p = sector; theta_base = pos[i2] * pow(freq_base, inv_ndims*2.0f*p); } else if (sector >= sections.s0 && sector < sec_w) { const int p = sector - sections.s0; theta_base = pos[i2 + ne2] * pow(freq_base, inv_ndims*2.0f*p); } const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); const float x0 = src[0]; const float x1 = src[n_dims]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[n_dims] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } } kernel void kernel_rope_vision_f16( global void * src0, ulong offset0, global int * src1, ulong offset1, global float * src2, ulong offset2, global half * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne0, int ne1, int ne2, int ne3, ulong nb0, ulong nb1, ulong nb2, ulong nb3, int n_past, int n_dims, int n_ctx_orig, float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow, int4 sections ) { src0 = (global void*)((global char*)src0 + offset0); src1 = (global int*)((global char*)src1 + offset1); src2 = (global float*)((global char*)src2 + offset2); dst = (global float*)((global char*)dst + offsetd); int i3 = get_group_id(2); int i2 = get_group_id(1); int i1 = get_group_id(0); float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); global int * pos = src1; const int sect_dims = sections.s0 + sections.s1; const int sec_w = sections.s1 + sections.s0; float inv_ndims = -1.f/n_dims; for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { int ic = i0/2; const int sector = (i0/2) % sect_dims; float theta_base = 0.0f; if (sector < sections.s0) { const int p = sector; theta_base = pos[i2] * pow(freq_base, inv_ndims*2.0f*p); } else if (sector >= sections.s0 && sector < sec_w) { const int p = sector - sections.s0; theta_base = pos[i2 + ne2] * pow(freq_base, inv_ndims*2.0f*p); } const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; float2 cos_sin_theta = rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); const float x0 = src[0]; const float x1 = src[n_dims]; dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; dst_data[n_dims] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/scale.cl000066400000000000000000000011021512524704700224230ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // scale //------------------------------------------------------------------------------ kernel void kernel_scale( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd, float scale, float bias ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); dst[get_global_id(0)] = src0[get_global_id(0)] * scale + bias; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/set_rows.cl000066400000000000000000000130641512524704700232130ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable // v = { mp, L, d } inline uint fastdiv(uint n, uint4 v) { uint msbs; msbs = mul_hi(n, v.s0); return (msbs + n) >> v.s1; } inline uint fastmod(uint n, uint4 v) { uint q = fastdiv(n, v); return n - q * v.s2; } kernel void kernel_set_rows_f32_i64( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne01, ulong nb01, ulong nb02, ulong nb03, uint4 ne11, uint4 ne12, ulong nb10, ulong nb11, ulong nb12, int nblk0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0)*get_local_size(1) + get_local_id(1); if (i01 >= ne01) { return; } //int i12 = i03%ne12; //int i11 = i02%ne11; int i12 = fastmod(i03, ne12); int i11 = fastmod(i02, ne11); int i10 = i01; long i1 = ((global long *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; global float * dst_row = (global float *) (dst + i1*nb1 + i02*nb2 + i03*nb3); global float * src_row = (global float *) (src0 + i01*nb01 + i02*nb02 + i03*nb03); for (int ind = get_local_id(0); ind < nblk0; ind += get_local_size(0)) { dst_row[ind] = (float)src_row[ind]; } } kernel void kernel_set_rows_f16_i64( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne01, ulong nb01, ulong nb02, ulong nb03, uint4 ne11, uint4 ne12, ulong nb10, ulong nb11, ulong nb12, int nblk0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0)*get_local_size(1) + get_local_id(1); if (i01 >= ne01) { return; } //int i12 = i03%ne12; //int i11 = i02%ne11; int i12 = fastmod(i03, ne12); int i11 = fastmod(i02, ne11); int i10 = i01; long i1 = ((global long *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; global half * dst_row = (global half *) (dst + i1*nb1 + i02*nb2 + i03*nb3); global float * src_row = (global float *) (src0 + i01*nb01 + i02*nb02 + i03*nb03); for (int ind = get_local_id(0); ind < nblk0; ind += get_local_size(0)) { dst_row[ind] = src_row[ind]; } } kernel void kernel_set_rows_f32_i32( global char * src0, ulong offset0, 
global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne01, ulong nb01, ulong nb02, ulong nb03, uint4 ne11, uint4 ne12, ulong nb10, ulong nb11, ulong nb12, int nblk0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0)*get_local_size(1) + get_local_id(1); if (i01 >= ne01) { return; } //int i12 = i03%ne12; //int i11 = i02%ne11; int i12 = fastmod(i03, ne12); int i11 = fastmod(i02, ne11); int i10 = i01; int i1 = ((global int *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; global float * dst_row = (global float *) (dst + i1*nb1 + i02*nb2 + i03*nb3); global float * src_row = (global float *) (src0 + i01*nb01 + i02*nb02 + i03*nb03); for (int ind = get_local_id(0); ind < nblk0; ind += get_local_size(0)) { dst_row[ind] = (float)src_row[ind]; } } kernel void kernel_set_rows_f16_i32( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, int ne01, ulong nb01, ulong nb02, ulong nb03, uint4 ne11, uint4 ne12, ulong nb10, ulong nb11, ulong nb12, int nblk0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0)*get_local_size(1) + get_local_id(1); if (i01 >= ne01) { return; } //int i12 = i03%ne12; //int i11 = i02%ne11; int i12 = fastmod(i03, ne12); int i11 = fastmod(i02, ne11); int i10 = i01; int i1 = ((global int *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; global half * dst_row = (global half *) (dst + i1*nb1 + i02*nb2 + i03*nb3); global float * src_row = (global float *) (src0 + i01*nb01 + i02*nb02 + i03*nb03); for (int ind = get_local_id(0); ind < nblk0; ind += get_local_size(0)) { dst_row[ind] = src_row[ind]; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/sigmoid.cl000066400000000000000000000015541512524704700230020ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // sigmoid //------------------------------------------------------------------------------ kernel void kernel_sigmoid_f32( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); dst[get_global_id(0)] = 1.0f / (1.0f + exp(-src0[get_global_id(0)])); } kernel void kernel_sigmoid_f16( global half * src0, ulong offset0, global half * dst, ulong offsetd ) { src0 = (global half*)((global char*)src0 + offset0); dst = (global half*)((global char*)dst + offsetd); dst[get_global_id(0)] = 1.0f / (1.0f + exp(-src0[get_global_id(0)])); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/silu.cl000066400000000000000000000016011512524704700223140ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // silu //------------------------------------------------------------------------------ kernel void kernel_silu( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); float x = src0[get_global_id(0)]; dst[get_global_id(0)] = x / (1.0f + exp(-x)); } kernel void kernel_silu_4( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd ) { src0 = (global float4*)((global 
char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); float4 x = src0[get_global_id(0)]; dst[get_global_id(0)] = x / (1.0f + exp(-x)); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/softmax_4_f16.cl000066400000000000000000000062251512524704700237270ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_soft_max_4_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, int ne13, ulong nb11, ulong nb12, ulong nb13, ulong nb1, ulong nb2, ulong nb3, float scale, float max_bias, float m0, float m1, int n_head_log2 ) { src0 = src0 + offset0; src1 = src1 + offset1; src2 = src2 + offset2; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03%ne13; int i12 = i02%ne12; int i11 = i01; global float4 * psrc4 = (global float4 *)(src0 + i01*nb01 + i02*nb02 + i03*nb03); global half4 * pmask = src1 != src0 ? (global half4 *)(src1 + i11*nb11 + i12*nb12 + i13*nb13) : 0; global float * psrc2 = src2 != src0 ? (global float *)(src2) : 0; global float4 * pdst4 = (global float4 *)(dst + i01*nb1 + i02*nb2 + i03*nb3); float slope = 1.0f; // ALiBi if (max_bias > 0.0f) { int h = i02; float base = h < n_head_log2 ? m0 : m1; int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; slope = pow(base, exp); } // parallel max float4 lmax4 = psrc2 ? psrc2[i02] : -INFINITY; for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { lmax4 = fmax(lmax4, psrc4[i00]*scale + slope*(pmask ? convert_float4(pmask[i00]) : 0.0f)); } float lmax = fmax(fmax(lmax4.s0, lmax4.s1), fmax(lmax4.s2, lmax4.s3)); const float max = sub_group_reduce_max(lmax); // parallel sum float4 lsum4 = 0.0f; for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { const float4 exp_psrc4 = exp((psrc4[i00]*scale + slope*(pmask ? 
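// pmask is 0 when no mask is bound (src1 == src0 above); the f16 mask values are
// widened to float before the slope-scaled add: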
convert_float4(pmask[i00]) : 0.0f)) - max); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } float lsum = lsum4.s0 + lsum4.s1 + lsum4.s2 + lsum4.s3; float sum = sub_group_reduce_add(lsum); if (psrc2) { sum += exp(psrc2[i02] - max); } for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { pdst4[i00] /= sum; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/softmax_4_f32.cl000066400000000000000000000061631512524704700237260ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_soft_max_4( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, int ne13, ulong nb11, ulong nb12, ulong nb13, ulong nb1, ulong nb2, ulong nb3, float scale, float max_bias, float m0, float m1, int n_head_log2 ) { src0 = src0 + offset0; src1 = src1 + offset1; src2 = src2 + offset2; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03%ne13; int i12 = i02%ne12; int i11 = i01; global float4 * psrc4 = (global float4 *)(src0 + i01*nb01 + i02*nb02 + i03*nb03); global float4 * pmask = src1 != src0 ? (global float4 *)(src1 + i11*nb11 + i12*nb12 + i13*nb13) : 0; global float * psrc2 = src2 != src0 ? (global float *)(src2) : 0; global float4 * pdst4 = (global float4 *)(dst + i01*nb1 + i02*nb2 + i03*nb3); float slope = 1.0f; // ALiBi if (max_bias > 0.0f) { int h = i02; float base = h < n_head_log2 ? m0 : m1; int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; slope = pow(base, exp); } // parallel max float4 lmax4 = psrc2 ? psrc2[i02] : -INFINITY; for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)); } float lmax = fmax(fmax(lmax4.s0, lmax4.s1), fmax(lmax4.s2, lmax4.s3)); const float max = sub_group_reduce_max(lmax); // parallel sum float4 lsum4 = 0.0f; for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? 
slope*pmask[i00] : 0.0f)) - max); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } float lsum = lsum4.s0 + lsum4.s1 + lsum4.s2 + lsum4.s3; float sum = sub_group_reduce_add(lsum); if (psrc2) { sum += exp(psrc2[i02] - max); } for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { pdst4[i00] /= sum; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/softmax_f16.cl000066400000000000000000000060771512524704700235110ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_soft_max_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, int ne13, ulong nb11, ulong nb12, ulong nb13, ulong nb1, ulong nb2, ulong nb3, float scale, float max_bias, float m0, float m1, int n_head_log2 ) { src0 = src0 + offset0; src1 = src1 + offset1; src2 = src2 + offset2; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03%ne13; int i12 = i02%ne12; int i11 = i01; global float * psrc0 = (global float *)(src0 + i01*nb01 + i02*nb02 + i03*nb03); global half * pmask = src1 != src0 ? (global half *)(src1 + i11*nb11 + i12*nb12 + i13*nb13) : 0; global float * psrc2 = src2 != src0 ? (global float *)(src2) : 0; global float * pdst = (global float *)(dst + i01*nb1 + i02*nb2 + i03*nb3); float slope = 1.0f; // ALiBi if (max_bias > 0.0f) { int h = i02; float base = h < n_head_log2 ? m0 : m1; int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; slope = pow(base, exp); } // parallel max float lmax = psrc2 ? psrc2[i02] : -INFINITY; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { lmax = fmax(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)); } float max = sub_group_reduce_max(lmax); // parallel sum float lsum = 0.0f; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max); lsum += exp_psrc0; // Remember the result of exp here. exp is expensive, so we really do not // wish to compute it twice. 
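// pdst holds the un-normalized exponentials until the final loop divides by sum: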
pdst[i00] = exp_psrc0; } float sum = sub_group_reduce_add(lsum); if (psrc2) { sum += exp(psrc2[i02] - max); } for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { pdst[i00] /= sum; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/softmax_f32.cl000066400000000000000000000060741512524704700235040ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_subgroups #pragma OPENCL EXTENSION cl_intel_subgroups : enable #else #pragma OPENCL EXTENSION cl_khr_subgroups : enable #endif #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif #ifdef ADRENO_GPU REQD_SUBGROUP_SIZE_64 #endif kernel void kernel_soft_max( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * src2, ulong offset2, global char * dst, ulong offsetd, int ne00, ulong nb01, ulong nb02, ulong nb03, int ne12, int ne13, ulong nb11, ulong nb12, ulong nb13, ulong nb1, ulong nb2, ulong nb3, float scale, float max_bias, float m0, float m1, int n_head_log2 ) { src0 = src0 + offset0; src1 = src1 + offset1; src2 = src2 + offset2; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03%ne13; int i12 = i02%ne12; int i11 = i01; global float * psrc0 = (global float *)(src0 + i01*nb01 + i02*nb02 + i03*nb03); global float * pmask = src1 != src0 ? (global float *)(src1 + i11*nb11 + i12*nb12 + i13*nb13) : 0; global float * psrc2 = src2 != src0 ? (global float *)(src2) : 0; global float * pdst = (global float *)(dst + i01*nb1 + i02*nb2 + i03*nb3); float slope = 1.0f; // ALiBi if (max_bias > 0.0f) { int h = i02; float base = h < n_head_log2 ? m0 : m1; int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; slope = pow(base, exp); } // parallel max float lmax = psrc2 ? psrc2[i02] : -INFINITY; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { lmax = fmax(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)); } float max = sub_group_reduce_max(lmax); // parallel sum float lsum = 0.0f; for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max); lsum += exp_psrc0; // Remember the result of exp here. exp is expensive, so we really do not // wish to compute it twice. 
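// Note that both the max and the sum are reduced only within a subgroup, so this
// kernel presumably relies on being launched with one subgroup per work-group
// (local size == subgroup size); otherwise each subgroup would normalize with its
// own partial max/sum.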
pdst[i00] = exp_psrc0; } float sum = sub_group_reduce_add(lsum); if (psrc2) { sum += exp(psrc2[i02] - max); } for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { pdst[i00] /= sum; } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/sqr.cl000066400000000000000000000025361512524704700221550ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable kernel void kernel_sqr_cont_f32( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = src0[gid] * src0[gid]; } kernel void kernel_sqr_cont_f32_4( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = src0[gid] * src0[gid]; } kernel void kernel_sqr_cont_f16( global half * src0, ulong offset0, global half * dst, ulong offsetd ) { src0 = (global half*)((global char*)src0 + offset0); dst = (global half*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = src0[gid] * src0[gid]; } kernel void kernel_sqr_cont_f16_4( global half4 * src0, ulong offset0, global half4 * dst, ulong offsetd ) { src0 = (global half4*)((global char*)src0 + offset0); dst = (global half4*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = src0[gid] * src0[gid]; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/sqrt.cl000066400000000000000000000026061512524704700223370ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable kernel void kernel_sqrt_cont_f32( global float * src0, ulong offset0, global float * dst, ulong offsetd ) { src0 = (global float*)((global char*)src0 + offset0); dst = (global float*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = sqrt(src0[gid]); } kernel void kernel_sqrt_cont_f32_4( global float4 * src0, ulong offset0, global float4 * dst, ulong offsetd ) { src0 = (global float4*)((global char*)src0 + offset0); dst = (global float4*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = sqrt(src0[gid]); } kernel void kernel_sqrt_cont_f16( global half * src0, ulong offset0, global half * dst, ulong offsetd ) { src0 = (global half*)((global char*)src0 + offset0); dst = (global half*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = convert_half(sqrt(convert_float(src0[gid]))); } kernel void kernel_sqrt_cont_f16_4( global half4 * src0, ulong offset0, global half4 * dst, ulong offsetd ) { src0 = (global half4*)((global char*)src0 + offset0); dst = (global half4*)((global char*)dst + offsetd); uint gid = get_global_id(0); dst[gid] = convert_half4(sqrt(convert_float4(src0[gid]))); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/ssm_conv.cl000066400000000000000000000034531512524704700231760ustar00rootroot00000000000000kernel void kernel_ssm_conv_f32_f32( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb00, ulong nb01, ulong nb02, int ne10, ulong nb11, ulong nb0, ulong nb1, ulong nb2 ){ src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int ir = get_global_id(0); int i2 = get_global_id(1); int i3 = get_global_id(2); int nc = ne10; global float * s = (global float *) (src0 + ir*nb01 + i2*nb00 + i3*nb02); global float * c = (global float *) (src1 + ir*nb11); global float * d = (global float *) (dst + ir*nb0 + i2*nb1 
+ i3*nb2); float sumf = 0.0f; for (int i0 = 0; i0 < nc; ++i0) { sumf += s[i0] * c[i0]; } d[0] = sumf; } kernel void kernel_ssm_conv_f32_f32_4( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb00, ulong nb01, ulong nb02, int ne10, ulong nb11, ulong nb0, ulong nb1, ulong nb2 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int ir = get_global_id(0); int i2 = get_global_id(1); int i3 = get_global_id(2); int nc = ne10; global float4 * s = (global float4 *) (src0 + ir*nb01 + i2*nb00 + i3*nb02); global float4 * c = (global float4 *) (src1 + ir*nb11); global float * d = (global float *) (dst + ir*nb0 + i2*nb1 + i3*nb2); float sumf = 0.0f; for (int i0 = 0; i0 < nc/4; ++i0) { sumf += dot(s[i0], c[i0]); } d[0] = sumf; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/sub.cl000066400000000000000000000073371512524704700221450ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable //------------------------------------------------------------------------------ // div //------------------------------------------------------------------------------ kernel void kernel_sub( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global float *)(dst_ptr + i0*nb0)) = *((global float *)(src0_ptr + i0*nb00)) - *((global float *)(src1_ptr + i10*nb10)); } } // assumption: src1 is a row // broadcast src1 into src0 kernel void kernel_sub_row( global float4 * src0, ulong offset0, global float4 * src1, ulong offset1, global float4 * dst, ulong offsetd, int ne ) { src0 = (global float4*)((global char*)src0 + offset0); src1 = (global float4*)((global char*)src1 + offset1); dst = (global float4*)((global char*)dst + offsetd); // This performs better than using %. 
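// e.g. gid = 10, ne = 4: idx1 = 10 - (10/4)*4 = 2, which matches 10 % 4.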
uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] - src1[idx1]; } kernel void kernel_sub_f16( global char * src0, ulong offset0, global char * src1, ulong offset1, global char * dst, ulong offsetd, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13, int ne0, ulong nb0, ulong nb1, ulong nb2, ulong nb3 ) { src0 = src0 + offset0; src1 = src1 + offset1; dst = dst + offsetd; int i03 = get_group_id(2); int i02 = get_group_id(1); int i01 = get_group_id(0); int i13 = i03 % ne13; int i12 = i02 % ne12; int i11 = i01 % ne11; global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { const int i10 = i0 % ne10; *((global half *)(dst_ptr + i0*nb0)) = *((global half *)(src0_ptr + i0*nb00)) - *((global half *)(src1_ptr + i10*nb10)); } } kernel void kernel_sub_row_f16( global half4 * src0, ulong offset0, global half4 * src1, ulong offset1, global half4 * dst, ulong offsetd, int ne ) { src0 = (global half4*)((global char*)src0 + offset0); src1 = (global half4*)((global char*)src1 + offset1); dst = (global half4*)((global char*)dst + offsetd); // This performs better than using %. uint gid = get_global_id(0); uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne dst[gid] = src0[gid] - src1[idx1]; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/sum_rows.cl000066400000000000000000000017761512524704700232330ustar00rootroot00000000000000 kernel void kernel_sum_rows_f32( global float * src0, ulong offset0, global float * dst, ulong offsetd, int ne00, int ne01, int ne02, int ne03, ulong nb01, ulong nb02, ulong nb03, ulong nb1, ulong nb2, ulong nb3 ) { src0 = (global float *)((global char *)src0 + offset0); dst = (global float *)((global char *)dst + offsetd); int i3 = get_global_id(2); int i2 = get_global_id(1); int i1 = get_global_id(0); if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { return; } global float * src_row = (global float *) ((global char *) src0 + i1*nb01 + i2*nb02 + i3*nb03); global float * dst_row = (global float *) ((global char *) dst + i1*nb1 + i2*nb2 + i3*nb3); float row_sum = 0; for (int i0 = 0; i0 < ne00; i0++) { row_sum += src_row[i0]; } dst_row[0] = row_sum; } ggml-org-ggml-3678254/src/ggml-opencl/kernels/tanh.cl000066400000000000000000000052471512524704700223040ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable #ifdef cl_intel_required_subgroup_size #pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable #define INTEL_GPU 1 #define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) #define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) #elif defined(cl_qcom_reqd_sub_group_size) #pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable #define ADRENO_GPU 1 #define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) #define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) #endif kernel void kernel_tanh_f32_nd( global void * p_src0_base, ulong off_src0_abs, global void * p_dst_base, ulong off_dst_abs, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13 ) { int i0 = get_global_id(0); int i1 = get_global_id(1); int i2 = 
get_global_id(2); if (i0 < ne10 && i1 < ne11 && i2 < ne12) { for (int i3 = 0; i3 < ne13; ++i3) { ulong src_offset_in_tensor = (ulong)i0*nb00 + (ulong)i1*nb01 + (ulong)i2*nb02 + (ulong)i3*nb03; global const float *src_val_ptr = (global const float *)((global char *)p_src0_base + off_src0_abs + src_offset_in_tensor); ulong dst_offset_in_tensor = (ulong)i0*nb10 + (ulong)i1*nb11 + (ulong)i2*nb12 + (ulong)i3*nb13; global float *dst_val_ptr = (global float *)((global char *)p_dst_base + off_dst_abs + dst_offset_in_tensor); *dst_val_ptr = tanh(*src_val_ptr); } } } kernel void kernel_tanh_f16_nd( global void * p_src0_base, ulong off_src0_abs, global void * p_dst_base, ulong off_dst_abs, int ne00, int ne01, int ne02, int ne03, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, ulong nb10, ulong nb11, ulong nb12, ulong nb13 ) { int i0 = get_global_id(0); int i1 = get_global_id(1); int i2 = get_global_id(2); if (i0 < ne10 && i1 < ne11 && i2 < ne12) { for (int i3 = 0; i3 < ne13; ++i3) { ulong src_offset_in_tensor = (ulong)i0*nb00 + (ulong)i1*nb01 + (ulong)i2*nb02 + (ulong)i3*nb03; global const half *src_val_ptr = (global const half *)((global char *)p_src0_base + off_src0_abs + src_offset_in_tensor); ulong dst_offset_in_tensor = (ulong)i0*nb10 + (ulong)i1*nb11 + (ulong)i2*nb12 + (ulong)i3*nb13; global half *dst_val_ptr = (global half *)((global char *)p_dst_base + off_dst_abs + dst_offset_in_tensor); *dst_val_ptr = tanh(*src_val_ptr); } } } ggml-org-ggml-3678254/src/ggml-opencl/kernels/transpose.cl000066400000000000000000000104471512524704700233660ustar00rootroot00000000000000#pragma OPENCL EXTENSION cl_khr_fp16 : enable // 16-bit transpose, loading/storing a 4x4 tile of elements kernel void kernel_transpose_16( __read_only image1d_buffer_t input, __write_only image1d_buffer_t output, const uint rows, const uint cols ) { const int i = get_global_id(0); const int j = get_global_id(1); const int i_2 = i<<2; const int j_2 = j<<2; half4 temp0 = read_imageh(input, (j_2+0)*cols+i); half4 temp1 = read_imageh(input, (j_2+1)*cols+i); half4 temp2 = read_imageh(input, (j_2+2)*cols+i); half4 temp3 = read_imageh(input, (j_2+3)*cols+i); write_imageh(output, (i_2+0)*rows+j, (half4)(temp0.s0, temp1.s0, temp2.s0, temp3.s0)); write_imageh(output, (i_2+1)*rows+j, (half4)(temp0.s1, temp1.s1, temp2.s1, temp3.s1)); write_imageh(output, (i_2+2)*rows+j, (half4)(temp0.s2, temp1.s2, temp2.s2, temp3.s2)); write_imageh(output, (i_2+3)*rows+j, (half4)(temp0.s3, temp1.s3, temp2.s3, temp3.s3)); } // Padded kernel for irregular shape kernel void kernel_transpose_16_4x1( __read_only image1d_buffer_t input, __write_only image1d_buffer_t output, const uint rows, const uint cols ) { const int i = get_global_id(0); const int j = get_global_id(1); const int j_2 = j << 2; half temp0 = read_imageh(input, (j_2 + 0) * cols + i).x; half temp1 = read_imageh(input, (j_2 + 1) * cols + i).x; half temp2 = read_imageh(input, (j_2 + 2) * cols + i).x; half temp3 = read_imageh(input, (j_2 + 3) * cols + i).x; write_imageh(output, i * rows + j, (half4)(temp0, temp1, temp2, temp3)); } // Transpose treating each element as 16-bit using buffer kernel void kernel_transpose_16_buf( global const ushort * input, global ushort * output, const int ldi, const int ldo ) { const int x = get_global_id(0); const int y = get_global_id(1); output[x*ldo + y] = input[y*ldi + x]; } // 32-bit transpose, loading/storing a 4x4 tile of elements kernel void kernel_transpose_32( __read_only image1d_buffer_t input, __write_only 
image1d_buffer_t output, const uint rows, const uint cols ) { const int i = get_global_id(0); const int j = get_global_id(1); const int i_2 = i<<2; const int j_2 = j<<2; float4 temp0 = read_imagef(input, (j_2+0)*cols+i); float4 temp1 = read_imagef(input, (j_2+1)*cols+i); float4 temp2 = read_imagef(input, (j_2+2)*cols+i); float4 temp3 = read_imagef(input, (j_2+3)*cols+i); write_imagef(output, (i_2+0)*rows+j, (float4)(temp0.s0, temp1.s0, temp2.s0, temp3.s0)); write_imagef(output, (i_2+1)*rows+j, (float4)(temp0.s1, temp1.s1, temp2.s1, temp3.s1)); write_imagef(output, (i_2+2)*rows+j, (float4)(temp0.s2, temp1.s2, temp2.s2, temp3.s2)); write_imagef(output, (i_2+3)*rows+j, (float4)(temp0.s3, temp1.s3, temp2.s3, temp3.s3)); } // 32-bit transpose, loading/storing a 4x4 tile of elements // Only used for activations // converts to FP16 // also adds zero padding for non multiple of 8 prompt lengths kernel void kernel_transpose_32_16(__read_only image1d_buffer_t input, __write_only image1d_buffer_t output, const uint rows, const uint cols, const uint padded_rows) { const int i = get_global_id(0); const int j = get_global_id(1); const int i_2 = i<<2; const int j_2 = j<<2; half4 temp0 = {0,0,0,0}; // initialize outputs to 0 half4 temp1 = {0,0,0,0}; half4 temp2 = {0,0,0,0}; half4 temp3 = {0,0,0,0}; if((j_2+0)*cols+i*4+3 < rows*cols*16){ // only load from a valid location. Otherwise keep register data as 0 temp0 = read_imageh(input, (j_2+0)*cols+i); } if((j_2+1)*cols+i*4+3 < rows*cols*16){ temp1 = read_imageh(input, (j_2+1)*cols+i); } if((j_2+2)*cols+i*4+3 < rows*cols*16){ temp2 = read_imageh(input, (j_2+2)*cols+i); } if((j_2+3)*cols+i*4+3 < rows*cols*16){ temp3 = read_imageh(input, (j_2+3)*cols+i); } write_imageh(output, (i_2+0)*padded_rows+j, (half4)(temp0.s0, temp1.s0, temp2.s0, temp3.s0)); // no conditionals for output, includes zero padding write_imageh(output, (i_2+1)*padded_rows+j, (half4)(temp0.s1, temp1.s1, temp2.s1, temp3.s1)); write_imageh(output, (i_2+2)*padded_rows+j, (half4)(temp0.s2, temp1.s2, temp2.s2, temp3.s2)); write_imageh(output, (i_2+3)*padded_rows+j, (half4)(temp0.s3, temp1.s3, temp2.s3, temp3.s3)); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/tsembd.cl000066400000000000000000000027001512524704700226170ustar00rootroot00000000000000kernel void kernel_timestep_embedding( global const void * p_timesteps, ulong off_timesteps, global void * p_dst, ulong off_dst, int dst_nb1_bytes, int logical_dim, int max_period ) { int local_i; int local_j; int local_half_dim; float local_timestep_val; float local_freq; float local_arg; global float * local_embed_data_ptr; global const float * local_timesteps_input_ptr; global float * local_dst_output_base_ptr; local_timesteps_input_ptr = (global const float *)((global char *)p_timesteps + off_timesteps); local_dst_output_base_ptr = (global float *)((global char *)p_dst + off_dst); local_i = get_global_id(1); local_j = get_global_id(0); local_half_dim = logical_dim / 2; local_embed_data_ptr = (global float *)((global char *)local_dst_output_base_ptr + local_i * dst_nb1_bytes); if (logical_dim % 2 != 0 && local_j == local_half_dim) { local_embed_data_ptr[2 * local_half_dim] = 0.0f; } if (local_j >= local_half_dim) { return; } local_timestep_val = local_timesteps_input_ptr[local_i]; if (local_half_dim == 0) { local_freq = 1.0f; } else { local_freq = exp(-log((float)max_period) * (float)local_j / (float)local_half_dim); } local_arg = local_timestep_val * local_freq; local_embed_data_ptr[local_j] = cos(local_arg); local_embed_data_ptr[local_j + 
local_half_dim] = sin(local_arg); } ggml-org-ggml-3678254/src/ggml-opencl/kernels/upscale.cl000066400000000000000000000073251512524704700230050ustar00rootroot00000000000000kernel void kernel_upscale( global const void * p_src0, ulong off_src0, global void * p_dst, ulong off_dst, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne10, int ne11, int ne12, int ne13, float sf0, float sf1, float sf2, float sf3 ) { global const char * src_base = (global const char *)p_src0 + off_src0; global float * dst_base = (global float *)((global char *)p_dst + off_dst); int index = get_global_id(0); int dst_total_elements = ne10 * ne11 * ne12 * ne13; if (index >= dst_total_elements) { return; } int i10 = index % ne10; int i11 = (index / ne10) % ne11; int i12 = (index / (ne10 * ne11)) % ne12; int i13 = index / (ne10 * ne11 * ne12); int i00 = (int)(i10 / sf0); int i01 = (int)(i11 / sf1); int i02 = (int)(i12 / sf2); int i03 = (int)(i13 / sf3); ulong offset_src_element = (ulong)i03 * nb03 + (ulong)i02 * nb02 + (ulong)i01 * nb01 + (ulong)i00 * nb00; global const float * src_element_ptr = (global const float *)(src_base + offset_src_element); dst_base[index] = *src_element_ptr; } kernel void kernel_upscale_bilinear( global const void * p_src0, ulong off_src0, global void * p_dst, ulong off_dst, ulong nb00, ulong nb01, ulong nb02, ulong nb03, int ne00_src, int ne01_src, int ne10_dst, int ne11_dst, int ne12_dst, int ne13_dst, float sf0, float sf1, float sf2, float sf3, float pixel_offset ) { global const char * src_base = (global const char *)p_src0 + off_src0; global float * dst_base = (global float *)((global char *)p_dst + off_dst); int index = get_global_id(0); int dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst; if (index >= dst_total_elements) { return; } int i10_dst = index % ne10_dst; int i11_dst = (index / ne10_dst) % ne11_dst; int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst; int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst); int i02_src = (int)(i12_dst / sf2); int i03_src = (int)(i13_dst / sf3); float y_src_f = ((float)i11_dst + pixel_offset) / sf1 - pixel_offset; long y0_src = (long)floor(y_src_f); long y1_src = y0_src + 1; y0_src = max(0L, min(y0_src, (long)ne01_src - 1)); y1_src = max(0L, min(y1_src, (long)ne01_src - 1)); float dy = y_src_f - (float)y0_src; dy = max(0.0f, min(dy, 1.0f)); float x_src_f = ((float)i10_dst + pixel_offset) / sf0 - pixel_offset; long x0_src = (long)floor(x_src_f); long x1_src = x0_src + 1; x0_src = max(0L, min(x0_src, (long)ne00_src - 1)); x1_src = max(0L, min(x1_src, (long)ne00_src - 1)); float dx = x_src_f - (float)x0_src; dx = max(0.0f, min(dx, 1.0f)); global const float * p_a = (global const float *)(src_base + (ulong)x0_src * nb00 + (ulong)y0_src * nb01 + (ulong)i02_src * nb02 + (ulong)i03_src * nb03); global const float * p_b = (global const float *)(src_base + (ulong)x1_src * nb00 + (ulong)y0_src * nb01 + (ulong)i02_src * nb02 + (ulong)i03_src * nb03); global const float * p_c = (global const float *)(src_base + (ulong)x0_src * nb00 + (ulong)y1_src * nb01 + (ulong)i02_src * nb02 + (ulong)i03_src * nb03); global const float * p_d = (global const float *)(src_base + (ulong)x1_src * nb00 + (ulong)y1_src * nb01 + (ulong)i02_src * nb02 + (ulong)i03_src * nb03); const float val_a = *p_a; const float val_b = *p_b; const float val_c = *p_c; const float val_d = *p_d; float result = val_a * (1.0f - dx) * (1.0f - dy) + val_b * dx * (1.0f - dy) + val_c * (1.0f - dx) * dy + val_d * dx * dy; dst_base[index] = result; } 
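The bilinear path above reduces, per output element, to a two-axis lerp of the four clamped source neighbours. As a minimal host-side reference sketch (not part of the repository: `upscale_bilinear_ref`, its parameter list, and the assumption of a single contiguous [ne01][ne00] float slice are illustrative only), the same arithmetic can be written as:

#include <math.h>

// Reference for one output pixel of the bilinear upscale kernel above,
// restricted to a single contiguous [ne01][ne00] float slice.
static float upscale_bilinear_ref(const float * src, int ne00, int ne01,
                                  int i10, int i11, float sf0, float sf1,
                                  float pixel_offset) {
    // map the destination coordinate back into source space
    float x = ((float) i10 + pixel_offset) / sf0 - pixel_offset;
    float y = ((float) i11 + pixel_offset) / sf1 - pixel_offset;

    long x0 = (long) floorf(x), x1 = x0 + 1;
    long y0 = (long) floorf(y), y1 = y0 + 1;

    // clamp the four sample coordinates to the source bounds
    x0 = x0 < 0 ? 0 : (x0 > ne00 - 1 ? ne00 - 1 : x0);
    x1 = x1 < 0 ? 0 : (x1 > ne00 - 1 ? ne00 - 1 : x1);
    y0 = y0 < 0 ? 0 : (y0 > ne01 - 1 ? ne01 - 1 : y0);
    y1 = y1 < 0 ? 0 : (y1 > ne01 - 1 ? ne01 - 1 : y1);

    float dx = x - (float) x0; dx = dx < 0.0f ? 0.0f : (dx > 1.0f ? 1.0f : dx);
    float dy = y - (float) y0; dy = dy < 0.0f ? 0.0f : (dy > 1.0f ? 1.0f : dy);

    // a at (x0,y0), b at (x1,y0), c at (x0,y1), d at (x1,y1)
    const float a = src[y0*ne00 + x0], b = src[y0*ne00 + x1];
    const float c = src[y1*ne00 + x0], d = src[y1*ne00 + x1];

    return a*(1.0f - dx)*(1.0f - dy) + b*dx*(1.0f - dy)
         + c*(1.0f - dx)*dy          + d*dx*dy;
}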
ggml-org-ggml-3678254/src/ggml-opt.cpp000066400000000000000000001220771512524704700174160ustar00rootroot00000000000000#include "ggml-opt.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-impl.h"

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <map>
#include <random>
#include <vector>

struct ggml_opt_dataset {
    struct ggml_context   * ctx    = nullptr;
    ggml_backend_buffer_t   buf    = nullptr;
    struct ggml_tensor    * data   = nullptr;
    struct ggml_tensor    * labels = nullptr;

    int64_t ndata       = -1;
    int64_t ndata_shard = -1;
    size_t  nbs_data    = -1;
    size_t  nbs_labels  = -1;

    std::vector<int64_t> permutation;
};

struct ggml_opt_context {
    ggml_backend_sched_t  backend_sched        = nullptr;
    ggml_cgraph         * allocated_graph      = nullptr;
    ggml_cgraph         * allocated_graph_copy = nullptr;
    struct ggml_context * ctx_static           = nullptr;
    struct ggml_context * ctx_cpu              = nullptr;
    struct ggml_context * ctx_compute          = nullptr;
    struct ggml_context * ctx_copy             = nullptr;
    ggml_backend_buffer_t buf_static           = nullptr;
    ggml_backend_buffer_t buf_cpu              = nullptr;
    std::mt19937 rng;
    enum ggml_opt_loss_type  loss_type;
    enum ggml_opt_build_type build_type;
    enum ggml_opt_build_type build_type_alloc;

    struct ggml_tensor * inputs  = nullptr;
    struct ggml_tensor * outputs = nullptr;
    struct ggml_tensor * labels  = nullptr;

    struct ggml_tensor * loss     = nullptr;
    struct ggml_tensor * pred     = nullptr;
    struct ggml_tensor * ncorrect = nullptr;

    struct ggml_cgraph * gf      = nullptr;
    struct ggml_cgraph * gb_grad = nullptr;
    struct ggml_cgraph * gb_opt  = nullptr;
    bool static_graphs = false;
    bool eval_ready    = false;
    std::vector<struct ggml_tensor *> grad_accs;
    std::vector<struct ggml_tensor *> grad_m;
    std::vector<struct ggml_tensor *> grad_v;

    int64_t iter       = 1;
    int32_t opt_period = 1;
    int32_t opt_i      = 0;
    bool loss_per_datapoint = false;

    ggml_opt_get_optimizer_params get_opt_pars = nullptr;
    void * get_opt_pars_ud                     = nullptr;
    struct ggml_tensor * opt_step_params       = nullptr; // Stores output of get_opt_pars.
enum ggml_opt_optimizer_type optimizer = GGML_OPT_OPTIMIZER_TYPE_ADAMW; }; struct ggml_opt_result { int64_t ndata = 0; std::vector loss; std::vector pred; int64_t ncorrect = 0; int64_t opt_period = -1; bool loss_per_datapoint = false; }; // ====== Dataset ====== ggml_opt_dataset_t ggml_opt_dataset_init( enum ggml_type type_data, enum ggml_type type_label, int64_t ne_datapoint, int64_t ne_label, int64_t ndata, int64_t ndata_shard) { GGML_ASSERT(ne_datapoint > 0); GGML_ASSERT(ne_label >= 0); GGML_ASSERT(ndata > 0); GGML_ASSERT(ndata_shard > 0); ggml_opt_dataset_t result = new ggml_opt_dataset; result->ndata = ndata; result->ndata_shard = ndata_shard; { struct ggml_init_params params = { /*.mem_size =*/ 2*ggml_tensor_overhead(), /*.mem_buffer =*/ nullptr, /*.no_alloc =*/ true, }; result->ctx = ggml_init(params); } result->data = ggml_new_tensor_2d(result->ctx, type_data, ne_datapoint, ndata); result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata; if (ne_label > 0) { result->labels = ggml_new_tensor_2d(result->ctx, type_label, ne_label, ndata); result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata; } else { result->labels = nullptr; result->nbs_labels = 0; } result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type()); const int64_t nshards = ndata/ndata_shard; result->permutation.resize(nshards); for (int64_t i = 0; i < nshards; ++i) { result->permutation[i] = i; } return result; } void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) { ggml_backend_buffer_free(dataset->buf); ggml_free(dataset->ctx); delete dataset; } int64_t ggml_opt_dataset_ndata(ggml_opt_dataset_t dataset) { return dataset->ndata; } struct ggml_tensor * ggml_opt_dataset_data(ggml_opt_dataset_t dataset) { return dataset->data; } struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) { return dataset->labels; } void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) { GGML_ASSERT(idata <= dataset->ndata); if (idata < 0) { std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng); return; } GGML_ASSERT(idata % dataset->ndata_shard == 0); const int64_t ishard_max = idata / dataset->ndata_shard; std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng); } void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) { GGML_ASSERT( data_batch && ggml_is_contiguous(data_batch)); GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch)); GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr)); GGML_ASSERT( data_batch->type == dataset->data->type); GGML_ASSERT(!labels_batch || labels_batch->type == dataset->labels->type); const size_t nb_data_batch = ggml_nbytes(data_batch); GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0); const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data; if (labels_batch) { const size_t nb_labels_batch = ggml_nbytes(labels_batch); GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels); } GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size())); for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) { const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch]; const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data; ggml_backend_tensor_set(data_batch, ptr_data, 
ishard_batch*dataset->nbs_data, dataset->nbs_data); if (!labels_batch) { continue; } const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels; ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels); } } void ggml_opt_dataset_get_batch_host(ggml_opt_dataset_t dataset, void * data_batch, size_t nb_data_batch, void * labels_batch, int64_t ibatch) { GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr)); GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0); const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data; GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size())); for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) { const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch]; const char * ptr_data = (const char *) dataset->data->data + ishard *dataset->nbs_data; char * ptr_data_batch = (char *) data_batch + ishard_batch*dataset->nbs_data; memcpy(ptr_data_batch, ptr_data, dataset->nbs_data); if (!labels_batch) { continue; } const char * ptr_labels = (const char *) dataset->labels->data + ishard *dataset->nbs_labels; char * ptr_labels_batch = (char *) labels_batch + ishard_batch*dataset->nbs_labels; memcpy(ptr_labels_batch, ptr_labels, dataset->nbs_labels); } } // ====== Model / Context ====== struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) { GGML_UNUSED(userdata); ggml_opt_optimizer_params result; result.adamw.alpha = 0.001f; result.adamw.beta1 = 0.9f; result.adamw.beta2 = 0.999f; result.adamw.eps = 1e-8f; result.adamw.wd = 0.0f; result.sgd.alpha = 1e-3f; result.sgd.wd = 0.0f; return result; } struct ggml_opt_optimizer_params ggml_opt_get_constant_optimizer_params(void * userdata) { return *((struct ggml_opt_optimizer_params *) userdata); } struct ggml_opt_params ggml_opt_default_params( ggml_backend_sched_t backend_sched, enum ggml_opt_loss_type loss_type) { return { /*backend_sched =*/ backend_sched, /*ctx_compute =*/ nullptr, /*inputs =*/ nullptr, /*logits =*/ nullptr, /*loss_type =*/ loss_type, /*build_type =*/ GGML_OPT_BUILD_TYPE_OPT, /*opt_period =*/ 1, /*get_opt_pars =*/ ggml_opt_get_default_optimizer_params, /*get_opt_pars_ud =*/ nullptr, /*optimizer =*/ GGML_OPT_OPTIMIZER_TYPE_ADAMW, }; } static ggml_tensor * map_tensor(std::map & tensor_map, ggml_context * ctx, ggml_tensor * tensor) { if (!tensor) { return nullptr; } if (tensor_map.find(tensor) != tensor_map.end()) { return tensor_map[tensor]; } ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor); tensor_map[tensor] = new_tensor; new_tensor->op = tensor->op; for (int i = 0; i < GGML_MAX_DIMS; i++) { new_tensor->nb[i] = tensor->nb[i]; } new_tensor->flags = tensor->flags; memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params)); strcpy(new_tensor->name, tensor->name); new_tensor->data = tensor->data; new_tensor->buffer = tensor->buffer; new_tensor->extra = tensor->extra; new_tensor->view_offs = tensor->view_offs; new_tensor->view_src = map_tensor(tensor_map, ctx, tensor->view_src); for (int i = 0; i < GGML_MAX_SRC; i++) { new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]); } return new_tensor; } static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) { std::map tensor_map; ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true); for (int i = 0; i < src->n_leafs; i++) { ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, 
src->leafs[i])); } GGML_ASSERT(dst->n_leafs == src->n_leafs); for (int i = 0; i < src->n_nodes; i++) { ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i])); } GGML_ASSERT(dst->n_nodes == src->n_nodes); for (int i = 0; i < src->n_nodes; ++i) { const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]); const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]); GGML_ASSERT(igrad_src != GGML_HASHSET_FULL); GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src)); GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL); GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst)); dst->grads[igrad_dst] = src->grads[igrad_src]; dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src]; } return dst; } static void ggml_opt_build(ggml_opt_context_t opt_ctx) { GGML_ASSERT(opt_ctx->ctx_compute && "no compute context set, either use static graphs or set one with ggml_opt_prepare_alloc"); GGML_ASSERT((!opt_ctx->static_graphs || opt_ctx->inputs->data) && "when using static graphs the inputs must be allocated statically"); const enum ggml_opt_optimizer_type optimizer = opt_ctx->optimizer; const bool accumulate = opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD && !(opt_ctx->static_graphs && opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period == 1); const bool need_momenta = opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->optimizer == GGML_OPT_OPTIMIZER_TYPE_ADAMW; ggml_set_input(opt_ctx->inputs); ggml_set_output(opt_ctx->outputs); int n_param = 0; for (int i = 0; i < opt_ctx->gf->n_nodes; ++i) { const struct ggml_tensor * node = opt_ctx->gf->nodes[i]; if (node->flags & GGML_TENSOR_FLAG_PARAM) { n_param++; } GGML_ASSERT(!(node->flags & GGML_TENSOR_FLAG_LOSS) && "support for extra loss terms not implemented"); } if (!opt_ctx->ctx_static) { // The static context is used for: // - gradients (1 per loss, 1 tensor per param if using gradient accumulation) // - optimizer momenta (2 tensors per param) // - labels (if using static graphs) // - loss (if using static graphs, up to 5 tensors) // - pred (if using static graphs) // - ncorrect (if using static graphs, 2 tensors). constexpr size_t n_loss = 1; const size_t tensors_per_param = (accumulate ? 1 : 0) + (need_momenta ? 2 : 0); const size_t tensors_const = opt_ctx->static_graphs ? 9 : 0; const size_t size_meta = (n_loss + tensors_per_param*n_param + tensors_const) * ggml_tensor_overhead(); struct ggml_init_params params = { /*.mem_size =*/ size_meta, /*.mem_buffer =*/ nullptr, /*.no_alloc =*/ true, }; opt_ctx->ctx_static = ggml_init(params); } GGML_ASSERT(opt_ctx->build_type <= opt_ctx->build_type_alloc); { // The cpu context is allocated statically if using static graphs, dynamically otherwise. // It is used for: // - optimizer parameters (1 shared for all optimizer invocations) const size_t size_meta = 1 * ggml_tensor_overhead(); struct ggml_init_params params = { /*.mem_size =*/ size_meta, /*.mem_buffer =*/ nullptr, /*.no_alloc =*/ true, }; ggml_free(opt_ctx->ctx_cpu); opt_ctx->ctx_cpu = ggml_init(params); ggml_backend_buffer_free(opt_ctx->buf_cpu); opt_ctx->buf_cpu = nullptr; } struct ggml_context * ctx_results = opt_ctx->static_graphs ? 
opt_ctx->ctx_static : opt_ctx->ctx_compute; switch (opt_ctx->loss_type) { case GGML_OPT_LOSS_TYPE_MEAN: { opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs); ggml_set_name(opt_ctx->loss, "loss_sum"); const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs)); opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale); ggml_set_name(opt_ctx->loss, "loss_mean"); opt_ctx->loss_per_datapoint = true; break; } case GGML_OPT_LOSS_TYPE_SUM: { opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->outputs); ggml_set_name(opt_ctx->loss, "loss_sum"); opt_ctx->loss_per_datapoint = false; break; } case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: { opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs); ggml_set_input(opt_ctx->labels); ggml_set_name(opt_ctx->labels, "labels"); opt_ctx->loss = ggml_cross_entropy_loss(ctx_results, opt_ctx->outputs, opt_ctx->labels); ggml_set_name(opt_ctx->loss, "loss_cross_entropy"); if (opt_ctx->opt_period > 1) { opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, 1.0f / opt_ctx->opt_period); ggml_set_name(opt_ctx->loss, "loss_cross_entropy_scaled"); } opt_ctx->loss_per_datapoint = true; break; } case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: { opt_ctx->labels = ggml_dup_tensor(ctx_results, opt_ctx->outputs); ggml_set_input(opt_ctx->labels); ggml_set_name(opt_ctx->labels, "labels"); opt_ctx->loss = ggml_sub(ctx_results, opt_ctx->outputs, opt_ctx->labels); ggml_set_name(opt_ctx->loss, "loss_error"); opt_ctx->loss = ggml_sqr(ctx_results, opt_ctx->loss); ggml_set_name(opt_ctx->loss, "loss_squared_error"); opt_ctx->loss = ggml_sum(ctx_results, opt_ctx->loss); ggml_set_name(opt_ctx->loss, "loss_sum_squared_error"); const float scale = 1.0f / (opt_ctx->opt_period * ggml_nelements(opt_ctx->outputs)); opt_ctx->loss = ggml_scale(ctx_results, opt_ctx->loss, scale); ggml_set_name(opt_ctx->loss, "loss_mean_squared_error"); opt_ctx->loss_per_datapoint = true; break; } } ggml_set_output(opt_ctx->loss); ggml_set_loss(opt_ctx->loss); ggml_build_forward_expand(opt_ctx->gf, opt_ctx->loss); if (opt_ctx->loss_type == GGML_OPT_LOSS_TYPE_CROSS_ENTROPY) { opt_ctx->pred = ggml_argmax(ctx_results, opt_ctx->outputs); ggml_set_name(opt_ctx->pred, "pred"); ggml_set_output(opt_ctx->pred); ggml_build_forward_expand(opt_ctx->gf, opt_ctx->pred); opt_ctx->ncorrect = ggml_count_equal(ctx_results, opt_ctx->pred, ggml_argmax(ctx_results, opt_ctx->labels)); ggml_set_name(opt_ctx->ncorrect, "ncorrect"); ggml_set_output(opt_ctx->ncorrect); ggml_build_forward_expand(opt_ctx->gf, opt_ctx->ncorrect); } if (opt_ctx->buf_static) { if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_FORWARD) { return; } } else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_FORWARD) { opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors( opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0)); return; } if (opt_ctx->grad_accs.empty()) { GGML_ASSERT(opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_GRAD); const int n_nodes = opt_ctx->gf->n_nodes; opt_ctx->grad_accs.resize(n_nodes); for (int i = 0; i < n_nodes; ++i) { ggml_tensor * node = opt_ctx->gf->nodes[i]; if ((accumulate && (node->flags & GGML_TENSOR_FLAG_PARAM)) || (node->flags & GGML_TENSOR_FLAG_LOSS)) { opt_ctx->grad_accs[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne); } else { opt_ctx->grad_accs[i] = nullptr; } } if (need_momenta && opt_ctx->build_type_alloc >= GGML_OPT_BUILD_TYPE_OPT) { opt_ctx->grad_m.resize(n_nodes); opt_ctx->grad_v.resize(n_nodes); for (int i = 0; i < n_nodes; ++i) { 
ggml_tensor * node = opt_ctx->gf->nodes[i]; if (node->flags & GGML_TENSOR_FLAG_PARAM) { opt_ctx->grad_m[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne); opt_ctx->grad_v[i] = ggml_new_tensor(opt_ctx->ctx_static, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne); } else { opt_ctx->grad_m[i] = nullptr; opt_ctx->grad_v[i] = nullptr; } } } } // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients. opt_ctx->gb_grad = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gf, /*force_grads =*/ true); ggml_build_backward_expand(opt_ctx->ctx_compute, opt_ctx->gb_grad, opt_ctx->grad_accs.data()); if (opt_ctx->buf_static) { if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_GRAD) { return; } } else if (opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_GRAD) { opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors(opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0)); ggml_graph_reset(opt_ctx->gb_grad); } GGML_ASSERT(opt_ctx->build_type_alloc == GGML_OPT_BUILD_TYPE_OPT); // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step. opt_ctx->gb_opt = ggml_graph_dup(opt_ctx->ctx_compute, opt_ctx->gb_grad, /*force_grads =*/ true); opt_ctx->opt_step_params = ggml_new_tensor_1d(opt_ctx->ctx_cpu, GGML_TYPE_F32, need_momenta ? 7 : 2); ggml_tensor * adamw_params = opt_ctx->opt_step_params; ggml_set_input(adamw_params); const char * optimizer_name = ggml_opt_optimizer_name(opt_ctx->optimizer); ggml_format_name(adamw_params, "%s_params", optimizer_name); for (int i = opt_ctx->gf->n_nodes-1; i >= 0; --i) { struct ggml_tensor * node = opt_ctx->gb_opt->nodes[i]; struct ggml_tensor * grad = ggml_graph_get_grad(opt_ctx->gb_opt, node); if (grad && (node->flags & GGML_TENSOR_FLAG_PARAM)) { struct ggml_tensor * m = nullptr; struct ggml_tensor * v = nullptr; if (need_momenta) { m = opt_ctx->grad_m[i]; v = opt_ctx->grad_v[i]; ggml_format_name(m, "AdamW m for %s", node->name); ggml_format_name(v, "AdamW v for %s", node->name); } struct ggml_tensor * opt_step; switch (optimizer) { case GGML_OPT_OPTIMIZER_TYPE_ADAMW: opt_step = ggml_opt_step_adamw(opt_ctx->ctx_compute, node, grad, m, v, adamw_params); break; case GGML_OPT_OPTIMIZER_TYPE_SGD: opt_step = ggml_opt_step_sgd(opt_ctx->ctx_compute, node, grad, adamw_params); break; default: GGML_ABORT("fatal error"); } ggml_format_name(opt_step, "%s step for %s", optimizer_name, node->name); ggml_build_forward_expand(opt_ctx->gb_opt, opt_step); } } if (!opt_ctx->buf_static) { opt_ctx->buf_static = ggml_backend_alloc_ctx_tensors( opt_ctx->ctx_static, ggml_backend_sched_get_backend(opt_ctx->backend_sched, 0)); ggml_graph_reset(opt_ctx->gb_opt); } opt_ctx->buf_cpu = ggml_backend_alloc_ctx_tensors_from_buft(opt_ctx->ctx_cpu, ggml_backend_cpu_buffer_type()); } ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { ggml_opt_context_t result = new struct ggml_opt_context; result->backend_sched = params.backend_sched; result->ctx_compute = params.ctx_compute; result->loss_type = params.loss_type; result->build_type = params.build_type; result->build_type_alloc = params.build_type; result->inputs = params.inputs; result->outputs = params.outputs; result->opt_period = params.opt_period; result->get_opt_pars = params.get_opt_pars; result->get_opt_pars_ud = params.get_opt_pars_ud; result->optimizer = params.optimizer; GGML_ASSERT(result->opt_period >= 1); result->static_graphs = result->ctx_compute; if (!result->static_graphs) { 
GGML_ASSERT(!result->inputs); GGML_ASSERT(!result->outputs); return result; } GGML_ASSERT(result->inputs); GGML_ASSERT(result->outputs); result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass. ggml_build_forward_expand(result->gf, result->outputs); ggml_opt_build(result); return result; } void ggml_opt_free(ggml_opt_context_t opt_ctx) { if (opt_ctx == nullptr) { return; } ggml_backend_buffer_free(opt_ctx->buf_static); ggml_backend_buffer_free(opt_ctx->buf_cpu); ggml_free(opt_ctx->ctx_static); ggml_free(opt_ctx->ctx_cpu); delete opt_ctx; } void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) { if (optimizer) { ggml_graph_reset(opt_ctx->gb_opt); opt_ctx->iter = 1; } else { ggml_graph_reset(opt_ctx->gb_grad); } } bool ggml_opt_static_graphs(ggml_opt_context_t opt_ctx) { return opt_ctx->static_graphs; } struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) { return opt_ctx->inputs; } struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) { return opt_ctx->outputs; } struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) { return opt_ctx->labels; } struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) { return opt_ctx->loss; } struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) { return opt_ctx->pred; } struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) { return opt_ctx->ncorrect; } struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) { return ggml_graph_get_grad_acc(opt_ctx->gb_opt, node); } // ====== Optimization Result ====== ggml_opt_result_t ggml_opt_result_init() { return new ggml_opt_result; } void ggml_opt_result_free(ggml_opt_result_t result) { delete result; } void ggml_opt_result_reset(ggml_opt_result_t result) { result->ndata = 0; result->loss.clear(); result->pred.clear(); result->ncorrect = 0; } void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) { *ndata = result->ndata; } void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) { const int64_t nbatches = result->loss.size(); // Number of physical batches. if (nbatches == 0) { *loss = 0.0; *unc = NAN; return; } double sum = 0.0; double sum_squared = 0.0; for (const float & loss : result->loss) { // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch. const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss; sum += loss_scaled; sum_squared += loss_scaled*loss_scaled; } const double mean = sum/nbatches; *loss = result->loss_per_datapoint ? mean : sum; if (!unc) { return; } if (nbatches < 2) { *unc = NAN; return; } const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1) *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1)); } void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) { for (size_t i = 0; i < result->pred.size(); ++i) { pred[i] = result->pred[i]; } } void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) { *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN; if (!unc) { return; } *unc = result->ncorrect >= 0 && result->ndata >= 2 ? 
sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN; } // ====== Computation ====== void ggml_opt_prepare_alloc( ggml_opt_context_t opt_ctx, struct ggml_context * ctx_compute, struct ggml_cgraph * gf, struct ggml_tensor * inputs, struct ggml_tensor * outputs) { GGML_ASSERT(!opt_ctx->static_graphs); opt_ctx->ctx_compute = ctx_compute; opt_ctx->gf = gf; opt_ctx->inputs = inputs; opt_ctx->outputs = outputs; } void ggml_opt_alloc(ggml_opt_context_t opt_ctx, bool backward) { GGML_ASSERT(!opt_ctx->eval_ready); if (opt_ctx->build_type == GGML_OPT_BUILD_TYPE_OPT && opt_ctx->opt_period > 1 && opt_ctx->opt_i == 0) { ggml_graph_reset(opt_ctx->gb_grad); } if (backward) { const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period; opt_ctx->build_type = opt_i_next == 0 ? GGML_OPT_BUILD_TYPE_OPT : GGML_OPT_BUILD_TYPE_GRAD; } else { opt_ctx->build_type = GGML_OPT_BUILD_TYPE_FORWARD; } if (!opt_ctx->static_graphs) { ggml_opt_build(opt_ctx); } struct ggml_cgraph * graph = nullptr; switch (opt_ctx->build_type) { case GGML_OPT_BUILD_TYPE_FORWARD: { graph = opt_ctx->gf; } break; case GGML_OPT_BUILD_TYPE_GRAD: { graph = opt_ctx->gb_grad; } break; case GGML_OPT_BUILD_TYPE_OPT: { graph = opt_ctx->gb_opt; } break; } GGML_ASSERT(graph); if (opt_ctx->allocated_graph == graph) { opt_ctx->eval_ready = true; return; } ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph if (opt_ctx->static_graphs) { ggml_init_params params = { /*.mem_size =*/ graph->size*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph->size, graph->grads), /*.mem_buffer =*/ nullptr, /*.no_alloc =*/ true, }; ggml_free(opt_ctx->ctx_copy); opt_ctx->ctx_copy = ggml_init(params); opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph); } else { opt_ctx->allocated_graph_copy = graph; } ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy); opt_ctx->allocated_graph = graph; opt_ctx->eval_ready = true; } void ggml_opt_eval(ggml_opt_context_t opt_ctx, ggml_opt_result_t result) { GGML_ASSERT(opt_ctx->eval_ready); if (opt_ctx->allocated_graph == opt_ctx->gb_opt) { const ggml_opt_optimizer_params & opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud); switch (opt_ctx->optimizer) { case GGML_OPT_OPTIMIZER_TYPE_ADAMW: { GGML_ASSERT(opt_pars.adamw.alpha > 0.0f); GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f); GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f); GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f); GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f); GGML_ASSERT(opt_pars.adamw.eps >= 0.0f); GGML_ASSERT(opt_pars.adamw.wd >= 0.0f); GGML_ASSERT(opt_pars.adamw.wd <= 1.0f); // beta1, beta2 after applying warmup const float beta1h = 1.0f / (1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter)); const float beta2h = 1.0f / (1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter)); float * adamw_par_data = ggml_get_data_f32(opt_ctx->opt_step_params); adamw_par_data[0] = opt_pars.adamw.alpha; adamw_par_data[1] = opt_pars.adamw.beta1; adamw_par_data[2] = opt_pars.adamw.beta2; adamw_par_data[3] = opt_pars.adamw.eps; adamw_par_data[4] = opt_pars.adamw.wd; adamw_par_data[5] = beta1h; adamw_par_data[6] = beta2h; } break; case GGML_OPT_OPTIMIZER_TYPE_SGD: { GGML_ASSERT(opt_pars.sgd.alpha > 0.0f); GGML_ASSERT(opt_pars.sgd.wd >= 0.0f); GGML_ASSERT(opt_pars.sgd.wd <= 1.0f); float * sgd = ggml_get_data_f32(opt_ctx->opt_step_params); sgd[0] = opt_pars.sgd.alpha; sgd[1] = opt_pars.sgd.wd; } break; default: GGML_ABORT("fatal error"); } } ggml_backend_sched_graph_compute(opt_ctx->backend_sched, 
opt_ctx->allocated_graph_copy); opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt; opt_ctx->opt_i = (opt_ctx->opt_i + 1) % opt_ctx->opt_period; if (!opt_ctx->static_graphs) { opt_ctx->gf = nullptr; opt_ctx->gb_grad = nullptr; opt_ctx->gb_opt = nullptr; opt_ctx->allocated_graph = nullptr; opt_ctx->allocated_graph_copy = nullptr; } opt_ctx->eval_ready = false; if (!result) { return; } if (result->ndata == 0) { result->loss_per_datapoint = opt_ctx->loss_per_datapoint; result->opt_period = opt_ctx->opt_period; } else { GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint); GGML_ASSERT(result->opt_period == opt_ctx->opt_period); } const int64_t ndata = opt_ctx->outputs->ne[1]; GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported"); result->ndata += ndata; GGML_ASSERT(ggml_is_scalar(opt_ctx->loss)); GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32); float loss; ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss)); result->loss.push_back(loss); if (opt_ctx->pred) { GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32); std::vector pred(ndata); ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred)); result->pred.insert(result->pred.end(), pred.begin(), pred.end()); } if (!opt_ctx->ncorrect || result->ncorrect < 0) { result->ncorrect = -1; return; } GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect)); GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64); int64_t ncorrect; ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect)); result->ncorrect += ncorrect; } // ====== High-Level Functions ====== void ggml_opt_epoch( ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, ggml_opt_result_t result_train, ggml_opt_result_t result_eval, int64_t idata_split, ggml_opt_epoch_callback callback_train, ggml_opt_epoch_callback callback_eval) { GGML_ASSERT(ggml_opt_static_graphs(opt_ctx) && "ggml_opt_epoch requires static graphs"); struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx); struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); struct ggml_tensor * data = ggml_opt_dataset_data(dataset); GGML_ASSERT(data->ne[0] == inputs->ne[0]); const int64_t ndata = data->ne[1]; const int64_t ndata_batch = inputs->ne[1]; GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0); const int64_t nbatches = ndata/ndata_batch; idata_split = idata_split < 0 ? ndata : idata_split; GGML_ASSERT(idata_split % ndata_batch == 0); const int64_t ibatch_split = idata_split / ndata_batch; int64_t ibatch = 0; int64_t t_loop_start = ggml_time_us(); for (; ibatch < ibatch_split; ++ibatch) { ggml_opt_alloc(opt_ctx, /*backward =*/ true); ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch); ggml_opt_eval(opt_ctx, result_train); if (callback_train) { callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start); } } t_loop_start = ggml_time_us(); for (; ibatch < nbatches; ++ibatch) { ggml_opt_alloc(opt_ctx, /*backward =*/ false); ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch); ggml_opt_eval(opt_ctx, result_eval); if (callback_eval) { callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start); } } } void ggml_opt_epoch_callback_progress_bar( bool train, ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, ggml_opt_result_t result, int64_t ibatch, int64_t ibatch_max, int64_t t_start_us) { fprintf(stderr, "%s[", train ? 
"train: " : "val: "); // The progress bar consists of partially filled blocks, unicode has 8 separate fill levels. constexpr int64_t bar_length = 8; const int64_t ibatch8 = 8 * ibatch; for (int64_t j = 0; j < bar_length; ++j) { if (ibatch_max * (8*j + 8) / bar_length < ibatch8) { fprintf(stderr, "\u2588"); // full block } else if (ibatch_max * (8*j + 7) / bar_length < ibatch8) { fprintf(stderr, "\u2589"); // 7/8 filled } else if (ibatch_max * (8*j + 6) / bar_length < ibatch8) { fprintf(stderr, "\u258A"); // 6/8 filled } else if (ibatch_max * (8*j + 5) / bar_length < ibatch8) { fprintf(stderr, "\u258B"); // 5/8 filled } else if (ibatch_max * (8*j + 4) / bar_length < ibatch8) { fprintf(stderr, "\u258C"); // 4/8 filled } else if (ibatch_max * (8*j + 3) / bar_length < ibatch8) { fprintf(stderr, "\u258D"); // 3/8 filled } else if (ibatch_max * (8*j + 2) / bar_length < ibatch8) { fprintf(stderr, "\u258E"); // 2/8 filled } else if (ibatch_max * (8*j + 1) / bar_length < ibatch8) { fprintf(stderr, "\u258F"); // 1/8 filled } else { fprintf(stderr, " "); } } const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1]; const int64_t idata = ibatch*batch_size; const int64_t idata_max = ibatch_max*batch_size; double loss; double loss_unc; ggml_opt_result_loss(result, &loss, &loss_unc); double accuracy; double accuracy_unc; ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc); const int64_t t_ibatch_us = ggml_time_us() - t_start_us; int64_t t_ibatch_s = t_ibatch_us / 1000000; const int64_t t_ibatch_h = t_ibatch_s / 3600; t_ibatch_s -= t_ibatch_h * 3600; const int64_t t_ibatch_m = t_ibatch_s / 60; t_ibatch_s -= t_ibatch_m * 60; const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch; int64_t t_eta_s = t_eta_us / 1000000; const int64_t t_eta_h = t_eta_s / 3600; t_eta_s -= t_eta_h * 3600; const int64_t t_eta_m = t_eta_s / 60; t_eta_s -= t_eta_m * 60; fprintf(stderr, "] data=%07" PRId64 "/%07" PRId64 " loss=%.5lf±%.5lf acc=%.2lf±%.2lf%% " "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 " \r", idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc, t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s); if (ibatch == ibatch_max) { fprintf(stderr, "\n"); } fflush(stderr); GGML_UNUSED(dataset); } void ggml_opt_fit( ggml_backend_sched_t backend_sched, ggml_context * ctx_compute, ggml_tensor * inputs, ggml_tensor * outputs, ggml_opt_dataset_t dataset, enum ggml_opt_loss_type loss_type, enum ggml_opt_optimizer_type optimizer, ggml_opt_get_optimizer_params get_opt_pars, int64_t nepoch, int64_t nbatch_logical, float val_split, bool silent) { ggml_time_init(); const int64_t t_start_us = ggml_time_us(); const int64_t ndata = ggml_opt_dataset_data(dataset)->ne[1]; const int64_t nbatch_physical = inputs->ne[1]; GGML_ASSERT(ndata % nbatch_logical == 0); GGML_ASSERT(nbatch_logical % nbatch_physical == 0); const int64_t opt_period = nbatch_logical / nbatch_physical; const int64_t nbatches_logical = ndata / nbatch_logical; GGML_ASSERT(val_split >= 0.0f); GGML_ASSERT(val_split < 1.0f); const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical) const int64_t idata_split = ibatch_split * nbatch_physical; int64_t epoch = 1; ggml_opt_params params = ggml_opt_default_params(backend_sched, loss_type); params.ctx_compute = ctx_compute; params.inputs = inputs; params.outputs = outputs; params.opt_period = opt_period; params.get_opt_pars = get_opt_pars; params.get_opt_pars_ud = 
&epoch;
    params.optimizer       = optimizer;
    ggml_opt_context_t opt_ctx = ggml_opt_init(params);

    // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch.
    if (nbatch_logical < ndata) {
        ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation).
    }

    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_val   = ggml_opt_result_init();

    ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar;

    for (; epoch <= nepoch; ++epoch) {
        if (nbatch_logical < idata_split) {
            ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split);
        }

        ggml_opt_result_reset(result_train);
        ggml_opt_result_reset(result_val);

        if (!silent) {
            fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch);
        }
        ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback);
        if (!silent) {
            fprintf(stderr, "\n");
        }
    }

    if (!silent) {
        int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000;
        const int64_t t_total_h = t_total_s / 3600;
        t_total_s -= t_total_h * 3600;
        const int64_t t_total_m = t_total_s / 60;
        t_total_s -= t_total_m * 60;
        fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s);
    }

    ggml_opt_free(opt_ctx);
    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_val);
}

enum ggml_opt_optimizer_type ggml_opt_context_optimizer_type(ggml_opt_context_t c) {
    return c->optimizer;
}

GGML_API const char * ggml_opt_optimizer_name(enum ggml_opt_optimizer_type o) {
    switch (o) {
        case GGML_OPT_OPTIMIZER_TYPE_ADAMW:
            return "adamw";
        case GGML_OPT_OPTIMIZER_TYPE_SGD:
            return "sgd";
        default:
            return "undefined";
    };
}
ggml-org-ggml-3678254/src/ggml-quants.c000066400000000000000000006503171512524704700175660ustar00rootroot00000000000000#define GGML_COMMON_IMPL_C
#include "ggml-common.h"
#include "ggml-quants.h"

#include "ggml-impl.h"
#include "ggml-cpu/ggml-cpu-impl.h"
#include "ggml-cpu.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#define GROUP_MAX_EPS 1e-15f
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
#define GROUP_MAX_EPS_IQ2_S 1e-8f
#define GROUP_MAX_EPS_IQ1_M 1e-7f
#define GROUP_MAX_EPS_IQ1_S 1e-12f

#define UNUSED GGML_UNUSED

static inline int best_index_int8(int n, const int8_t * val, float x) {
    if (x <= val[0]) return 0;
    if (x >= val[n-1]) return n-1;
    int ml = 0, mu = n-1;
    while (mu-ml > 1) {
        int mav = (ml+mu)/2;
        if (x < val[mav]) mu = mav; else ml = mav;
    }
    return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
}

// reference implementation for deterministic creation of model files
void quantize_row_q4_0_ref(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ?
1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < qk/2; ++j) { const float x0 = x[i*qk + 0 + j]*id; const float x1 = x[i*qk + qk/2 + j]*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); y[i].qs[j] = xi0; y[i].qs[j] |= xi1 << 4; } } } void quantize_row_q4_1_ref(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k) { const int qk = QK4_1; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { float min = FLT_MAX; float max = -FLT_MAX; for (int j = 0; j < qk; j++) { const float v = x[i*qk + j]; if (v < min) min = v; if (v > max) max = v; } const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); y[i].m = GGML_FP32_TO_FP16(min); for (int j = 0; j < qk/2; ++j) { const float x0 = (x[i*qk + 0 + j] - min)*id; const float x1 = (x[i*qk + qk/2 + j] - min)*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); y[i].qs[j] = xi0; y[i].qs[j] |= xi1 << 4; } } } void quantize_row_q5_0_ref(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k) { static const int qk = QK5_0; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max float max = 0.0f; for (int j = 0; j < qk; j++) { const float v = x[i*qk + j]; if (amax < fabsf(v)) { amax = fabsf(v); max = v; } } const float d = max / -16; const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); uint32_t qh = 0; for (int j = 0; j < qk/2; ++j) { const float x0 = x[i*qk + 0 + j]*id; const float x1 = x[i*qk + qk/2 + j]*id; const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); // get the 5-th bit and store it in qh at the right position qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); } memcpy(&y[i].qh, &qh, sizeof(qh)); } } void quantize_row_q5_1_ref(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k) { const int qk = QK5_1; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { float min = FLT_MAX; float max = -FLT_MAX; for (int j = 0; j < qk; j++) { const float v = x[i*qk + j]; if (v < min) min = v; if (v > max) max = v; } const float d = (max - min) / ((1 << 5) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); y[i].m = GGML_FP32_TO_FP16(min); uint32_t qh = 0; for (int j = 0; j < qk/2; ++j) { const float x0 = (x[i*qk + 0 + j] - min)*id; const float x1 = (x[i*qk + qk/2 + j] - min)*id; const uint8_t xi0 = (uint8_t)(x0 + 0.5f); const uint8_t xi1 = (uint8_t)(x1 + 0.5f); y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); // get the 5-th bit and store it in qh at the right position qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); } memcpy(&y[i].qh, &qh, sizeof(y[i].qh)); } } // reference implementation for deterministic creation of model files void quantize_row_q8_0_ref(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k) { assert(k % QK8_0 == 0); const int nb = k / QK8_0; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { const float v = x[i*QK8_0 + j]; amax = MAX(amax, fabsf(v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < QK8_0; ++j) { const float x0 = x[i*QK8_0 + j]*id; y[i].qs[j] = roundf(x0); } } } // reference implementation for deterministic creation of model files void quantize_row_q8_1_ref(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k) { assert(QK8_1 == 32); assert(k % QK8_1 == 0); const int nb = k / QK8_1; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max for (int j = 0; j < QK8_1; j++) { const float v = x[i*QK8_1 + j]; amax = MAX(amax, fabsf(v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); int sum = 0; for (int j = 0; j < QK8_1/2; ++j) { const float v0 = x[i*QK8_1 + j]*id; const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id; y[i].qs[ j] = roundf(v0); y[i].qs[QK8_1/2 + j] = roundf(v1); sum += y[i].qs[ j]; sum += y[i].qs[QK8_1/2 + j]; } y[i].s = GGML_FP32_TO_FP16(sum*d); } } static inline int best_index_mxfp4(float x, float e) { int best_index = 0; float best_err = fabsf(kvalues_mxfp4[0]*e - x); for (int i = 1; i < 16; i++) { float err = fabsf(kvalues_mxfp4[i]*e - x); if (err < best_err) { best_index = i; best_err = err; } } return best_index; } void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k) { static const int qk = QK_MXFP4; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { float amax = 0.0f; // absolute max for (int j = 0; j < qk; j++) { const float v = x[i*qk + j]; if (amax < fabsf(v)) { amax = fabsf(v); } } const uint8_t e = amax > 0.0f ? (uint8_t) (floorf(log2f(amax)) - 2 + 127) : 0; const float d = GGML_E8M0_TO_FP32_HALF(e); y[i].e = e; for (int j = 0; j < qk/2; ++j) { const uint8_t x0 = best_index_mxfp4(x[i*qk + 0 + j], d); const uint8_t x1 = best_index_mxfp4(x[i*qk + qk/2 + j], d); y[i].qs[j] = x0; y[i].qs[j] |= x1 << 4; } } } void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK4_0; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < qk/2; ++j) { const int x0 = (x[i].qs[j] & 0x0F) - 8; const int x1 = (x[i].qs[j] >> 4) - 8; y[i*qk + j + 0 ] = x0*d; y[i*qk + j + qk/2] = x1*d; } } } void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK4_1; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const float m = GGML_FP16_TO_FP32(x[i].m); for (int j = 0; j < qk/2; ++j) { const int x0 = (x[i].qs[j] & 0x0F); const int x1 = (x[i].qs[j] >> 4); y[i*qk + j + 0 ] = x0*d + m; y[i*qk + j + qk/2] = x1*d + m; } } } void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK5_0; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; y[i*qk + j + 0 ] = x0*d; y[i*qk + j + qk/2] = x1*d; } } } void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK5_1; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; 
i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const float m = GGML_FP16_TO_FP32(x[i].m); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); for (int j = 0; j < qk/2; ++j) { const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; const int x0 = (x[i].qs[j] & 0x0F) | xh_0; const int x1 = (x[i].qs[j] >> 4) | xh_1; y[i*qk + j + 0 ] = x0*d + m; y[i*qk + j + qk/2] = x1*d + m; } } } void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK8_0; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < qk; ++j) { y[i*qk + j] = x[i].qs[j]*d; } } } void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK_MXFP4; assert(k % qk == 0); const int nb = k / qk; for (int i = 0; i < nb; i++) { const float d = GGML_E8M0_TO_FP32_HALF(x[i].e); for (int j = 0; j < qk/2; ++j) { const int8_t x0 = kvalues_mxfp4[x[i].qs[j] & 0x0F]; const int8_t x1 = kvalues_mxfp4[x[i].qs[j] >> 4]; y[i*qk + j + 0 ] = x0*d; y[i*qk + j + qk/2] = x1*d; } } } // // 2-6 bit quantization in super-blocks // // // ===================== Helper functions // static inline int nearest_int(float fval) { assert(fabsf(fval) <= 4194303.f); float val = fval + 12582912.f; int i; memcpy(&i, &val, sizeof(int)); return (i & 0x007fffff) - 0x00400000; } static float make_qx_quants(int n, int nmax, const float * GGML_RESTRICT x, int8_t * GGML_RESTRICT L, int rmse_type, const float * GGML_RESTRICT qw) { float max = 0; float amax = 0; for (int i = 0; i < n; ++i) { float ax = fabsf(x[i]); if (ax > amax) { amax = ax; max = x[i]; } } if (amax < GROUP_MAX_EPS) { // all zero for (int i = 0; i < n; ++i) { L[i] = 0; } return 0.f; } float iscale = -nmax / max; if (rmse_type == 0) { for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); } return 1/iscale; } bool return_early = false; if (rmse_type < 0) { rmse_type = -rmse_type; return_early = true; } float sumlx = 0; float suml2 = 0; #ifdef HAVE_BUGGY_APPLE_LINKER // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 for (volatile int i = 0; i < n; ++i) { #else for (int i = 0; i < n; ++i) { #endif int l = nearest_int(iscale * x[i]); l = MAX(-nmax, MIN(nmax-1, l)); L[i] = l + nmax; float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i])); sumlx += w*x[i]*l; suml2 += w*l*l; } float scale = suml2 ? sumlx/suml2 : 0.0f; if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale; float best = scale * sumlx; for (int is = -9; is <= 9; ++is) { if (is == 0) { continue; } iscale = -(nmax + 0.1f*is) / max; sumlx = suml2 = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); l = MAX(-nmax, MIN(nmax-1, l)); float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? 
fabsf(x[i]) : sqrtf(fabsf(x[i])); sumlx += w*x[i]*l; suml2 += w*l*l; } if (suml2 > 0 && sumlx*sumlx > best*suml2) { for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); } scale = sumlx/suml2; best = scale*sumlx; } } return scale; } static float make_q3_quants(int n, int nmax, const float * GGML_RESTRICT x, int8_t * GGML_RESTRICT L, bool do_rmse) { float max = 0; float amax = 0; for (int i = 0; i < n; ++i) { float ax = fabsf(x[i]); if (ax > amax) { amax = ax; max = x[i]; } } if (amax < GROUP_MAX_EPS) { // all zero for (int i = 0; i < n; ++i) { L[i] = 0; } return 0.f; } float iscale = -nmax / max; if (do_rmse) { float sumlx = 0; float suml2 = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); l = MAX(-nmax, MIN(nmax-1, l)); L[i] = l; float w = x[i]*x[i]; sumlx += w*x[i]*l; suml2 += w*l*l; } for (int itry = 0; itry < 5; ++itry) { int n_changed = 0; for (int i = 0; i < n; ++i) { float w = x[i]*x[i]; float slx = sumlx - w*x[i]*L[i]; if (slx > 0) { float sl2 = suml2 - w*L[i]*L[i]; int new_l = nearest_int(x[i] * sl2 / slx); new_l = MAX(-nmax, MIN(nmax-1, new_l)); if (new_l != L[i]) { slx += w*x[i]*new_l; sl2 += w*new_l*new_l; if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { L[i] = new_l; sumlx = slx; suml2 = sl2; ++n_changed; } } } } if (!n_changed) { break; } } for (int i = 0; i < n; ++i) { L[i] += nmax; } return suml2 > 0.0f ? sumlx / suml2 : 0.0f; } for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); l = MAX(-nmax, MIN(nmax-1, l)); L[i] = l + nmax; } return 1/iscale; } static float make_qkx1_quants(int n, int nmax, const float * GGML_RESTRICT x, uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, int ntry, float alpha) { float min = x[0]; float max = x[0]; for (int i = 1; i < n; ++i) { if (x[i] < min) min = x[i]; if (x[i] > max) max = x[i]; } if (max == min) { for (int i = 0; i < n; ++i) L[i] = 0; *the_min = 0; return 0.f; } if (min > 0) min = 0; float iscale = nmax/(max - min); float scale = 1/iscale; for (int itry = 0; itry < ntry; ++itry) { float sumlx = 0; int suml2 = 0; bool did_change = false; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); l = MAX(0, MIN(nmax, l)); if (l != L[i]) { L[i] = l; did_change = true; } sumlx += (x[i] - min)*l; suml2 += l*l; } scale = sumlx/suml2; float sum = 0; for (int i = 0; i < n; ++i) { sum += x[i] - scale*L[i]; } min = alpha*min + (1 - alpha)*sum/n; if (min > 0) min = 0; iscale = 1/scale; if (!did_change) break; } *the_min = -min; return scale; } static float make_qkx2_quants(int n, int nmax, const float * GGML_RESTRICT x, const float * GGML_RESTRICT weights, uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, uint8_t * GGML_RESTRICT Laux, float rmin, float rdelta, int nstep, bool use_mad) { float min = x[0]; float max = x[0]; float sum_w = weights[0]; float sum_x = sum_w * x[0]; #ifdef HAVE_BUGGY_APPLE_LINKER // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 for (volatile int i = 1; i < n; ++i) { #else for (int i = 1; i < n; ++i) { #endif if (x[i] < min) min = x[i]; if (x[i] > max) max = x[i]; float w = weights[i]; sum_w += w; sum_x += w * x[i]; } if (min > 0) min = 0; if (max == min) { for (int i = 0; i < n; ++i) L[i] = 0; *the_min = -min; return 0.f; } float iscale = nmax/(max - min); float scale = 1/iscale; float best_error = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); L[i] = MAX(0, MIN(nmax, l)); float diff = scale * L[i] + min - x[i]; diff = use_mad ? 
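// make_qkx2_quants fits x[i] ~= scale*L[i] + min with L[i] in [0, nmax], i.e. the
// affine (scale + offset) quantization used by the K-quants. The error accumulated
// here is either absolute (use_mad) or squared; the loop over `is` further down
// tries nstep+1 candidate inverse scales (rmin + rdelta*is + nmax)/(max - min),
// solves the 2x2 weighted least-squares system for (scale, min) with min clamped
// to <= 0, and keeps the pair with the smallest error.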
fabsf(diff) : diff * diff; float w = weights[i]; best_error += w * diff; } if (nstep < 1) { *the_min = -min; return scale; } for (int is = 0; is <= nstep; ++is) { iscale = (rmin + rdelta*is + nmax)/(max - min); float sum_l = 0, sum_l2 = 0, sum_xl = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); l = MAX(0, MIN(nmax, l)); Laux[i] = l; float w = weights[i]; sum_l += w*l; sum_l2 += w*l*l; sum_xl += w*l*x[i]; } float D = sum_w * sum_l2 - sum_l * sum_l; if (D > 0) { float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; if (this_min > 0) { this_min = 0; this_scale = sum_xl / sum_l2; } float cur_error = 0; for (int i = 0; i < n; ++i) { float diff = this_scale * Laux[i] + this_min - x[i]; diff = use_mad ? fabsf(diff) : diff * diff; float w = weights[i]; cur_error += w * diff; } if (cur_error < best_error) { for (int i = 0; i < n; ++i) { L[i] = Laux[i]; } best_error = cur_error; scale = this_scale; min = this_min; } } } *the_min = -min; return scale; } static inline void get_scale_min_k4(int j, const uint8_t * GGML_RESTRICT q, uint8_t * GGML_RESTRICT d, uint8_t * GGML_RESTRICT m) { if (j < 4) { *d = q[j] & 63; *m = q[j + 4] & 63; } else { *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); } } //========================- 2-bit (de)-quantization void quantize_row_q2_K_ref(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; uint8_t L[QK_K]; uint8_t Laux[16]; float weights[16]; float mins[QK_K/16]; float scales[QK_K/16]; const float q4scale = 15.f; for (int i = 0; i < nb; i++) { float max_scale = 0; // as we are deducting the min, scales are always positive float max_min = 0; for (int j = 0; j < QK_K/16; ++j) { for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]); scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true); float scale = scales[j]; if (scale > max_scale) { max_scale = scale; } float min = mins[j]; if (min > max_min) { max_min = min; } } if (max_scale > 0) { float iscale = q4scale/max_scale; for (int j = 0; j < QK_K/16; ++j) { int l = nearest_int(iscale*scales[j]); y[i].scales[j] = l; } y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale); } else { for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0; y[i].d = GGML_FP32_TO_FP16(0.f); } if (max_min > 0) { float iscale = q4scale/max_min; for (int j = 0; j < QK_K/16; ++j) { int l = nearest_int(iscale*mins[j]); y[i].scales[j] |= (l << 4); } y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale); } else { y[i].dmin = GGML_FP32_TO_FP16(0.f); } for (int j = 0; j < QK_K/16; ++j) { const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF); if (!d) continue; const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4); for (int ii = 0; ii < 16; ++ii) { int l = nearest_int((x[16*j + ii] + dm)/d); l = MAX(0, MIN(3, l)); L[16*j + ii] = l; } } for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); } } x += QK_K; } } void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const float min = GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * q = x[i].qs; int is = 0; float dl, ml; for (int n = 0; n < QK_K; n += 128) { int shift = 0; for (int j = 0; 
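// Q2_K layout reminder: a super-block covers QK_K = 256 values split into 16
// sub-blocks of 16. Each byte of scales[] packs a 4-bit scale (low nibble) and a
// 4-bit min (high nibble), rescaled by the fp16 super-block d and dmin. The 2-bit
// quants sit 4 per byte in qs, and the loops here walk 128 values at a time,
// peeling two bits per pass via `shift`.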
j < 4; ++j) { uint8_t sc = x[i].scales[is++]; dl = d * (sc & 0xF); ml = min * (sc >> 4); for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml; sc = x[i].scales[is++]; dl = d * (sc & 0xF); ml = min * (sc >> 4); for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml; shift += 2; } q += 32; } } } static float make_qkx3_quants(int n, int nmax, const float * GGML_RESTRICT x, const float * GGML_RESTRICT weights, uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, uint8_t * GGML_RESTRICT Laux, float rmin, float rdelta, int nstep, bool use_mad) { float min = x[0]; float max = x[0]; float sum_w = weights ? weights[0] : x[0]*x[0]; float sum_x = sum_w * x[0]; #ifdef HAVE_BUGGY_APPLE_LINKER // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 for (volatile int i = 1; i < n; ++i) { #else for (int i = 1; i < n; ++i) { #endif if (x[i] < min) min = x[i]; if (x[i] > max) max = x[i]; float w = weights ? weights[i] : x[i]*x[i]; sum_w += w; sum_x += w * x[i]; } if (min > 0) { min = 0; } if (max <= min) { memset(L, 0, n); *the_min = -min; return 0.f; } float iscale = nmax/(max - min); float scale = 1/iscale; float best_mad = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); L[i] = MAX(0, MIN(nmax, l)); float diff = scale * L[i] + min - x[i]; diff = use_mad ? fabsf(diff) : diff*diff; float w = weights ? weights[i] : x[i]*x[i]; best_mad += w * diff; } if (nstep < 1) { *the_min = -min; return scale; } for (int is = 0; is <= nstep; ++is) { iscale = (rmin + rdelta*is + nmax)/(max - min); float sum_l = 0, sum_l2 = 0, sum_xl = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); l = MAX(0, MIN(nmax, l)); Laux[i] = l; float w = weights ? weights[i] : x[i]*x[i]; sum_l += w*l; sum_l2 += w*l*l; sum_xl += w*l*x[i]; } float D = sum_w * sum_l2 - sum_l * sum_l; if (D > 0) { float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; if (this_min > 0) { this_min = 0; this_scale = sum_xl / sum_l2; } float mad = 0; for (int i = 0; i < n; ++i) { float diff = this_scale * Laux[i] + this_min - x[i]; diff = use_mad ? fabsf(diff) : diff*diff; float w = weights ? 
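// make_qkx3_quants is the same affine fit as make_qkx2_quants above, except that
// `weights` may be NULL, in which case every element is weighted by x[i]*x[i]
// (the ternary below). It is the variant used by the quant_weights-aware
// quantization paths in this file (Q2_K, Q4_K, Q5_K, Q4_1, Q5_1).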
weights[i] : x[i]*x[i]; mad += w * diff; } if (mad < best_mad) { for (int i = 0; i < n; ++i) { L[i] = Laux[i]; } best_mad = mad; scale = this_scale; min = this_min; } } } *the_min = -min; return scale; } static float make_qp_quants(int n, int nmax, const float * GGML_RESTRICT x, uint8_t * GGML_RESTRICT L, const float * quant_weights) { float max = 0; for (int i = 0; i < n; ++i) { max = MAX(max, x[i]); } if (max < GROUP_MAX_EPS) { // all zero for (int i = 0; i < n; ++i) { L[i] = 0; } return 0.f; } float iscale = nmax / max; for (int i = 0; i < n; ++i) { L[i] = nearest_int(iscale * x[i]); } float scale = 1/iscale; float best_mse = 0; for (int i = 0; i < n; ++i) { float diff = x[i] - scale*L[i]; float w = quant_weights[i]; best_mse += w*diff*diff; } for (int is = -4; is <= 4; ++is) { if (is == 0) continue; float iscale_is = (0.1f*is + nmax)/max; float scale_is = 1/iscale_is; float mse = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale_is*x[i]); l = MIN(nmax, l); float diff = x[i] - scale_is*l; float w = quant_weights[i]; mse += w*diff*diff; } if (mse < best_mse) { best_mse = mse; iscale = iscale_is; } } float sumlx = 0; float suml2 = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); l = MIN(nmax, l); L[i] = l; float w = quant_weights[i]; sumlx += w*x[i]*l; suml2 += w*l*l; } for (int itry = 0; itry < 5; ++itry) { int n_changed = 0; for (int i = 0; i < n; ++i) { float w = quant_weights[i]; float slx = sumlx - w*x[i]*L[i]; float sl2 = suml2 - w*L[i]*L[i]; if (slx > 0 && sl2 > 0) { int new_l = nearest_int(x[i] * sl2 / slx); new_l = MIN(nmax, new_l); if (new_l != L[i]) { slx += w*x[i]*new_l; sl2 += w*new_l*new_l; if (slx*slx*suml2 > sumlx*sumlx*sl2) { L[i] = new_l; sumlx = slx; suml2 = sl2; ++n_changed; } } } } if (!n_changed) { break; } } return suml2 > 0.0f ? 
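// make_qp_quants (this function) quantizes a vector of non-negative values (the
// per-sub-block scales or mins of the K-quants) to integers in [0, nmax],
// returning the shared scale. It grid-searches a few inverse scales by weighted
// MSE and then greedily re-assigns individual L[i] while that improves
// sum(w*x*l)^2 / sum(w*l*l). Q2_K uses it with nmax = 15, Q4_K/Q5_K with 63.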
sumlx / suml2 : 0.0f; } static void quantize_row_q2_K_impl(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int k, const float * GGML_RESTRICT quant_weights) { GGML_ASSERT(quant_weights); assert(k % QK_K == 0); const int nb = k / QK_K; const bool requantize = true; uint8_t L[QK_K]; uint8_t Laux[16]; float mins[QK_K/16]; float scales[QK_K/16]; float sw[QK_K/16]; float weight[16]; uint8_t Ls[QK_K/16], Lm[QK_K/16]; for (int i = 0; i < nb; i++) { memset(sw, 0, QK_K/16*sizeof(float)); float sumx2 = 0; for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j]; float sigma2 = sumx2/QK_K; for (int j = 0; j < QK_K/16; ++j) { const float * GGML_RESTRICT qw = quant_weights + QK_K * i + 16*j; for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]); for (int l = 0; l < QK_K/16; ++l) sw[j] += weight[l]; scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false); } float dm, mm; dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw); mm = make_qp_quants(QK_K/16, 15, mins, Lm, sw); y[i].d = GGML_FP32_TO_FP16(dm); y[i].dmin = GGML_FP32_TO_FP16(mm); dm = GGML_FP16_TO_FP32(y[i].d); mm = GGML_FP16_TO_FP32(y[i].dmin); for (int j = 0; j < QK_K/16; ++j) { y[i].scales[j] = Ls[j] | (Lm[j] << 4); } if (requantize) { for (int j = 0; j < QK_K/16; ++j) { const float d = dm * (y[i].scales[j] & 0xF); if (!d) continue; const float m = mm * (y[i].scales[j] >> 4); for (int ii = 0; ii < 16; ++ii) { int l = nearest_int((x[16*j + ii] + m)/d); l = MAX(0, MIN(3, l)); L[16*j + ii] = l; } } } for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); } } x += QK_K; } } size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row); if (!quant_weights) { quantize_row_q2_K_ref(src, dst, (int64_t)nrow*n_per_row); } else { char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } } return nrow * row_size; } //========================= 3-bit (de)-quantization void quantize_row_q3_K_ref(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; int8_t L[QK_K]; float scales[QK_K / 16]; for (int i = 0; i < nb; i++) { float max_scale = 0; float amax = 0; for (int j = 0; j < QK_K/16; ++j) { scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true); float scale = fabsf(scales[j]); if (scale > amax) { amax = scale; max_scale = scales[j]; } } memset(y[i].scales, 0, 12); if (max_scale) { float iscale = -32.f/max_scale; for (int j = 0; j < QK_K/16; ++j) { int8_t l = nearest_int(iscale*scales[j]); l = MAX(-32, MIN(31, l)) + 32; if (j < 8) { y[i].scales[j] = l & 0xF; } else { y[i].scales[j-8] |= ((l & 0xF) << 4); } l >>= 4; y[i].scales[j%4 + 8] |= (l << (2*(j/4))); } y[i].d = GGML_FP32_TO_FP16(1/iscale); } else { y[i].d = GGML_FP32_TO_FP16(0.f); } int8_t sc; for (int j = 0; j < QK_K/16; ++j) { sc = j < 8 ? 
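// Q3_K packs 16 six-bit sub-block scales into 12 bytes: the low 4 bits live in
// scales[0..7] (two sub-blocks per byte) and the high 2 bits are spread over
// scales[8..11]. The expression below re-assembles the signed 6-bit value
// (offset by 32) so the quants can be recomputed against the final fp16
// super-block scale d.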
y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) { continue; } for (int ii = 0; ii < 16; ++ii) { int l = nearest_int(x[16*j + ii]/d); l = MAX(-4, MIN(3, l)); L[16*j + ii] = l + 4; } } memset(y[i].hmask, 0, QK_K/8); // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc. int m = 0; uint8_t hm = 1; for (int j = 0; j < QK_K; ++j) { if (L[j] > 3) { y[i].hmask[m] |= hm; L[j] -= 4; } if (++m == QK_K/8) { m = 0; hm <<= 1; } } for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); } } x += QK_K; } } void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; uint32_t aux[4]; const int8_t * scales = (const int8_t*)aux; for (int i = 0; i < nb; i++) { const float d_all = GGML_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q = x[i].qs; const uint8_t * GGML_RESTRICT hm = x[i].hmask; uint8_t m = 1; memcpy(aux, x[i].scales, 12); uint32_t tmp = aux[2]; aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); int is = 0; float dl; for (int n = 0; n < QK_K; n += 128) { int shift = 0; for (int j = 0; j < 4; ++j) { dl = d_all * (scales[is++] - 32); for (int l = 0; l < 16; ++l) { *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4)); } dl = d_all * (scales[is++] - 32); for (int l = 0; l < 16; ++l) { *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4)); } shift += 2; m <<= 1; } q += 32; } } } static void quantize_row_q3_K_impl(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t n_per_row, const float * GGML_RESTRICT quant_weights) { assert(n_per_row % QK_K == 0); const int nb = n_per_row / QK_K; int8_t L[QK_K]; float scales[QK_K / 16]; float weight[16]; float sw[QK_K / 16]; int8_t Ls[QK_K / 16]; for (int i = 0; i < nb; i++) { float sumx2 = 0; for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j]; float sigma2 = 2*sumx2/QK_K; for (int j = 0; j < QK_K/16; ++j) { if (quant_weights) { const float * qw = quant_weights + QK_K * i + 16*j; for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]); } else { for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l]; } float sumw = 0; for (int l = 0; l < 16; ++l) sumw += weight[l]; sw[j] = sumw; scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight); } memset(y[i].scales, 0, 12); float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw); for (int j = 0; j < QK_K/16; ++j) { int l = Ls[j]; if (j < 8) { y[i].scales[j] = l & 0xF; } else { y[i].scales[j-8] |= ((l & 0xF) << 4); } l >>= 4; y[i].scales[j%4 + 8] |= (l << (2*(j/4))); } y[i].d = GGML_FP32_TO_FP16(d_block); int8_t sc; for (int j = 0; j < QK_K/16; ++j) { sc = j < 8 ? 
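// quantize_row_q3_K_impl differs from the _ref version above in that each
// sub-block is fitted by make_qx_quants with per-element weights (either the
// caller's quant_weights scaled by sqrtf(sigma2 + x^2), or plain x^2), and the
// 16 sub-block scales are themselves quantized to 6 bits by a second
// make_qx_quants pass before the quants are recomputed below.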
y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) { continue; } for (int ii = 0; ii < 16; ++ii) { int l = nearest_int(x[16*j + ii]/d); l = MAX(-4, MIN(3, l)); L[16*j + ii] = l + 4; } } memset(y[i].hmask, 0, QK_K/8); // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc. int m = 0; uint8_t hm = 1; for (int j = 0; j < QK_K; ++j) { if (L[j] > 3) { y[i].hmask[m] |= hm; L[j] -= 4; } if (++m == QK_K/8) { m = 0; hm <<= 1; } } for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); } } x += QK_K; } } size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row); if (!quant_weights) { quantize_row_q3_K_ref(src, dst, (int64_t)nrow*n_per_row); } else { char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } } return nrow * row_size; } // ====================== 4-bit (de)-quantization void quantize_row_q4_K_ref(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; uint8_t L[QK_K]; uint8_t Laux[32]; float weights[32]; float mins[QK_K/32]; float scales[QK_K/32]; for (int i = 0; i < nb; i++) { float max_scale = 0; // as we are deducting the min, scales are always positive float max_min = 0; for (int j = 0; j < QK_K/32; ++j) { //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); float sum_x2 = 0; for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; float av_x = sqrtf(sum_x2/32); for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false); float scale = scales[j]; if (scale > max_scale) { max_scale = scale; } float min = mins[j]; if (min > max_min) { max_min = min; } } float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; float inv_min = max_min > 0 ? 
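// Q4_K reference path: each of the 8 sub-blocks of 32 gets an affine fit via
// make_qkx2_quants, then the sub-block scales and mins are quantized to 6 bits
// against max_scale/63 and max_min/63 and packed into the 12-byte scales[]
// layout that get_scale_min_k4() unpacks. Worked example: with max_scale = 1.26
// the stored super-scale is d = 1.26/63 = 0.02, and a sub-block scale of 0.50 is
// encoded as round(0.50 * 63/1.26) = 25, which dequantizes back to 25 * 0.02 = 0.50.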
63.f/max_min : 0.f; for (int j = 0; j < QK_K/32; ++j) { uint8_t ls = nearest_int(inv_scale*scales[j]); uint8_t lm = nearest_int(inv_min*mins[j]); ls = MIN(63, ls); lm = MIN(63, lm); if (j < 4) { y[i].scales[j] = ls; y[i].scales[j+4] = lm; } else { y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); y[i].scales[j-4] |= ((ls >> 4) << 6); y[i].scales[j-0] |= ((lm >> 4) << 6); } } y[i].d = GGML_FP32_TO_FP16(max_scale/63.f); y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f); uint8_t sc, m; for (int j = 0; j < QK_K/32; ++j) { get_scale_min_k4(j, y[i].scales, &sc, &m); const float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) continue; const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + dm)/d); l = MAX(0, MIN(15, l)); L[32*j + ii] = l; } } uint8_t * q = y[i].qs; for (int j = 0; j < QK_K; j += 64) { for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4); q += 32; } x += QK_K; } } void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; for (int i = 0; i < nb; i++) { const uint8_t * q = x[i].qs; const float d = GGML_FP16_TO_FP32(x[i].d); const float min = GGML_FP16_TO_FP32(x[i].dmin); int is = 0; uint8_t sc, m; for (int j = 0; j < QK_K; j += 64) { get_scale_min_k4(is + 0, x[i].scales, &sc, &m); const float d1 = d * sc; const float m1 = min * m; get_scale_min_k4(is + 1, x[i].scales, &sc, &m); const float d2 = d * sc; const float m2 = min * m; for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1; for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2; q += 32; is += 2; } } } static void quantize_row_q4_K_impl(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { assert(n_per_row % QK_K == 0); const int64_t nb = n_per_row / QK_K; uint8_t L[QK_K]; uint8_t Laux[32]; uint8_t Ls[QK_K/32]; uint8_t Lm[QK_K/32]; float weights[32]; float sw[QK_K/32]; float mins[QK_K/32]; float scales[QK_K/32]; for (int i = 0; i < nb; i++) { float sum_x2 = 0; for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l]; float sigma2 = 2*sum_x2/QK_K; float av_x = sqrtf(sigma2); for (int j = 0; j < QK_K/32; ++j) { if (quant_weights) { const float * qw = quant_weights + QK_K*i + 32*j; for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]); } else { for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); } float sumw = 0; for (int l = 0; l < 32; ++l) sumw += weights[l]; sw[j] = sumw; scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false); } float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw); float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw); for (int j = 0; j < QK_K/32; ++j) { uint8_t ls = Ls[j]; uint8_t lm = Lm[j]; if (j < 4) { y[i].scales[j] = ls; y[i].scales[j+4] = lm; } else { y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); y[i].scales[j-4] |= ((ls >> 4) << 6); y[i].scales[j-0] |= ((lm >> 4) << 6); } } y[i].d = GGML_FP32_TO_FP16(d_block); y[i].dmin = GGML_FP32_TO_FP16(m_block); uint8_t sc, m; for (int j = 0; j < QK_K/32; ++j) { get_scale_min_k4(j, y[i].scales, &sc, &m); const float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) continue; const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + dm)/d); l = MAX(0, MIN(15, l)); L[32*j + ii] = l; } } uint8_t * q = y[i].qs; for (int j = 0; j < QK_K; j += 64) { for (int l = 0; l < 32; 
++l) q[l] = L[j + l] | (L[j + l + 32] << 4); q += 32; } x += QK_K; } } size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row); if (!quant_weights) { quantize_row_q4_K_ref(src, dst, (int64_t)nrow*n_per_row); } else { char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } } return nrow * row_size; } // ====================== 5-bit (de)-quantization void quantize_row_q5_K_ref(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; uint8_t L[QK_K]; float mins[QK_K/32]; float scales[QK_K/32]; float weights[32]; uint8_t Laux[32]; for (int i = 0; i < nb; i++) { float max_scale = 0; // as we are deducting the min, scales are always positive float max_min = 0; for (int j = 0; j < QK_K/32; ++j) { //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); float sum_x2 = 0; for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; float av_x = sqrtf(sum_x2/32); for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false); float scale = scales[j]; if (scale > max_scale) { max_scale = scale; } float min = mins[j]; if (min > max_min) { max_min = min; } } float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; float inv_min = max_min > 0 ? 63.f/max_min : 0.f; for (int j = 0; j < QK_K/32; ++j) { uint8_t ls = nearest_int(inv_scale*scales[j]); uint8_t lm = nearest_int(inv_min*mins[j]); ls = MIN(63, ls); lm = MIN(63, lm); if (j < 4) { y[i].scales[j] = ls; y[i].scales[j+4] = lm; } else { y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); y[i].scales[j-4] |= ((ls >> 4) << 6); y[i].scales[j-0] |= ((lm >> 4) << 6); } } y[i].d = GGML_FP32_TO_FP16(max_scale/63.f); y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f); uint8_t sc, m; for (int j = 0; j < QK_K/32; ++j) { get_scale_min_k4(j, y[i].scales, &sc, &m); const float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) continue; const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + dm)/d); l = MAX(0, MIN(31, l)); L[32*j + ii] = l; } } uint8_t * GGML_RESTRICT qh = y[i].qh; uint8_t * GGML_RESTRICT ql = y[i].qs; memset(qh, 0, QK_K/8); uint8_t m1 = 1, m2 = 2; for (int n = 0; n < QK_K; n += 64) { for (int j = 0; j < 32; ++j) { int l1 = L[n + j]; if (l1 > 15) { l1 -= 16; qh[j] |= m1; } int l2 = L[n + j + 32]; if (l2 > 15) { l2 -= 16; qh[j] |= m2; } ql[j] = l1 | (l2 << 4); } m1 <<= 2; m2 <<= 2; ql += 32; } x += QK_K; } } void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { const uint8_t * ql = x[i].qs; const uint8_t * qh = x[i].qh; const float d = GGML_FP16_TO_FP32(x[i].d); const float min = GGML_FP16_TO_FP32(x[i].dmin); int is = 0; uint8_t sc, m; uint8_t u1 = 1, u2 = 2; for (int j = 0; j < QK_K; j += 64) { get_scale_min_k4(is + 0, x[i].scales, &sc, &m); const float d1 = d * sc; const float m1 = min * m; get_scale_min_k4(is + 1, x[i].scales, &sc, &m); const float d2 = d * sc; const float m2 = min * m; for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 
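// Q5_K keeps the 5th bit of every quant in qh[32]: the masks u1/u2 are shifted
// left by 2 after every 64 values, so over the whole super-block each qh byte
// supplies the high bit of 8 different quants. The ternaries here add 16 back
// when that bit is set before applying the sub-block scale and min.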
16 : 0)) - m1; for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2; ql += 32; is += 2; u1 <<= 2; u2 <<= 2; } } } static void quantize_row_q5_K_impl(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { assert(n_per_row % QK_K == 0); const int64_t nb = n_per_row / QK_K; uint8_t L[QK_K]; uint8_t Laux[32]; uint8_t Ls[QK_K/32]; uint8_t Lm[QK_K/32]; float mins[QK_K/32]; float scales[QK_K/32]; float sw[QK_K/32]; float weights[32]; for (int i = 0; i < nb; i++) { float sum_x2 = 0; for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l]; float sigma2 = 2*sum_x2/QK_K; float av_x = sqrtf(sigma2); for (int j = 0; j < QK_K/32; ++j) { if (quant_weights) { const float * qw = quant_weights + QK_K*i + 32*j; for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]); } else { for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); } float sumw = 0; for (int l = 0; l < 32; ++l) sumw += weights[l]; sw[j] = sumw; scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false); } float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw); float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw); for (int j = 0; j < QK_K/32; ++j) { uint8_t ls = Ls[j]; uint8_t lm = Lm[j]; ls = MIN(63, ls); lm = MIN(63, lm); if (j < 4) { y[i].scales[j] = ls; y[i].scales[j+4] = lm; } else { y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); y[i].scales[j-4] |= ((ls >> 4) << 6); y[i].scales[j-0] |= ((lm >> 4) << 6); } } y[i].d = GGML_FP32_TO_FP16(d_block); y[i].dmin = GGML_FP32_TO_FP16(m_block); uint8_t sc, m; for (int j = 0; j < QK_K/32; ++j) { get_scale_min_k4(j, y[i].scales, &sc, &m); const float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) continue; const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + dm)/d); l = MAX(0, MIN(31, l)); L[32*j + ii] = l; } } uint8_t * GGML_RESTRICT qh = y[i].qh; uint8_t * GGML_RESTRICT ql = y[i].qs; memset(qh, 0, QK_K/8); uint8_t m1 = 1, m2 = 2; for (int n = 0; n < QK_K; n += 64) { for (int j = 0; j < 32; ++j) { int l1 = L[n + j]; if (l1 > 15) { l1 -= 16; qh[j] |= m1; } int l2 = L[n + j + 32]; if (l2 > 15) { l2 -= 16; qh[j] |= m2; } ql[j] = l1 | (l2 << 4); } m1 <<= 2; m2 <<= 2; ql += 32; } x += QK_K; } } size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row); if (!quant_weights) { quantize_row_q5_K_ref(src, dst, (int64_t)nrow*n_per_row); } else { char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } } return nrow * row_size; } // ====================== 6-bit (de)-quantization void quantize_row_q6_K_ref(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; int8_t L[QK_K]; float scales[QK_K/16]; for (int i = 0; i < nb; i++) { float max_scale = 0; float max_abs_scale = 0; for (int ib = 0; ib < QK_K/16; ++ib) { const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL); scales[ib] = scale; const float abs_scale = fabsf(scale); if (abs_scale > max_abs_scale) { max_abs_scale = abs_scale; max_scale = scale; } } if (max_abs_scale < GROUP_MAX_EPS) { memset(&y[i], 0, sizeof(block_q6_K)); y[i].d = 
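// Q6_K: 16 sub-blocks of 16 values share one fp16 super-scale d and carry a
// signed 8-bit scale each; quants are 6-bit (low 4 bits in ql, high 2 bits in
// qh, offset by 32). This branch just zeroes the block when all inputs are
// (near) zero. Note the -128.f/max_scale convention used right after: the sign
// is folded into d so that the largest-magnitude sub-block scale is stored as -128.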
GGML_FP32_TO_FP16(0.f); x += QK_K; continue; } float iscale = -128.f/max_scale; y[i].d = GGML_FP32_TO_FP16(1/iscale); for (int ib = 0; ib < QK_K/16; ++ib) { y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); } for (int j = 0; j < QK_K/16; ++j) { float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; if (!d) { continue; } for (int ii = 0; ii < 16; ++ii) { int l = nearest_int(x[16*j + ii]/d); l = MAX(-32, MIN(31, l)); L[16*j + ii] = l + 32; } } uint8_t * GGML_RESTRICT ql = y[i].ql; uint8_t * GGML_RESTRICT qh = y[i].qh; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { const uint8_t q1 = L[j + l + 0] & 0xF; const uint8_t q2 = L[j + l + 32] & 0xF; const uint8_t q3 = L[j + l + 64] & 0xF; const uint8_t q4 = L[j + l + 96] & 0xF; ql[l+ 0] = q1 | (q3 << 4); ql[l+32] = q2 | (q4 << 4); qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6); } ql += 64; qh += 32; } x += QK_K; } } void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT ql = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; const int8_t * GGML_RESTRICT sc = x[i].scales; for (int n = 0; n < QK_K; n += 128) { for (int l = 0; l < 32; ++l) { int is = l/16; const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; y[l + 0] = d * sc[is + 0] * q1; y[l + 32] = d * sc[is + 2] * q2; y[l + 64] = d * sc[is + 4] * q3; y[l + 96] = d * sc[is + 6] * q4; } y += 128; ql += 64; qh += 32; sc += 8; } } } static void quantize_row_q6_K_impl(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { assert(n_per_row % QK_K == 0); const int64_t nb = n_per_row / QK_K; int8_t L[QK_K]; float scales[QK_K/16]; //float weights[16]; for (int i = 0; i < nb; i++) { //float sum_x2 = 0; //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j]; //float sigma2 = sum_x2/QK_K; float max_scale = 0; float max_abs_scale = 0; for (int ib = 0; ib < QK_K/16; ++ib) { float scale; if (quant_weights) { const float * qw = quant_weights + QK_K*i + 16*ib; //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]); //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights); scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw); } else { scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL); } scales[ib] = scale; const float abs_scale = fabsf(scale); if (abs_scale > max_abs_scale) { max_abs_scale = abs_scale; max_scale = scale; } } if (max_abs_scale < GROUP_MAX_EPS) { memset(&y[i], 0, sizeof(block_q6_K)); y[i].d = GGML_FP32_TO_FP16(0.f); x += QK_K; continue; } float iscale = -128.f/max_scale; y[i].d = GGML_FP32_TO_FP16(1/iscale); for (int ib = 0; ib < QK_K/16; ++ib) { y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); } for (int j = 0; j < QK_K/16; ++j) { float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; if (!d) { continue; } for (int ii = 0; ii < 16; ++ii) { int l = nearest_int(x[16*j + ii]/d); l = MAX(-32, MIN(31, l)); L[16*j + ii] = l + 32; } } uint8_t * GGML_RESTRICT ql = y[i].ql; uint8_t * GGML_RESTRICT qh = y[i].qh; for (int j = 
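// 6-bit packing (same as in quantize_row_q6_K_ref): within every 128-value
// chunk, ql[l] holds the low nibbles of groups 0 and 2, ql[l+32] those of
// groups 1 and 3, and qh[l] collects the four high 2-bit parts shifted by
// 0, 2, 4 and 6. dequantize_row_q6_K above undoes exactly this arrangement.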
0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { const uint8_t q1 = L[j + l + 0] & 0xF; const uint8_t q2 = L[j + l + 32] & 0xF; const uint8_t q3 = L[j + l + 64] & 0xF; const uint8_t q4 = L[j + l + 96] & 0xF; ql[l+ 0] = q1 | (q3 << 4); ql[l+32] = q2 | (q4 << 4); qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6); } ql += 64; qh += 32; } x += QK_K; } } size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row); if (!quant_weights) { quantize_row_q6_K_ref(src, dst, (int64_t)nrow*n_per_row); } else { char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } } return nrow * row_size; } static void quantize_row_q4_0_impl(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { static_assert(QK4_0 == 32, "QK4_0 must be 32"); if (!quant_weights) { quantize_row_q4_0_ref(x, y, n_per_row); return; } float weight[QK4_0]; int8_t L[QK4_0]; float sum_x2 = 0; for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; float sigma2 = sum_x2/n_per_row; const int64_t nb = n_per_row/QK4_0; for (int ib = 0; ib < nb; ++ib) { const float * xb = x + QK4_0 * ib; const float * qw = quant_weights + QK4_0 * ib; for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight); y[ib].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < 16; ++j) { y[ib].qs[j] = L[j] | (L[j+16] << 4); } } } size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { if (!quant_weights) { quantize_row_q4_0_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * ggml_row_size(GGML_TYPE_Q4_0, n_per_row); } size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row); char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } return nrow * row_size; } static void quantize_row_q4_1_impl(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { static_assert(QK4_1 == 32, "QK4_1 must be 32"); if (!quant_weights) { quantize_row_q4_1_ref(x, y, n_per_row); return; } float weight[QK4_1]; uint8_t L[QK4_1], Laux[QK4_1]; float sum_x2 = 0; for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; float sigma2 = sum_x2/n_per_row; const int64_t nb = n_per_row/QK4_1; for (int ib = 0; ib < nb; ++ib) { const float * xb = x + QK4_1 * ib; const float * qw = quant_weights + QK4_1 * ib; for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); float min; float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false); y[ib].d = GGML_FP32_TO_FP16(d); y[ib].m = GGML_FP32_TO_FP16(-min); for (int j = 0; j < 16; ++j) { y[ib].qs[j] = L[j] | (L[j+16] << 4); } } } size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { if (!quant_weights) { quantize_row_q4_1_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * ggml_row_size(GGML_TYPE_Q4_1, n_per_row); } size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row); char * qrow = (char *)dst; 
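// All quantize_qX_Y() entry points in this file follow the same pattern: compute
// the row size with ggml_row_size(), then either run the _ref quantizer over the
// whole buffer (no importance weights) or, as here, loop row by row through the
// _impl variant with the caller's quant_weights. A minimal usage sketch with
// hypothetical sizes (no error handling, importance weights omitted):
//
//     const int64_t nrow = 4, n_per_row = 256;   // n_per_row must be a multiple of the block size
//     float * src = malloc(nrow * n_per_row * sizeof(float));
//     void  * dst = malloc(nrow * ggml_row_size(GGML_TYPE_Q4_1, n_per_row));
//     /* ... fill src ... */
//     size_t written = quantize_q4_1(src, dst, nrow, n_per_row, /*quant_weights=*/NULL);
//     // written == nrow * ggml_row_size(GGML_TYPE_Q4_1, n_per_row)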
for (int64_t row = 0; row < nrow; ++row) { quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } return nrow * row_size; } static void quantize_row_q5_0_impl(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { static_assert(QK5_0 == 32, "QK5_0 must be 32"); if (!quant_weights) { quantize_row_q5_0_ref(x, y, n_per_row); return; } float weight[QK5_0]; int8_t L[QK5_0]; float sum_x2 = 0; for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; float sigma2 = sum_x2/n_per_row; const int64_t nb = n_per_row/QK5_0; for (int ib = 0; ib < nb; ++ib) { const float * xb = x + QK5_0 * ib; const float * qw = quant_weights + QK5_0 * ib; for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight); y[ib].d = GGML_FP32_TO_FP16(d); uint32_t qh = 0; for (int j = 0; j < 16; ++j) { const uint8_t xi0 = L[j]; const uint8_t xi1 = L[j+16]; y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); // get the 5-th bit and store it in qh at the right position qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); } memcpy(&y[ib].qh, &qh, sizeof(qh)); } } size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { if (!quant_weights) { quantize_row_q5_0_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * ggml_row_size(GGML_TYPE_Q5_0, n_per_row); } size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row); char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } return nrow * row_size; } static void quantize_row_q5_1_impl(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t n_per_row, const float * quant_weights) { static_assert(QK5_1 == 32, "QK5_1 must be 32"); if (!quant_weights) { quantize_row_q5_1_ref(x, y, n_per_row); return; } float weight[QK5_1]; uint8_t L[QK5_1], Laux[QK5_1]; float sum_x2 = 0; for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j]; float sigma2 = sum_x2/n_per_row; const int64_t nb = n_per_row/QK5_1; for (int ib = 0; ib < nb; ++ib) { const float * xb = x + QK5_1 * ib; const float * qw = quant_weights + QK5_1 * ib; for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); float min; float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false); y[ib].d = GGML_FP32_TO_FP16(d); y[ib].m = GGML_FP32_TO_FP16(-min); uint32_t qh = 0; for (int j = 0; j < 16; ++j) { const uint8_t xi0 = L[j]; const uint8_t xi1 = L[j+16]; y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); // get the 5-th bit and store it in qh at the right position qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); } memcpy(&y[ib].qh, &qh, sizeof(qh)); } } size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { if (!quant_weights) { quantize_row_q5_1_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * ggml_row_size(GGML_TYPE_Q5_1, n_per_row); } size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row); char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights); src += n_per_row; qrow += row_size; } return nrow * row_size; } size_t quantize_q8_0(const float 
* GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { (void)quant_weights; // not used const size_t row_size = ggml_row_size(GGML_TYPE_Q8_0, n_per_row); quantize_row_q8_0_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * row_size; } size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_UNUSED(quant_weights); quantize_row_mxfp4_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * ggml_row_size(GGML_TYPE_MXFP4, n_per_row); } // ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs) void quantize_row_tq1_0_ref(const float * GGML_RESTRICT x, block_tq1_0 * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int64_t i = 0; i < nb; i++) { float amax = 0.0f; // absolute max for (int j = 0; j < QK_K; j++) { const float v = x[j]; amax = MAX(amax, fabsf(v)); } const float d = amax; const float id = d ? 1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); // 5 elements per byte, along 32 bytes for (size_t j = 0; j < sizeof(y->qs) - sizeof(y->qs) % 32; j += 32) { for (size_t m = 0; m < 32; ++m) { uint8_t q = 0; for (size_t n = 0; n < 5; ++n) { int xi = lroundf(x[m + n*32] * id) + 1; // -1, 0, 1 -> 0, 1, 2 q *= 3; q += xi; } // ceiling division (243 == pow(3, 5)) q = ((uint16_t)q * 256 + (243 - 1)) / 243; y[i].qs[j + m] = q; } x += 5*32; } // along 16 bytes for (size_t j = sizeof(y->qs) - sizeof(y->qs) % 32; j < sizeof(y->qs); j += 16) { for (size_t m = 0; m < 16; ++m) { uint8_t q = 0; for (size_t n = 0; n < 5; ++n) { int xi = lroundf(x[m + n*16] * id) + 1; // -1, 0, 1 -> 0, 1, 2 q *= 3; q += xi; } // ceiling division (243 == pow(3, 5)) q = ((uint16_t)q * 256 + (243 - 1)) / 243; y[i].qs[j + m] = q; } x += 5*16; } // 4 elements per byte for (size_t j = 0; j < sizeof(y->qh); ++j) { uint8_t q = 0; for (size_t m = 0; m < 4; ++m) { // -1, 0, 1 -> 0, 1, 2 int xi = lroundf(x[j + m*sizeof(y->qh)] * id) + 1; q *= 3; q += xi; } // shift the first value to the most significant trit q *= 3; // ceiling division (243 == pow(3, 5)) q = ((uint16_t)q * 256 + (243 - 1)) / 243; y[i].qh[j] = q; } x += 4*sizeof(y->qh); } } void quantize_row_tq2_0_ref(const float * GGML_RESTRICT x, block_tq2_0 * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int64_t i = 0; i < nb; i++) { float amax = 0.0f; // absolute max for (int j = 0; j < QK_K; j++) { const float v = x[j]; amax = MAX(amax, fabsf(v)); } const float d = amax; const float id = d ? 
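// Ternary quantization (BitNet b1.58 / TriLMs): values are rounded to {-1, 0, +1},
// stored as {0, 1, 2} and scaled by the block's absolute max d. TQ1_0 (above)
// packs 5 trits per byte as a base-3 number rescaled into 0..255 via the ceiling
// division by 243 (= 3^5); TQ2_0 (this function) simply spends 2 bits per trit,
// 4 trits per byte, and is decoded with a plain shift and mask
// (see dequantize_row_tq2_0 below).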
1.0f/d : 0.0f; y[i].d = GGML_FP32_TO_FP16(d); for (size_t j = 0; j < sizeof(y->qs); j += 32) { for (size_t m = 0; m < 32; ++m) { uint8_t q = 0; for (size_t n = 0; n < 4; ++n) { // -1, 0, 1 -> 0, 1, 2 int xi = lroundf(x[m + n*32] * id) + 1; q += (xi & 3) << (2*n); } y[i].qs[j + m] = q; } x += 4*32; } } } size_t quantize_tq1_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { (void)quant_weights; // not used const size_t row_size = ggml_row_size(GGML_TYPE_TQ1_0, n_per_row); quantize_row_tq1_0_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * row_size; } size_t quantize_tq2_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { (void)quant_weights; // not used const size_t row_size = ggml_row_size(GGML_TYPE_TQ2_0, n_per_row); quantize_row_tq2_0_ref(src, dst, (int64_t)nrow*n_per_row); return nrow * row_size; } void dequantize_row_tq1_0(const block_tq1_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; for (int64_t i = 0; i < nb; ++i) { const float d = GGML_FP16_TO_FP32(x[i].d); for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { for (size_t n = 0; n < 5; ++n) { for (size_t m = 0; m < 32; ++m) { uint8_t q = x[i].qs[j + m] * pow3[n]; int16_t xi = ((uint16_t) q * 3) >> 8; *y++ = (float) (xi - 1) * d; } } } for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { for (size_t n = 0; n < 5; ++n) { for (size_t m = 0; m < 16; ++m) { uint8_t q = x[i].qs[j + m] * pow3[n]; int16_t xi = ((uint16_t) q * 3) >> 8; *y++ = (float) (xi - 1) * d; } } } for (size_t n = 0; n < 4; ++n) { for (size_t j = 0; j < sizeof(x->qh); ++j) { uint8_t q = x[i].qh[j] * pow3[n]; int16_t xi = ((uint16_t) q * 3) >> 8; *y++ = (float) (xi - 1) * d; } } } } void dequantize_row_tq2_0(const block_tq2_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int64_t i = 0; i < nb; ++i) { const float d = GGML_FP16_TO_FP32(x[i].d); for (size_t j = 0; j < sizeof(x->qs); j += 32) { for (size_t l = 0; l < 4; ++l) { for (size_t m = 0; m < 32; ++m) { int8_t q = (x[i].qs[j + m] >> (l*2)) & 3; *y++ = (float) (q - 1) * d; } } } } } // ====================== "True" 2-bit (de)-quantization void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; uint32_t aux32[2]; const uint8_t * aux8 = (const uint8_t *)aux32; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t)); const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f; for (int l = 0; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; for (int j = 0; j < 8; ++j) { y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? 
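// IQ2_XXS is codebook-based: each group of 8 weights is one of 256 entries of
// iq2xxs_grid selected by a byte index, with a 7-bit ksigns_iq2xs entry flipping
// the signs of individual elements (kmask_iq2xs picks the bit per position).
// The 4-bit field in the top of aux32[1] is the per-32-value scale, mapped to
// d * (0.5 + s) * 0.25 as computed above.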
-1.f : 1.f); } y += 8; } } } } // ====================== 2.3125 bpw (de)-quantization void dequantize_row_iq2_xs(const block_iq2_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; float db[2]; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f; db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f; for (int l = 0; l < 4; ++l) { const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511)); const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9]; for (int j = 0; j < 8; ++j) { y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); } y += 8; } } } } // ====================== 2.5625 bpw (de)-quantization void dequantize_row_iq2_s(const block_iq2_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; float db[2]; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint8_t * signs = qs + QK_K/8; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f; db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f; for (int l = 0; l < 4; ++l) { const float dl = db[l/2]; const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); for (int j = 0; j < 8; ++j) { y[j] = dl * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1.f : 1.f); } y += 8; } qs += 4; signs += 4; } } } // ====================== 3.0625 bpw (de)-quantization void dequantize_row_iq3_xxs(const block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; uint32_t aux32; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const uint8_t * qs = x[i].qs; const uint8_t * scales_and_signs = qs + QK_K/4; for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t)); const float db = d * (0.5f + (aux32 >> 28)) * 0.5f; for (int l = 0; l < 4; ++l) { const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]); const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]); for (int j = 0; j < 4; ++j) { y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); } y += 8; } qs += 8; } } } // ====================== 3.3125 bpw (de)-quantization void dequantize_row_iq3_s(const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; const uint8_t * signs = x[i].signs; for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { const float db1 = d * (1 + 2*(x[i].scales[ib32/2] & 0xf)); const float db2 = d * (1 + 2*(x[i].scales[ib32/2] >> 4)); for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[0] << (8-2*l)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[0] << (7-2*l)) & 256))); for (int j = 0; j < 4; ++j) { y[j+0] = db1 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = db1 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? 
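// IQ3_S stores its metadata explicitly rather than packing everything into the
// index word: qs holds 8-bit grid indices whose 9th bit comes from qh, signs[]
// carries one sign byte per group of 8, and each 32-value group gets its own
// 4-bit scale (two per scales[] byte) mapped to d * (1 + 2*s). The second half
// of this loop body repeats the same unpacking for the next 32 values using qh[1].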
-1.f : 1.f); } y += 8; } qs += 8; signs += 4; for (int l = 0; l < 4; ++l) { const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[1] << (8-2*l)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[1] << (7-2*l)) & 256))); for (int j = 0; j < 4; ++j) { y[j+0] = db2 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = db2 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f); } y += 8; } qh += 2; qs += 8; signs += 4; } } } // ====================== 1.5625 bpw (de)-quantization void dequantize_row_iq1_s(const block_iq1_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { const float d = GGML_FP16_TO_FP32(x[i].d); const uint8_t * qs = x[i].qs; const uint16_t * qh = x[i].qh; for (int ib = 0; ib < QK_K/32; ++ib) { const float dl = d * (2*((qh[ib] >> 12) & 7) + 1); const float delta = qh[ib] & 0x8000 ? -IQ1S_DELTA : IQ1S_DELTA; for (int l = 0; l < 4; ++l) { const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); for (int j = 0; j < 8; ++j) { y[j] = dl * (grid[j] + delta); } y += 8; } qs += 4; } } } void dequantize_row_iq1_m(const block_iq1_m * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; float delta[4]; uint16_t idx[4]; iq1m_scale_t scale; for (int i = 0; i < nb; i++) { const uint16_t * sc = (const uint16_t *)x[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); const float d = GGML_FP16_TO_FP32(scale.f16); const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; for (int ib = 0; ib < QK_K/32; ++ib) { const float dl1 = d * (2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1); const float dl2 = d * (2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1); idx[0] = qs[0] | ((qh[0] << 8) & 0x700); idx[1] = qs[1] | ((qh[0] << 4) & 0x700); idx[2] = qs[2] | ((qh[1] << 8) & 0x700); idx[3] = qs[3] | ((qh[1] << 4) & 0x700); delta[0] = qh[0] & 0x08 ? -IQ1S_DELTA : IQ1S_DELTA; delta[1] = qh[0] & 0x80 ? -IQ1S_DELTA : IQ1S_DELTA; delta[2] = qh[1] & 0x08 ? -IQ1S_DELTA : IQ1S_DELTA; delta[3] = qh[1] & 0x80 ? 
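// IQ1_S / IQ1_M: every group of 8 weights indexes the iq1s_grid codebook and is
// shifted by +/-IQ1S_DELTA according to one stored bit, with odd 3-bit scales
// (2*s + 1). IQ1_M additionally reassembles its fp16 block scale from 4-bit
// pieces of the scales[] words (see iq1m_scale_t above) and tracks scales per 16
// values and deltas per group of 8; that is what the idx[]/delta[] arrays here
// hold for the current 32-value chunk.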
-IQ1S_DELTA : IQ1S_DELTA; for (int l = 0; l < 2; ++l) { const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]); for (int j = 0; j < 8; ++j) { y[j] = dl1 * (grid[j] + delta[l]); } y += 8; } for (int l = 2; l < 4; ++l) { const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]); for (int j = 0; j < 8; ++j) { y[j] = dl2 * (grid[j] + delta[l]); } y += 8; } qs += 4; qh += 2; } } } void dequantize_row_iq4_nl(const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK4_NL == 0); const int64_t nb = k / QK4_NL; for (int i = 0; i < nb; i++) { const uint8_t * qs = x[i].qs; const float d = GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < QK4_NL/2; ++j) { y[j+ 0] = d * kvalues_iq4nl[qs[j] & 0xf]; y[j+QK4_NL/2] = d * kvalues_iq4nl[qs[j] >> 4]; } y += QK4_NL; qs += QK4_NL/2; } } void dequantize_row_iq4_xs(const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { const uint8_t * qs = x[i].qs; const float d = GGML_FP16_TO_FP32(x[i].d); for (int ib = 0; ib < QK_K/32; ++ib) { const int ls = ((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4); const float dl = d * (ls - 32); for (int j = 0; j < 16; ++j) { y[j+ 0] = dl * kvalues_iq4nl[qs[j] & 0xf]; y[j+16] = dl * kvalues_iq4nl[qs[j] >> 4]; } y += 32; qs += 16; } } } //===================================== Q8_K ============================================== void quantize_row_q8_K_ref(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { float max = 0; float amax = 0; for (int j = 0; j < QK_K; ++j) { float ax = fabsf(x[j]); if (ax > amax) { amax = ax; max = x[j]; } } if (!amax) { y[i].d = 0; memset(y[i].qs, 0, QK_K); x += QK_K; continue; } //const float iscale = -128.f/max; // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward const float iscale = -127.f/max; for (int j = 0; j < QK_K; ++j) { int v = nearest_int(iscale*x[j]); y[i].qs[j] = MIN(127, v); } for (int j = 0; j < QK_K/16; ++j) { int sum = 0; for (int ii = 0; ii < 16; ++ii) { sum += y[i].qs[j*16 + ii]; } y[i].bsums[j] = sum; } y[i].d = 1/iscale; x += QK_K; } } void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { for (int j = 0; j < QK_K; ++j) { *y++ = x[i].d * x[i].qs[j]; } } } // ================================ IQ2 quantization ============================================= typedef struct { uint64_t * grid; int * map; uint16_t * neighbours; } iq2_entry_t; static iq2_entry_t iq2_data[4] = { {NULL, NULL, NULL}, {NULL, NULL, NULL}, {NULL, NULL, NULL}, {NULL, NULL, NULL}, }; static inline int iq2_data_index(enum ggml_type type) { GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S); return type == GGML_TYPE_IQ2_XXS ? 0 : type == GGML_TYPE_IQ2_XS ? 1 : type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 2 : 3; } static inline int iq2_grid_size(enum ggml_type type) { GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S); return type == GGML_TYPE_IQ2_XXS ? 256 : type == GGML_TYPE_IQ2_XS ? 512 : type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 
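// The IQ2/IQ1 quantizers rely on per-type lookup tables built lazily from the
// hard-coded kgrid_* arrays below and cached in iq2_data[]: the codebook itself
// (`grid`), a reverse `map` from packed bit patterns back to grid indices, and a
// `neighbours` list used to search nearby grid points when an input does not
// land exactly on the grid. iq2_data_index()/iq2_grid_size() pick the cache slot
// and table size (256, 512, NGRID_IQ1S or 1024 entries).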
NGRID_IQ1S : 1024; } static int iq2_compare_func(const void * left, const void * right) { const int * l = (const int *)left; const int * r = (const int *)right; return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0; } void iq2xs_init_impl(enum ggml_type type) { const int gindex = iq2_data_index(type); const int grid_size = iq2_grid_size(type); if (iq2_data[gindex].grid) { return; } static const uint16_t kgrid_2bit_256[256] = { 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97, 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642, 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288, 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113, 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240, 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400, 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260, 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872, 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516, 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561, 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488, 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545, 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874, 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856, 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142, 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268, }; static const uint16_t kgrid_2bit_512[512] = { 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70, 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257, 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340, 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597, 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096, 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348, 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065, 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441, 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160, 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372, 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125, 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652, 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197, 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549, 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894, 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 
10888, 10890, 16385, 16388, 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480, 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773, 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473, 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436, 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497, 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162, 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528, 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745, 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234, 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025, 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810, 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984, 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462, 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960, 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048, 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690, }; static const uint16_t kgrid_1bit_2048[NGRID_IQ1S] = { 0, 2, 5, 8, 10, 17, 21, 32, 34, 40, 42, 69, 81, 84, 86, 101, 128, 130, 136, 138, 149, 160, 162, 168, 170, 260, 261, 273, 276, 278, 281, 282, 293, 321, 326, 329, 338, 341, 346, 353, 356, 358, 360, 389, 401, 404, 406, 421, 512, 514, 520, 522, 533, 544, 546, 552, 554, 581, 593, 601, 612, 617, 640, 642, 648, 650, 657, 661, 665, 672, 674, 680, 682, 1041, 1044, 1046, 1061, 1089, 1097, 1109, 1114, 1124, 1125, 1169, 1177, 1189, 1281, 1284, 1285, 1286, 1301, 1304, 1306, 1321, 1344, 1349, 1354, 1360, 1361, 1364, 1365, 1366, 1369, 1376, 1378, 1381, 1384, 1386, 1409, 1425, 1429, 1432, 1434, 1441, 1444, 1445, 1446, 1449, 1556, 1561, 1601, 1604, 1616, 1618, 1621, 1624, 1632, 1633, 1638, 1641, 1669, 1681, 1684, 1689, 2048, 2050, 2056, 2058, 2069, 2080, 2082, 2088, 2090, 2117, 2129, 2134, 2149, 2176, 2178, 2184, 2186, 2197, 2208, 2210, 2216, 2218, 2309, 2321, 2324, 2329, 2340, 2341, 2369, 2384, 2385, 2389, 2401, 2404, 2409, 2449, 2452, 2454, 2457, 2469, 2560, 2562, 2568, 2570, 2581, 2592, 2594, 2600, 2602, 2629, 2641, 2649, 2657, 2661, 2688, 2690, 2693, 2696, 2698, 2709, 2720, 2722, 2728, 2730, 4112, 4113, 4116, 4121, 4132, 4133, 4161, 4164, 4176, 4181, 4184, 4193, 4196, 4197, 4201, 4241, 4244, 4246, 4257, 4261, 4353, 4356, 4358, 4361, 4368, 4370, 4373, 4376, 4385, 4388, 4393, 4421, 4426, 4432, 4433, 4434, 4436, 4437, 4438, 4441, 4448, 4453, 4484, 4498, 4501, 4513, 4516, 4625, 4628, 4630, 4645, 4672, 4678, 4681, 4690, 4693, 4696, 4698, 4708, 4710, 4741, 4753, 4756, 4758, 4773, 5121, 5126, 5129, 5140, 5141, 5144, 5145, 5153, 5158, 5185, 5189, 5190, 5192, 5194, 5201, 5204, 5205, 5206, 5209, 5218, 5221, 5224, 5252, 5257, 5264, 5268, 5269, 5272, 5273, 5274, 5281, 5284, 
5285, 5289, 5378, 5381, 5386, 5393, 5396, 5397, 5398, 5401, 5408, 5410, 5413, 5416, 5418, 5441, 5444, 5445, 5446, 5457, 5458, 5460, 5461, 5462, 5465, 5466, 5473, 5476, 5477, 5478, 5481, 5504, 5506, 5508, 5509, 5512, 5514, 5520, 5521, 5524, 5525, 5526, 5529, 5530, 5536, 5538, 5541, 5633, 5636, 5637, 5638, 5653, 5654, 5656, 5658, 5665, 5670, 5696, 5698, 5700, 5701, 5704, 5706, 5713, 5717, 5718, 5720, 5721, 5729, 5732, 5733, 5736, 5737, 5738, 5766, 5770, 5778, 5781, 5796, 5801, 6161, 6166, 6181, 6209, 6212, 6214, 6217, 6224, 6229, 6232, 6234, 6240, 6241, 6244, 6246, 6249, 6277, 6289, 6292, 6309, 6416, 6418, 6421, 6426, 6433, 6437, 6466, 6468, 6469, 6472, 6481, 6484, 6485, 6486, 6489, 6490, 6496, 6501, 6506, 6537, 6545, 6546, 6549, 6552, 6561, 6566, 6569, 6665, 6678, 6692, 6694, 6724, 6726, 6729, 6736, 6738, 6741, 6744, 6753, 6758, 6761, 6789, 6801, 6806, 6810, 8192, 8194, 8200, 8202, 8213, 8224, 8226, 8229, 8232, 8234, 8261, 8273, 8281, 8289, 8293, 8320, 8322, 8328, 8330, 8341, 8352, 8354, 8357, 8360, 8362, 8453, 8465, 8468, 8473, 8485, 8514, 8516, 8521, 8533, 8536, 8538, 8545, 8548, 8549, 8550, 8581, 8592, 8598, 8601, 8613, 8705, 8712, 8714, 8721, 8725, 8736, 8738, 8744, 8746, 8773, 8785, 8790, 8793, 8805, 8833, 8840, 8842, 8849, 8853, 8864, 8866, 8872, 8874, 9221, 9236, 9238, 9241, 9253, 9284, 9285, 9286, 9289, 9298, 9301, 9304, 9306, 9318, 9349, 9361, 9364, 9369, 9377, 9381, 9481, 9493, 9505, 9513, 9536, 9541, 9544, 9553, 9556, 9557, 9561, 9570, 9573, 9576, 9609, 9616, 9620, 9621, 9624, 9626, 9633, 9636, 9638, 9641, 9733, 9744, 9746, 9753, 9765, 9793, 9801, 9813, 9824, 9825, 9833, 9860, 9862, 9872, 9882, 10240, 10242, 10248, 10250, 10261, 10272, 10274, 10280, 10282, 10309, 10321, 10324, 10341, 10368, 10370, 10376, 10378, 10400, 10402, 10408, 10410, 10505, 10513, 10516, 10521, 10533, 10566, 10569, 10578, 10581, 10593, 10596, 10598, 10601, 10629, 10640, 10646, 10649, 10660, 10661, 10752, 10754, 10760, 10762, 10784, 10786, 10792, 10794, 10821, 10833, 10838, 10841, 10853, 10880, 10882, 10888, 10890, 10901, 10912, 10914, 10920, 10922, 16389, 16401, 16406, 16421, 16457, 16466, 16469, 16472, 16474, 16481, 16484, 16486, 16532, 16537, 16545, 16550, 16640, 16641, 16644, 16646, 16649, 16658, 16661, 16662, 16664, 16666, 16673, 16678, 16681, 16709, 16712, 16714, 16721, 16724, 16725, 16726, 16729, 16730, 16741, 16744, 16746, 16769, 16772, 16774, 16784, 16786, 16789, 16800, 16801, 16802, 16901, 16913, 16916, 16918, 16933, 16961, 16978, 16981, 16986, 16996, 17001, 17033, 17044, 17061, 17409, 17429, 17433, 17449, 17477, 17480, 17482, 17489, 17492, 17493, 17494, 17505, 17506, 17509, 17512, 17514, 17537, 17542, 17545, 17552, 17554, 17557, 17568, 17569, 17577, 17665, 17666, 17669, 17674, 17681, 17684, 17685, 17686, 17689, 17696, 17701, 17706, 17729, 17732, 17733, 17734, 17737, 17744, 17745, 17748, 17749, 17750, 17752, 17753, 17761, 17764, 17765, 17766, 17769, 17794, 17796, 17797, 17800, 17809, 17812, 17813, 17814, 17817, 17818, 17829, 17832, 17834, 17921, 17925, 17929, 17940, 17941, 17944, 17946, 17953, 17956, 17961, 17984, 17986, 17989, 17992, 18000, 18001, 18002, 18005, 18006, 18009, 18018, 18021, 18024, 18049, 18053, 18058, 18068, 18069, 18081, 18084, 18086, 18437, 18449, 18453, 18458, 18469, 18498, 18505, 18512, 18517, 18520, 18529, 18532, 18534, 18537, 18565, 18577, 18580, 18582, 18585, 18597, 18689, 18693, 18694, 18698, 18704, 18708, 18709, 18712, 18721, 18724, 18726, 18752, 18757, 18762, 18769, 18770, 18772, 18773, 18774, 18777, 18784, 18786, 18789, 18790, 18794, 18822, 18825, 18834, 18837, 18838, 
18840, 18849, 18852, 18854, 18857, 18966, 19012, 19014, 19017, 19029, 19032, 19034, 19044, 19049, 19092, 19109, 20481, 20484, 20485, 20486, 20489, 20498, 20501, 20506, 20513, 20516, 20521, 20544, 20549, 20552, 20561, 20564, 20565, 20566, 20569, 20581, 20584, 20614, 20617, 20629, 20632, 20640, 20641, 20646, 20649, 20741, 20744, 20745, 20746, 20753, 20756, 20757, 20758, 20760, 20761, 20768, 20773, 20774, 20776, 20778, 20801, 20804, 20805, 20806, 20809, 20816, 20817, 20818, 20820, 20821, 20822, 20824, 20825, 20826, 20833, 20836, 20837, 20838, 20841, 20866, 20869, 20881, 20884, 20885, 20886, 20889, 20896, 20901, 20906, 20993, 20998, 21010, 21013, 21018, 21025, 21028, 21058, 21061, 21066, 21073, 21076, 21077, 21078, 21081, 21090, 21093, 21125, 21136, 21138, 21141, 21145, 21146, 21156, 21508, 21509, 21521, 21524, 21525, 21526, 21528, 21529, 21537, 21541, 21544, 21546, 21569, 21572, 21573, 21574, 21577, 21578, 21584, 21585, 21588, 21589, 21590, 21592, 21593, 21594, 21601, 21602, 21604, 21605, 21606, 21609, 21632, 21640, 21642, 21649, 21652, 21653, 21654, 21657, 21665, 21668, 21669, 21674, 21761, 21762, 21764, 21765, 21766, 21769, 21776, 21777, 21778, 21780, 21781, 21782, 21785, 21786, 21793, 21796, 21797, 21798, 21801, 21824, 21825, 21826, 21828, 21829, 21830, 21832, 21833, 21840, 21841, 21842, 21844, 21845, 21846, 21848, 21849, 21850, 21856, 21857, 21860, 21861, 21862, 21864, 21865, 21866, 21889, 21892, 21893, 21897, 21898, 21904, 21905, 21908, 21909, 21910, 21912, 21913, 21921, 21924, 21925, 21926, 21929, 22016, 22017, 22018, 22020, 22022, 22024, 22025, 22033, 22036, 22037, 22040, 22041, 22048, 22049, 22050, 22052, 22053, 22054, 22056, 22057, 22081, 22085, 22086, 22088, 22089, 22090, 22096, 22097, 22098, 22100, 22101, 22102, 22104, 22105, 22106, 22113, 22116, 22117, 22121, 22146, 22149, 22150, 22152, 22153, 22154, 22161, 22165, 22170, 22178, 22181, 22182, 22184, 22185, 22532, 22533, 22534, 22537, 22544, 22549, 22552, 22561, 22570, 22597, 22600, 22602, 22609, 22612, 22613, 22614, 22616, 22617, 22624, 22626, 22628, 22629, 22658, 22665, 22672, 22674, 22677, 22680, 22689, 22697, 22785, 22786, 22789, 22794, 22801, 22804, 22805, 22806, 22809, 22821, 22849, 22852, 22853, 22854, 22857, 22864, 22865, 22866, 22868, 22869, 22870, 22872, 22873, 22874, 22881, 22884, 22885, 22886, 22889, 22913, 22917, 22921, 22929, 22932, 22933, 22934, 22936, 22937, 22949, 23044, 23048, 23061, 23066, 23072, 23077, 23078, 23081, 23109, 23112, 23113, 23121, 23125, 23126, 23128, 23129, 23138, 23141, 23144, 23146, 23169, 23178, 23186, 23189, 23190, 23192, 23194, 23201, 24581, 24596, 24598, 24601, 24613, 24644, 24656, 24661, 24662, 24664, 24666, 24673, 24676, 24678, 24681, 24705, 24726, 24741, 24833, 24836, 24838, 24841, 24850, 24853, 24865, 24866, 24870, 24873, 24901, 24905, 24913, 24917, 24918, 24921, 24933, 24934, 24938, 24964, 24970, 24978, 24981, 24993, 24998, 25001, 25105, 25110, 25113, 25152, 25153, 25158, 25173, 25174, 25176, 25184, 25221, 25233, 25238, 25253, 25617, 25618, 25621, 25622, 25626, 25633, 25638, 25641, 25664, 25666, 25669, 25672, 25674, 25681, 25684, 25685, 25686, 25689, 25690, 25696, 25698, 25701, 25732, 25733, 25737, 25744, 25746, 25748, 25749, 25750, 25752, 25754, 25761, 25764, 25769, 25861, 25864, 25866, 25873, 25877, 25878, 25881, 25924, 25925, 25926, 25929, 25936, 25937, 25940, 25941, 25942, 25945, 25953, 25956, 25957, 25958, 25961, 25990, 25993, 25994, 26001, 26005, 26006, 26009, 26010, 26018, 26021, 26022, 26024, 26114, 26121, 26133, 26144, 26150, 26152, 26153, 26176, 26181, 26184, 26186, 26193, 
26196, 26197, 26198, 26200, 26202, 26208, 26213, 26216, 26240, 26242, 26245, 26250, 26260, 26262, 26264, 26265, 26272, 26276, 26278, 26282, 26646, 26649, 26661, 26689, 26706, 26709, 26714, 26721, 26729, 26757, 26769, 26776, 26790, 26881, 26884, 26896, 26901, 26913, 26916, 26918, 26921, 26944, 26945, 26949, 26950, 26952, 26961, 26964, 26965, 26966, 26969, 26976, 26981, 26986, 27010, 27012, 27018, 27029, 27041, 27044, 27045, 27049, 27153, 27158, 27160, 27201, 27204, 27209, 27216, 27221, 27224, 27226, 27236, 27237, 27241, 27270, 27284, 27288, 27290, 27302, 32768, 32770, 32776, 32778, 32800, 32802, 32808, 32810, 32837, 32848, 32849, 32852, 32854, 32857, 32869, 32896, 32898, 32904, 32906, 32917, 32928, 32930, 32936, 32938, 33029, 33041, 33044, 33046, 33049, 33061, 33089, 33092, 33097, 33104, 33106, 33109, 33110, 33112, 33113, 33124, 33126, 33129, 33157, 33161, 33172, 33174, 33177, 33189, 33280, 33282, 33288, 33290, 33301, 33312, 33314, 33320, 33322, 33361, 33364, 33369, 33381, 33408, 33410, 33416, 33418, 33429, 33440, 33442, 33448, 33450, 33812, 33817, 33857, 33860, 33873, 33877, 33882, 33889, 33892, 33897, 33940, 33945, 34049, 34057, 34066, 34069, 34074, 34086, 34089, 34112, 34113, 34117, 34120, 34129, 34132, 34133, 34134, 34137, 34138, 34149, 34150, 34152, 34154, 34177, 34180, 34182, 34185, 34192, 34194, 34197, 34200, 34214, 34321, 34326, 34329, 34341, 34369, 34372, 34377, 34378, 34384, 34389, 34393, 34394, 34401, 34406, 34410, 34437, 34449, 34458, 34468, 34816, 34818, 34824, 34826, 34837, 34848, 34850, 34856, 34858, 34881, 34885, 34897, 34900, 34905, 34917, 34921, 34944, 34946, 34952, 34954, 34965, 34976, 34978, 34984, 34986, 35077, 35078, 35089, 35092, 35094, 35109, 35137, 35140, 35142, 35145, 35152, 35154, 35157, 35162, 35169, 35172, 35205, 35222, 35225, 35237, 35328, 35330, 35336, 35338, 35349, 35360, 35362, 35368, 35370, 35397, 35409, 35412, 35414, 35456, 35458, 35464, 35466, 35477, 35488, 35490, 35496, 35498, 36869, 36881, 36886, 36888, 36889, 36901, 36929, 36934, 36937, 36949, 36952, 36954, 36969, 36970, 36997, 37009, 37012, 37014, 37017, 37029, 37121, 37124, 37126, 37129, 37136, 37141, 37144, 37146, 37153, 37156, 37158, 37161, 37184, 37189, 37200, 37201, 37204, 37205, 37206, 37209, 37218, 37221, 37252, 37254, 37266, 37269, 37272, 37281, 37284, 37286, 37289, 37381, 37393, 37396, 37401, 37413, 37444, 37446, 37449, 37456, 37458, 37461, 37464, 37478, 37481, 37509, 37524, 37526, 37545, 37889, 37892, 37894, 37904, 37909, 37912, 37926, 37952, 37962, 37969, 37972, 37973, 37974, 37976, 37977, 37984, 37985, 37986, 37989, 38020, 38022, 38034, 38036, 38037, 38040, 38049, 38057, 38144, 38149, 38152, 38154, 38160, 38161, 38164, 38165, 38166, 38169, 38177, 38181, 38185, 38186, 38209, 38212, 38213, 38214, 38217, 38224, 38225, 38226, 38228, 38229, 38230, 38232, 38233, 38234, 38241, 38244, 38245, 38246, 38249, 38273, 38277, 38280, 38289, 38290, 38292, 38293, 38294, 38297, 38298, 38304, 38306, 38309, 38312, 38314, 38401, 38404, 38416, 38421, 38425, 38432, 38438, 38441, 38469, 38472, 38473, 38481, 38482, 38485, 38486, 38489, 38501, 38504, 38530, 38532, 38537, 38538, 38546, 38548, 38549, 38564, 38566, 38569, 38917, 38934, 38937, 38949, 38977, 38982, 38992, 38994, 38997, 38998, 39002, 39012, 39013, 39045, 39057, 39062, 39065, 39077, 39172, 39174, 39177, 39184, 39186, 39189, 39192, 39194, 39200, 39201, 39204, 39206, 39232, 39234, 39237, 39240, 39242, 39249, 39252, 39253, 39254, 39257, 39266, 39269, 39270, 39274, 39297, 39300, 39312, 39314, 39317, 39322, 39329, 39334, 39429, 39445, 39461, 39492, 39494, 
39497, 39504, 39509, 39512, 39521, 39557, 39569, 39572, 39573, 39574, 40960, 40962, 40968, 40970, 40981, 40992, 40994, 41000, 41002, 41029, 41041, 41044, 41046, 41049, 41088, 41090, 41096, 41098, 41109, 41120, 41122, 41128, 41130, 41221, 41225, 41233, 41236, 41238, 41241, 41242, 41286, 41289, 41297, 41301, 41304, 41306, 41313, 41316, 41349, 41360, 41362, 41366, 41369, 41474, 41480, 41482, 41488, 41497, 41506, 41512, 41514, 41541, 41553, 41558, 41561, 41573, 41600, 41602, 41608, 41610, 41621, 41632, 41634, 41640, 41642, 42009, 42021, 42049, 42052, 42064, 42068, 42069, 42072, 42074, 42081, 42085, 42086, 42088, 42089, 42117, 42246, 42249, 42256, 42258, 42261, 42264, 42278, 42281, 42306, 42309, 42321, 42324, 42325, 42326, 42329, 42341, 42346, 42369, 42372, 42373, 42374, 42377, 42386, 42389, 42392, 42501, 42513, 42518, 42522, 42529, 42533, 42564, 42566, 42570, 42578, 42581, 42582, 42584, 42592, 42594, 42630, 42640, 42645, 42646, 42649, 42657, 42660, 42662, 43008, 43010, 43016, 43018, 43040, 43042, 43048, 43050, 43089, 43092, 43094, 43097, 43136, 43138, 43144, 43146, 43157, 43168, 43170, 43176, 43178, 43269, 43284, 43289, 43297, 43301, 43329, 43344, 43349, 43354, 43361, 43366, 43369, 43408, 43414, 43520, 43522, 43528, 43530, 43552, 43554, 43560, 43562, 43601, 43604, 43606, 43648, 43650, 43656, 43658, 43669, 43680, 43682, 43688, 43690, }; static const uint16_t kgrid_2bit_1024[1024] = { 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70, 73, 80, 82, 85, 88, 97, 100, 102, 105, 128, 130, 133, 136, 145, 148, 160, 165, 170, 257, 260, 262, 265, 272, 274, 277, 280, 289, 292, 320, 322, 325, 328, 337, 340, 342, 345, 352, 357, 360, 385, 388, 400, 402, 405, 417, 420, 512, 514, 517, 520, 529, 532, 544, 554, 577, 580, 582, 585, 592, 597, 640, 645, 650, 660, 674, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1062, 1065, 1088, 1090, 1093, 1096, 1098, 1105, 1108, 1110, 1113, 1120, 1122, 1125, 1153, 1156, 1158, 1161, 1168, 1173, 1176, 1185, 1188, 1280, 1282, 1285, 1288, 1290, 1297, 1300, 1302, 1305, 1312, 1317, 1320, 1345, 1348, 1350, 1353, 1360, 1362, 1365, 1368, 1377, 1380, 1408, 1410, 1413, 1416, 1425, 1428, 1440, 1537, 1540, 1542, 1545, 1552, 1557, 1600, 1605, 1608, 1617, 1620, 1632, 1665, 1668, 1680, 2048, 2050, 2053, 2056, 2065, 2068, 2070, 2073, 2080, 2085, 2090, 2113, 2116, 2118, 2121, 2128, 2130, 2133, 2136, 2145, 2148, 2176, 2181, 2196, 2218, 2305, 2308, 2320, 2322, 2325, 2328, 2337, 2368, 2373, 2376, 2385, 2388, 2400, 2433, 2448, 2560, 2577, 2580, 2594, 2600, 2602, 2640, 2713, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4134, 4160, 4162, 4165, 4168, 4177, 4180, 4182, 4185, 4192, 4194, 4197, 4200, 4225, 4228, 4230, 4240, 4245, 4248, 4257, 4260, 4352, 4354, 4357, 4360, 4362, 4369, 4372, 4374, 4377, 4384, 4386, 4389, 4392, 4417, 4420, 4422, 4425, 4432, 4434, 4437, 4440, 4449, 4452, 4480, 4482, 4485, 4488, 4497, 4500, 4609, 4612, 4617, 4624, 4629, 4641, 4644, 4672, 4677, 4689, 4692, 4737, 4740, 4752, 5120, 5122, 5125, 5128, 5137, 5140, 5142, 5145, 5152, 5157, 5160, 5185, 5188, 5190, 5193, 5200, 5202, 5205, 5208, 5217, 5220, 5248, 5250, 5253, 5256, 5265, 5268, 5280, 5377, 5380, 5382, 5385, 5392, 5394, 5397, 5400, 5409, 5412, 5440, 5442, 5445, 5448, 5457, 5460, 5472, 5505, 5508, 5520, 5632, 5637, 5640, 5649, 5652, 5664, 5697, 5700, 5712, 5760, 5802, 6145, 6148, 6150, 6153, 6160, 6165, 6168, 6177, 6208, 6210, 6213, 6216, 6225, 6228, 6240, 6273, 6276, 6400, 6402, 6405, 6408, 6417, 6420, 6432, 6465, 6468, 6480, 6505, 6562, 6660, 6672, 6720, 6742, 8192, 8194, 8197, 8200, 
8209, 8212, 8214, 8217, 8224, 8229, 8234, 8257, 8260, 8272, 8274, 8277, 8292, 8320, 8330, 8340, 8362, 8449, 8452, 8464, 8466, 8469, 8481, 8512, 8514, 8517, 8529, 8532, 8544, 8577, 8580, 8592, 8704, 8714, 8738, 8744, 8746, 8772, 8784, 8840, 8842, 8872, 9217, 9220, 9222, 9225, 9232, 9237, 9240, 9249, 9252, 9280, 9282, 9285, 9288, 9297, 9300, 9312, 9345, 9348, 9360, 9472, 9477, 9480, 9489, 9492, 9504, 9537, 9540, 9552, 9574, 9600, 9729, 9732, 9744, 9792, 9817, 10240, 10245, 10257, 10260, 10305, 10308, 10320, 10378, 10410, 10497, 10500, 10512, 10645, 10762, 10786, 10852, 10888, 10890, 16385, 16388, 16390, 16393, 16400, 16402, 16405, 16408, 16410, 16417, 16420, 16422, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16470, 16473, 16480, 16482, 16485, 16513, 16516, 16528, 16533, 16536, 16545, 16548, 16640, 16642, 16645, 16648, 16657, 16660, 16662, 16665, 16672, 16674, 16677, 16705, 16708, 16710, 16713, 16720, 16722, 16725, 16728, 16737, 16740, 16768, 16770, 16773, 16776, 16785, 16788, 16800, 16897, 16900, 16912, 16914, 16917, 16920, 16932, 16960, 16965, 16968, 16977, 16980, 16992, 17025, 17028, 17408, 17410, 17413, 17416, 17418, 17425, 17428, 17430, 17433, 17440, 17442, 17445, 17448, 17473, 17476, 17478, 17481, 17488, 17490, 17493, 17496, 17505, 17508, 17536, 17538, 17541, 17544, 17553, 17556, 17568, 17665, 17668, 17670, 17673, 17680, 17682, 17685, 17688, 17697, 17700, 17728, 17730, 17733, 17736, 17745, 17748, 17760, 17770, 17793, 17796, 17808, 17920, 17922, 17925, 17928, 17937, 17940, 17952, 17985, 17988, 18000, 18048, 18085, 18433, 18436, 18441, 18448, 18450, 18453, 18456, 18465, 18468, 18496, 18498, 18501, 18504, 18513, 18516, 18528, 18564, 18576, 18688, 18690, 18693, 18696, 18705, 18708, 18720, 18753, 18756, 18768, 18816, 18838, 18945, 18948, 18960, 19008, 20480, 20482, 20485, 20488, 20497, 20500, 20502, 20505, 20512, 20514, 20517, 20520, 20545, 20548, 20550, 20553, 20560, 20562, 20565, 20568, 20577, 20580, 20608, 20610, 20613, 20616, 20625, 20628, 20737, 20740, 20742, 20745, 20752, 20754, 20757, 20760, 20769, 20772, 20800, 20802, 20805, 20808, 20817, 20820, 20832, 20865, 20868, 20880, 20992, 20997, 21000, 21009, 21012, 21024, 21057, 21060, 21072, 21097, 21120, 21505, 21508, 21510, 21513, 21520, 21522, 21525, 21528, 21537, 21540, 21568, 21570, 21573, 21576, 21585, 21588, 21600, 21633, 21636, 21648, 21760, 21762, 21765, 21768, 21777, 21780, 21792, 21825, 21828, 21840, 21888, 22017, 22020, 22032, 22054, 22080, 22528, 22530, 22533, 22536, 22545, 22548, 22560, 22593, 22596, 22608, 22618, 22656, 22785, 22788, 22800, 22848, 23040, 23065, 23173, 23208, 24577, 24580, 24582, 24592, 24594, 24597, 24600, 24609, 24612, 24640, 24645, 24648, 24657, 24660, 24672, 24708, 24720, 24832, 24834, 24837, 24840, 24849, 24852, 24864, 24897, 24900, 24912, 24960, 24985, 25092, 25104, 25152, 25174, 25249, 25600, 25605, 25608, 25617, 25620, 25632, 25665, 25668, 25680, 25728, 25857, 25860, 25872, 25920, 25930, 25960, 26002, 26112, 26260, 26625, 26628, 26640, 26725, 26776, 26880, 26922, 27202, 27297, 32768, 32770, 32773, 32776, 32785, 32788, 32793, 32800, 32805, 32833, 32836, 32848, 32850, 32853, 32856, 32865, 32896, 32901, 32913, 32916, 33025, 33028, 33033, 33040, 33042, 33045, 33048, 33057, 33060, 33088, 33090, 33093, 33096, 33105, 33108, 33153, 33156, 33168, 33193, 33280, 33285, 33290, 33297, 33300, 33345, 33348, 33360, 33793, 33796, 33798, 33801, 33808, 33810, 33813, 33816, 33825, 33856, 33858, 33861, 33864, 33873, 33876, 33888, 33921, 33924, 33936, 34048, 34050, 34053, 34056, 34065, 34068, 34080, 34113, 34116, 
34128, 34176, 34186, 34305, 34308, 34320, 34345, 34368, 34816, 34821, 34833, 34836, 34881, 34884, 34896, 34978, 35073, 35076, 35136, 35173, 35362, 35416, 35418, 35458, 35490, 36865, 36868, 36873, 36880, 36882, 36885, 36888, 36900, 36928, 36930, 36933, 36936, 36945, 36948, 36960, 36993, 36996, 37008, 37120, 37125, 37137, 37140, 37185, 37188, 37200, 37210, 37377, 37380, 37392, 37440, 37542, 37888, 37890, 37893, 37896, 37905, 37908, 37920, 37953, 37956, 37968, 38016, 38038, 38145, 38148, 38160, 38208, 38296, 38305, 38400, 38470, 38500, 38913, 38916, 38928, 38950, 38976, 39081, 39168, 39241, 39250, 39568, 40960, 40965, 40970, 40980, 40994, 41002, 41025, 41028, 41040, 41122, 41130, 41280, 41317, 41474, 41482, 41506, 41512, 41514, 41602, 41608, 41610, 41640, 41985, 41988, 42000, 42048, 42121, 42148, 42240, 42265, 42577, 43018, 43048, 43170, 43348, 43398, 43528, 43530, 43552, 43554, 43560, 43656, 43690, }; const int kmap_size = 43692; //const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2; const int nwant = type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 3 : type == GGML_TYPE_IQ2_S ? 1 : 2; const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 : type == GGML_TYPE_IQ2_XS ? kgrid_2bit_512 : type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? kgrid_1bit_2048 : kgrid_2bit_1024; uint64_t * kgrid_q2xs; int * kmap_q2xs; uint16_t * kneighbors_q2xs; //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size); uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t)); for (int k = 0; k < grid_size; ++k) { int8_t * pos = (int8_t *)(the_grid + k); for (int i = 0; i < 8; ++i) { int l = (kgrid[k] >> 2*i) & 0x3; pos[i] = 2*l + 1; } } kgrid_q2xs = the_grid; iq2_data[gindex].grid = the_grid; kmap_q2xs = (int *)malloc(kmap_size*sizeof(int)); iq2_data[gindex].map = kmap_q2xs; for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1; uint64_t aux64; uint8_t * aux8 = (uint8_t *)&aux64; for (int i = 0; i < grid_size; ++i) { aux64 = kgrid_q2xs[i]; uint16_t index = 0; for (int k=0; k<8; ++k) { uint16_t q = (aux8[k] - 1)/2; index |= (q << 2*k); } kmap_q2xs[index] = i; } int8_t pos[8]; int * dist2 = (int *)malloc(2*grid_size*sizeof(int)); int num_neighbors = 0, num_not_in_map = 0; for (int i = 0; i < kmap_size; ++i) { if (kmap_q2xs[i] >= 0) continue; ++num_not_in_map; for (int k = 0; k < 8; ++k) { int l = (i >> 2*k) & 0x3; pos[k] = 2*l + 1; } for (int j = 0; j < grid_size; ++j) { const int8_t * pg = (const int8_t *)(kgrid_q2xs + j); int d2 = 0; for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); dist2[2*j+0] = d2; dist2[2*j+1] = j; } qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func); int n = 0; int d2 = dist2[0]; int nhave = 1; for (int j = 0; j < grid_size; ++j) { if (dist2[2*j] > d2) { if (nhave == nwant) break; d2 = dist2[2*j]; ++nhave; } ++n; } num_neighbors += n; } //printf("%s: %d neighbours in total\n", __func__, num_neighbors); kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t)); iq2_data[gindex].neighbours = kneighbors_q2xs; int counter = 0; for (int i = 0; i < kmap_size; ++i) { if (kmap_q2xs[i] >= 0) continue; for (int k = 0; k < 8; ++k) { int l = (i >> 2*k) & 0x3; pos[k] = 2*l + 1; } for (int j = 0; j < grid_size; ++j) { const int8_t * pg = (const int8_t *)(kgrid_q2xs + j); int d2 = 0; for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); dist2[2*j+0] = d2; dist2[2*j+1] = j; } qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func); kmap_q2xs[i] = 
-(counter + 1); int d2 = dist2[0]; uint16_t * start = &kneighbors_q2xs[counter++]; int n = 0, nhave = 1; for (int j = 0; j < grid_size; ++j) { if (dist2[2*j] > d2) { if (nhave == nwant) break; d2 = dist2[2*j]; ++nhave; } kneighbors_q2xs[counter++] = dist2[2*j+1]; ++n; } *start = n; } free(dist2); } void iq2xs_free_impl(enum ggml_type type) { GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S); const int gindex = iq2_data_index(type); if (iq2_data[gindex].grid) { free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL; free(iq2_data[gindex].map); iq2_data[gindex].map = NULL; free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL; } } static int iq2_find_best_neighbour(const uint16_t * GGML_RESTRICT neighbours, const uint64_t * GGML_RESTRICT grid, const float * GGML_RESTRICT xval, const float * GGML_RESTRICT weight, float scale, int8_t * GGML_RESTRICT L) { int num_neighbors = neighbours[0]; GGML_ASSERT(num_neighbors > 0); float best_d2 = FLT_MAX; int grid_index = -1; for (int j = 1; j <= num_neighbors; ++j) { const int8_t * pg = (const int8_t *)(grid + neighbours[j]); float d2 = 0; for (int i = 0; i < 8; ++i) { float q = pg[i]; float diff = scale*q - xval[i]; d2 += weight[i]*diff*diff; } if (d2 < best_d2) { best_d2 = d2; grid_index = neighbours[j]; } } GGML_ASSERT(grid_index >= 0); const int8_t * pg = (const int8_t *)(grid + grid_index); for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2; return grid_index; } static void quantize_row_iq2_xxs_impl(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t n, const float * GGML_RESTRICT quant_weights) { const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS); const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; const int * kmap_q2xs = iq2_data[gindex].map; const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; GGML_ASSERT(quant_weights && "missing quantization weights"); GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); const int kMaxQ = 3; const int64_t nbl = n/QK_K; block_iq2_xxs * y = vy; float scales[QK_K/32]; float weight[32]; float xval[32]; int8_t L[32]; int8_t Laux[32]; float waux[32]; uint8_t block_signs[4]; uint32_t q2[2*(QK_K/32)]; for (int ibl = 0; ibl < nbl; ++ibl) { y[ibl].d = GGML_FP32_TO_FP16(0.f); memset(q2, 0, QK_K/4); float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = sumx2/QK_K; for (int ib = 0; ib < QK_K/32; ++ib) { const float * xb = xbl + 32*ib; const float * qw = quant_weights + QK_K*ibl + 32*ib; for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]); for (int k = 0; k < 4; ++k) { int nflip = 0; uint8_t s = 0; for (int i = 0; i < 8; ++i) { if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; else { xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i); } } if (nflip%2) { int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin]; for (int i = 1; i < 8; ++i) { float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i]; if (ax < min) { min = ax; imin = i; } } xval[8*k+imin] = -xval[8*k+imin]; s ^= (1 << imin); } block_signs[k] = s & 127; } float max = xval[0]; for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]); if (max < GROUP_MAX_EPS) { scales[ib] = 0; memset(L, 
0, 32); continue; } float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight); float eff_max = scale*kMaxQ; float best = 0; for (int is = -6; is <= 6; ++is) { float id = (2*kMaxQ-1+is*0.1f)/eff_max; float this_scale = 1/id; for (int k = 0; k < 4; ++k) { for (int i = 0; i < 8; ++i) { int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l)); } uint16_t u = 0; for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i); int grid_index = kmap_q2xs[u]; if (grid_index < 0) { const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 32; ++i) { float w = weight[i]; float q = 2*Laux[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { scale = sumqx/sumq2; best = scale*sumqx; memcpy(L, Laux, 32); } } if (scale > 0) { float id = 1/scale; for (int k = 0; k < 4; ++k) { uint16_t u = 0; for (int i = 0; i < 8; ++i) { int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); l = MAX(0, MIN(kMaxQ-1, l)); u |= (l << 2*i); } int grid_index = kmap_q2xs[u]; if (grid_index < 0) { const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k); } const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index); for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2; } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 32; ++i) { float w = weight[i]; float q = 2*L[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0) scale = sumqx/sumq2; } if (scale < 0) { // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale) // and correspondingly flip quant signs. 
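                // Reconstruction multiplies each quant (2*L + 1) by the block scale and its sign, so
                // negating the scale while flipping all the signs reproduces exactly the same values.
                // The scale has to end up non-negative because it is later stored as an unsigned
                // 4-bit value in the top bits of q2[2*ib+1].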
scale = -scale; for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127; } for (int k = 0; k < 4; ++k) { uint16_t u = 0; for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i); int grid_index = kmap_q2xs[u]; if (grid_index < 0) { printf("Oops: found point %u not on grid:", u); for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]); printf("\n"); GGML_ABORT("fatal error"); } q2[2*ib+0] |= ((uint32_t) grid_index << 8*k); q2[2*ib+1] |= (block_signs[k] << 7*k); } GGML_ASSERT(scale >= 0); scales[ib] = scale; max_scale = MAX(max_scale, scale); } if (!max_scale) { memset(y[ibl].qs, 0, QK_K/4); continue; } float d = max_scale/31; y[ibl].d = GGML_FP32_TO_FP16(d); float id = 1/d; for (int ib = 0; ib < QK_K/32; ++ib) { int l = nearest_int(0.5f*(id*scales[ib]-1)); l = MAX(0, MIN(15, l)); q2[2*ib+1] |= ((uint32_t)l << 28); } memcpy(y[ibl].qs, q2, QK_K/4); } } static void quantize_row_iq2_xs_impl(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t n, const float * GGML_RESTRICT quant_weights) { const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS); const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; const int * kmap_q2xs = iq2_data[gindex].map; const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; GGML_ASSERT(quant_weights && "missing quantization weights"); GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); const int kMaxQ = 3; const int64_t nbl = n/QK_K; block_iq2_xs * y = vy; float scales[QK_K/16]; float weight[16]; float xval[16]; int8_t L[16]; int8_t Laux[16]; float waux[16]; bool is_on_grid[2]; bool is_on_grid_aux[2]; uint8_t block_signs[2]; uint16_t q2[2*(QK_K/16)]; for (int ibl = 0; ibl < nbl; ++ibl) { y[ibl].d = GGML_FP32_TO_FP16(0.f); memset(q2, 0, QK_K/4); memset(y[ibl].scales, 0, QK_K/32); float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = sumx2/QK_K; for (int ib = 0; ib < QK_K/16; ++ib) { const float * xb = xbl + 16*ib; const float * qw = quant_weights + QK_K*ibl + 16*ib; for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]); for (int k = 0; k < 2; ++k) { int nflip = 0; uint8_t s = 0; for (int i = 0; i < 8; ++i) { if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; else { xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i); } } if (nflip%2) { int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin]; for (int i = 1; i < 8; ++i) { float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i]; if (ax < min) { min = ax; imin = i; } } xval[8*k+imin] = -xval[8*k+imin]; s ^= (1 << imin); } block_signs[k] = s & 127; } float max = xval[0]; for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]); if (max < GROUP_MAX_EPS) { scales[ib] = 0; memset(L, 0, 16); continue; } float best = 0; float scale = max/(2*kMaxQ-1); is_on_grid[0] = is_on_grid[1] = true; for (int is = -9; is <= 9; ++is) { float id = (2*kMaxQ-1+is*0.1f)/max; float this_scale = 1/id; for (int k = 0; k < 2; ++k) { for (int i = 0; i < 8; ++i) { int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l)); } uint16_t u = 0; for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i); int grid_index = kmap_q2xs[u]; is_on_grid_aux[k] = true; if (grid_index < 0) { is_on_grid_aux[k] = false; const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; 
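                    // A negative kmap_q2xs entry encodes -(offset+1) into kneighbors_q2xs (built in
                    // iq2xs_init_impl above): the first element at that offset is the neighbour count,
                    // followed by the candidate grid indices searched by iq2_find_best_neighbour.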
grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 16; ++i) { float w = weight[i]; float q = 2*Laux[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { scale = sumqx/sumq2; best = scale*sumqx; for (int i = 0; i < 16; ++i) L[i] = Laux[i]; for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k]; } } int n_not_ongrid = 0; for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid; if (n_not_ongrid > 0 && scale > 0) { float id = 1/scale; for (int k = 0; k < 2; ++k) { if (is_on_grid[k]) continue; uint16_t u = 0; for (int i = 0; i < 8; ++i) { int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); l = MAX(0, MIN(kMaxQ-1, l)); u |= (l << 2*i); L[8*k + i] = l; } int grid_index = kmap_q2xs[u]; if (grid_index < 0) { const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 16; ++i) { float w = weight[i]; float q = 2*L[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0) scale = sumqx/sumq2; } if (scale < 0) { scale = -scale; for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127; } for (int k = 0; k < 2; ++k) { uint16_t u = 0; for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i); int grid_index = kmap_q2xs[u]; if (grid_index < 0) { printf("Oops: found point %u not on grid:", u); for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]); printf("\n"); GGML_ABORT("fatal error"); } q2[2*ib+k] = grid_index | (block_signs[k] << 9); } GGML_ASSERT(scale >= 0); scales[ib] = scale; max_scale = MAX(max_scale, scale); } if (!max_scale) { memset(y[ibl].qs, 0, QK_K/4); continue; } float d = max_scale/31; y[ibl].d = GGML_FP32_TO_FP16(d); float id = 1/d; for (int ib = 0; ib < QK_K/16; ++ib) { int l = nearest_int(0.5f*(id*scales[ib]-1)); l = MAX(0, MIN(15, l)); if (ib%2 == 0) y[ibl].scales[ib/2] = l; else y[ibl].scales[ib/2] |= (l << 4); } memcpy(y[ibl].qs, q2, QK_K/4); } } size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights); src += n_per_row; qrow += nblock*sizeof(block_iq2_xxs); } return nrow * nblock * sizeof(block_iq2_xxs); } size_t quantize_iq2_xs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights); src += n_per_row; qrow += nblock*sizeof(block_iq2_xs); } return nrow * nblock * sizeof(block_iq2_xs); } // // ============================================= 3-bit using D4 lattice // typedef struct { uint32_t * grid; int * map; uint16_t * neighbours; } iq3_entry_t; static iq3_entry_t iq3_data[2] = { {NULL, NULL, NULL}, {NULL, NULL, NULL}, }; static inline int iq3_data_index(int grid_size) { (void)grid_size; GGML_ASSERT(grid_size == 256 || grid_size == 512); return grid_size == 256 ? 
0 : 1; } static int iq3_compare_func(const void * left, const void * right) { const int * l = (const int *)left; const int * r = (const int *)right; return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0; } void iq3xs_init_impl(int grid_size) { const int gindex = iq3_data_index(grid_size); if (iq3_data[gindex].grid) { return; } static const uint16_t kgrid_256[256] = { 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74, 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159, 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321, 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531, 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664, 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978, 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105, 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228, 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553, 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722, 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063, 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389, 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746, 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153, 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610, 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992, }; static const uint16_t kgrid_512[512] = { 0, 1, 2, 5, 7, 8, 9, 10, 12, 14, 16, 17, 21, 27, 32, 34, 37, 39, 41, 43, 48, 50, 57, 60, 63, 64, 65, 66, 68, 72, 73, 77, 80, 83, 87, 89, 93, 100, 113, 117, 122, 128, 129, 133, 135, 136, 139, 142, 145, 149, 152, 156, 162, 165, 167, 169, 171, 184, 187, 195, 201, 205, 208, 210, 217, 219, 222, 228, 232, 234, 247, 249, 253, 256, 267, 271, 273, 276, 282, 288, 291, 297, 312, 322, 324, 336, 338, 342, 347, 353, 357, 359, 374, 379, 390, 393, 395, 409, 426, 441, 448, 450, 452, 464, 466, 470, 475, 488, 492, 512, 513, 514, 516, 520, 521, 523, 525, 527, 528, 530, 537, 540, 542, 556, 558, 561, 570, 576, 577, 579, 582, 584, 588, 593, 600, 603, 609, 616, 618, 632, 638, 640, 650, 653, 655, 656, 660, 666, 672, 675, 685, 688, 698, 705, 708, 711, 712, 715, 721, 727, 728, 732, 737, 754, 760, 771, 773, 778, 780, 793, 795, 802, 806, 808, 812, 833, 840, 843, 849, 856, 858, 873, 912, 916, 919, 932, 934, 961, 963, 968, 970, 977, 989, 993, 1010, 1016, 1024, 1025, 1027, 1029, 1031, 1032, 1034, 1036, 1038, 1041, 1043, 1047, 1048, 1050, 1057, 1059, 1061, 1064, 1066, 1079, 1080, 1083, 1085, 1088, 1090, 1096, 1099, 1103, 1106, 1109, 1113, 1116, 1122, 1129, 1153, 1156, 1159, 1169, 1171, 1176, 1183, 1185, 1195, 1199, 1209, 1212, 1216, 1218, 1221, 1225, 1234, 1236, 1241, 1243, 1250, 1256, 1270, 1281, 1287, 1296, 1299, 1306, 1309, 1313, 1338, 1341, 1348, 1353, 1362, 1375, 1376, 1387, 1400, 1408, 1410, 1415, 1425, 1453, 1457, 1477, 1481, 1494, 1496, 1507, 1512, 1538, 1545, 1547, 1549, 1551, 1554, 1561, 1563, 1565, 1570, 1572, 1575, 1577, 1587, 1593, 1601, 1603, 1605, 1612, 1617, 1619, 1632, 1648, 1658, 1662, 1664, 1674, 1680, 1690, 1692, 1704, 1729, 1736, 1740, 1745, 1747, 1751, 1752, 1761, 1763, 1767, 
1773, 1787, 1795, 1801, 1806, 1810, 1817, 1834, 1840, 1844, 1857, 1864, 1866, 1877, 1882, 1892, 1902, 1915, 1934, 1953, 1985, 1987, 2000, 2002, 2013, 2048, 2052, 2058, 2064, 2068, 2071, 2074, 2081, 2088, 2104, 2114, 2119, 2121, 2123, 2130, 2136, 2141, 2147, 2153, 2157, 2177, 2179, 2184, 2189, 2193, 2203, 2208, 2223, 2226, 2232, 2244, 2249, 2251, 2256, 2258, 2265, 2269, 2304, 2306, 2324, 2335, 2336, 2361, 2373, 2375, 2385, 2418, 2443, 2460, 2480, 2504, 2509, 2520, 2531, 2537, 2562, 2568, 2572, 2578, 2592, 2596, 2599, 2602, 2614, 2620, 2625, 2627, 2629, 2634, 2641, 2650, 2682, 2688, 2697, 2707, 2712, 2718, 2731, 2754, 2759, 2760, 2775, 2788, 2793, 2805, 2811, 2817, 2820, 2832, 2842, 2854, 2890, 2902, 2921, 2923, 2978, 3010, 3012, 3026, 3081, 3083, 3085, 3097, 3099, 3120, 3136, 3152, 3159, 3188, 3210, 3228, 3234, 3245, 3250, 3256, 3264, 3276, 3281, 3296, 3349, 3363, 3378, 3392, 3395, 3420, 3440, 3461, 3488, 3529, 3531, 3584, 3588, 3591, 3600, 3602, 3614, 3616, 3628, 3634, 3650, 3657, 3668, 3683, 3685, 3713, 3716, 3720, 3726, 3729, 3736, 3753, 3778, 3802, 3805, 3819, 3841, 3845, 3851, 3856, 3880, 3922, 3938, 3970, 3993, 4032, }; const int kmap_size = 4096; const int nwant = grid_size == 256 ? 2 : 3; const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512; uint32_t * kgrid_q3xs; int * kmap_q3xs; uint16_t * kneighbors_q3xs; //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size); uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t)); for (int k = 0; k < grid_size; ++k) { int8_t * pos = (int8_t *)(the_grid + k); for (int i = 0; i < 4; ++i) { int l = (kgrid[k] >> 3*i) & 0x7; pos[i] = 2*l + 1; } } kgrid_q3xs = the_grid; iq3_data[gindex].grid = the_grid; kmap_q3xs = (int *)malloc(kmap_size*sizeof(int)); iq3_data[gindex].map = kmap_q3xs; for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1; uint32_t aux32; uint8_t * aux8 = (uint8_t *)&aux32; for (int i = 0; i < grid_size; ++i) { aux32 = kgrid_q3xs[i]; uint16_t index = 0; for (int k=0; k<4; ++k) { uint16_t q = (aux8[k] - 1)/2; index |= (q << 3*k); } kmap_q3xs[index] = i; } int8_t pos[4]; int * dist2 = (int *)malloc(2*grid_size*sizeof(int)); int num_neighbors = 0, num_not_in_map = 0; for (int i = 0; i < kmap_size; ++i) { if (kmap_q3xs[i] >= 0) continue; ++num_not_in_map; for (int k = 0; k < 4; ++k) { int l = (i >> 3*k) & 0x7; pos[k] = 2*l + 1; } for (int j = 0; j < grid_size; ++j) { const int8_t * pg = (const int8_t *)(kgrid_q3xs + j); int d2 = 0; for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); dist2[2*j+0] = d2; dist2[2*j+1] = j; } qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func); int n = 0; int d2 = dist2[0]; int nhave = 1; for (int j = 0; j < grid_size; ++j) { if (dist2[2*j] > d2) { if (nhave == nwant) break; d2 = dist2[2*j]; ++nhave; } ++n; } num_neighbors += n; } //printf("%s: %d neighbours in total\n", __func__, num_neighbors); kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t)); iq3_data[gindex].neighbours = kneighbors_q3xs; int counter = 0; for (int i = 0; i < kmap_size; ++i) { if (kmap_q3xs[i] >= 0) continue; for (int k = 0; k < 4; ++k) { int l = (i >> 3*k) & 0x7; pos[k] = 2*l + 1; } for (int j = 0; j < grid_size; ++j) { const int8_t * pg = (const int8_t *)(kgrid_q3xs + j); int d2 = 0; for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]); dist2[2*j+0] = d2; dist2[2*j+1] = j; } qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func); kmap_q3xs[i] = -(counter + 1); int d2 = 
dist2[0]; uint16_t * start = &kneighbors_q3xs[counter++]; int n = 0, nhave = 1; for (int j = 0; j < grid_size; ++j) { if (dist2[2*j] > d2) { if (nhave == nwant) break; d2 = dist2[2*j]; ++nhave; } kneighbors_q3xs[counter++] = dist2[2*j+1]; ++n; } *start = n; } free(dist2); } void iq3xs_free_impl(int grid_size) { GGML_ASSERT(grid_size == 256 || grid_size == 512); const int gindex = iq3_data_index(grid_size); if (iq3_data[gindex].grid) { free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL; free(iq3_data[gindex].map); iq3_data[gindex].map = NULL; free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL; } } static int iq3_find_best_neighbour(const uint16_t * GGML_RESTRICT neighbours, const uint32_t * GGML_RESTRICT grid, const float * GGML_RESTRICT xval, const float * GGML_RESTRICT weight, float scale, int8_t * GGML_RESTRICT L) { int num_neighbors = neighbours[0]; GGML_ASSERT(num_neighbors > 0); float best_d2 = FLT_MAX; int grid_index = -1; for (int j = 1; j <= num_neighbors; ++j) { const int8_t * pg = (const int8_t *)(grid + neighbours[j]); float d2 = 0; for (int i = 0; i < 4; ++i) { float q = pg[i]; float diff = scale*q - xval[i]; d2 += weight[i]*diff*diff; } if (d2 < best_d2) { best_d2 = d2; grid_index = neighbours[j]; } } GGML_ASSERT(grid_index >= 0); const int8_t * pg = (const int8_t *)(grid + grid_index); for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2; return grid_index; } static void quantize_row_iq3_xxs_impl(int grid_size, const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t n, const float * GGML_RESTRICT quant_weights) { const int gindex = iq3_data_index(grid_size); const uint32_t * kgrid_q3xs = iq3_data[gindex].grid; const int * kmap_q3xs = iq3_data[gindex].map; const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours; //GGML_ASSERT(quant_weights && "missing quantization weights"); GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); const int kMaxQ = 8; const int64_t nbl = n/QK_K; ggml_fp16_t * dh; uint8_t * qs; int block_size; if (grid_size == 256) { block_iq3_xxs * y = vy; dh = &y->d; qs = y->qs; block_size = sizeof(block_iq3_xxs); } else { block_iq3_s * y = vy; dh = &y->d; qs = y->qs; block_size = sizeof(block_iq3_s); } int quant_size = block_size - sizeof(ggml_fp16_t); float scales[QK_K/32]; float weight[32]; float xval[32]; int8_t L[32]; int8_t Laux[32]; float waux[32]; bool is_on_grid[8]; bool is_on_grid_aux[8]; uint8_t block_signs[8]; uint8_t q3[3*(QK_K/8)+QK_K/32]; uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4); uint8_t * qh = q3 + 3*(QK_K/8); for (int ibl = 0; ibl < nbl; ++ibl) { dh[0] = GGML_FP32_TO_FP16(0.f); memset(q3, 0, 3*QK_K/8+QK_K/32); float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = 2*sumx2/QK_K; for (int ib = 0; ib < QK_K/32; ++ib) { const float * xb = xbl + 32*ib; if (quant_weights) { const float * qw = quant_weights + QK_K*ibl + 32*ib; for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); } else { for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i]; } for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]); for (int k = 0; k < 4; ++k) { int nflip = 0; uint8_t s = 0; for (int i = 0; i < 8; ++i) { if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; else { xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i); } } if (nflip%2) { 
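                    // Only 7 of the 8 sign bits per group are stored (the 8th is recovered from the
                    // parity of the stored bits), so an odd number of flips cannot be represented
                    // exactly. Make the flip count even by toggling the element with the smallest
                    // weighted squared value, which introduces the least error.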
int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin]; for (int i = 1; i < 8; ++i) { float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i]; if (ax < min) { min = ax; imin = i; } } xval[8*k+imin] = -xval[8*k+imin]; s ^= (1 << imin); } block_signs[k] = s & 127; } float max = xval[0]; for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]); if (max < GROUP_MAX_EPS_IQ3_XXS) { scales[ib] = 0; memset(L, 0, 32); continue; } float best = 0; float scale = max/(2*kMaxQ-1); for (int k = 0; k < 8; ++k) is_on_grid[k] = true; for (int is = -15; is <= 15; ++is) { float id = (2*kMaxQ-1+is*0.2f)/max; float this_scale = 1/id; for (int k = 0; k < 8; ++k) { for (int i = 0; i < 4; ++i) { int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l)); } uint16_t u = 0; for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i); int grid_index = kmap_q3xs[u]; is_on_grid_aux[k] = true; if (grid_index < 0) { is_on_grid_aux[k] = false; const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 32; ++i) { float w = weight[i]; float q = 2*Laux[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { scale = sumqx/sumq2; best = scale*sumqx; for (int i = 0; i < 32; ++i) L[i] = Laux[i]; for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k]; } } int n_not_ongrid = 0; for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid; if (n_not_ongrid > 0 && scale > 0) { float id = 1/scale; for (int k = 0; k < 8; ++k) { if (is_on_grid[k]) continue; uint16_t u = 0; for (int i = 0; i < 4; ++i) { int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); l = MAX(0, MIN(kMaxQ-1, l)); u |= (l << 3*i); } int grid_index = kmap_q3xs[u]; if (grid_index < 0) { const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k); } const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index); for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2; } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 32; ++i) { float w = weight[i]; float q = 2*L[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0) scale = sumqx/sumq2; } if (scale < 0) { // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale) // and correspondingly flip quant signs. 
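                // Same sign/scale flip as in quantize_row_iq2_xxs_impl above; for iq3_xxs the four
                // 7-bit sign groups and the 4-bit block scale are packed into scales_and_signs[ib].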
scale = -scale; for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127; } for (int k = 0; k < 8; ++k) { uint16_t u = 0; for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i); int grid_index = kmap_q3xs[u]; if (grid_index < 0) { printf("Oops: found point %u not on grid:", u); for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]); printf("\n"); GGML_ABORT("fatal error"); } if (grid_size == 256) { q3[8*ib+k] = grid_index; } else { q3[8*ib+k] = grid_index & 255; qh[ib] |= ((grid_index >> 8) << k); } } scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21); GGML_ASSERT(scale >= 0); scales[ib] = scale; max_scale = MAX(max_scale, scale); } if (!max_scale) { memset(qs, 0, quant_size); dh += block_size/sizeof(ggml_fp16_t); qs += block_size; continue; } float d = max_scale/31; dh[0] = GGML_FP32_TO_FP16(d * 1.0125f); // small improvement via this fudge factor float id = 1/d; for (int ib = 0; ib < QK_K/32; ++ib) { int l = nearest_int(0.5f*(id*scales[ib]-1)); l = MAX(0, MIN(15, l)); scales_and_signs[ib] |= ((uint32_t)l << 28); } memcpy(qs, q3, quant_size); dh += block_size/sizeof(ggml_fp16_t); qs += block_size; } } size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq3_xxs_impl(256, src, qrow, n_per_row, quant_weights); src += n_per_row; qrow += nblock*sizeof(block_iq3_xxs); } return nrow * nblock * sizeof(block_iq3_xxs); } void quantize_row_iq3_xxs_ref(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); quantize_row_iq3_xxs_impl(256, x, y, k, NULL); } static void quantize_row_iq3_s_impl(int block_size, const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int n, const float * GGML_RESTRICT quant_weights, float * scales, float * weight, float * xval, int8_t * L, int8_t * Laux, float * waux, bool * is_on_grid, bool * is_on_grid_aux, uint8_t * block_signs) { const int gindex = iq3_data_index(512); const uint32_t * kgrid_q3xs = iq3_data[gindex].grid; const int * kmap_q3xs = iq3_data[gindex].map; const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours; //GGML_ASSERT(quant_weights && "missing quantization weights"); GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); const int kMaxQ = 8; const int64_t nbl = n/QK_K; block_iq3_s * y = vy; const int bs4 = block_size/4; const int bs8 = block_size/8; for (int ibl = 0; ibl < nbl; ++ibl) { memset(&y[ibl], 0, sizeof(block_iq3_s)); y[ibl].d = GGML_FP32_TO_FP16(0.f); uint8_t * qs = y[ibl].qs; uint8_t * qh = y[ibl].qh; uint8_t * signs = y[ibl].signs; float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = 2*sumx2/QK_K; for (int ib = 0; ib < QK_K/block_size; ++ib) { const float * xb = xbl + block_size*ib; if (quant_weights) { const float * qw = quant_weights + QK_K*ibl + block_size*ib; for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); } else { for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i]; } for (int i = 0; i < block_size; ++i) waux[i] = sqrtf(weight[i]); for (int k = 0; k < bs8; ++k) { uint8_t s 
= 0; for (int i = 0; i < 8; ++i) { if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; else { xval[8*k + i] = -xb[8*k + i]; s |= (1 << i); } } block_signs[k] = s; } float max = xval[0]; for (int i = 1; i < block_size; ++i) max = MAX(max, xval[i]); if (!max) { scales[ib] = 0; continue; } float best = 0; float scale = max/(2*kMaxQ-1); for (int k = 0; k < bs4; ++k) is_on_grid[k] = false; for (int is = -9; is <= 9; ++is) { float id = (2*kMaxQ-1+is*0.2f)/max; float this_scale = 1/id; for (int k = 0; k < bs4; ++k) { for (int i = 0; i < 4; ++i) { int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l)); } uint16_t u = 0; for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i); int grid_index = kmap_q3xs[u]; is_on_grid_aux[k] = true; if (grid_index < 0) { is_on_grid_aux[k] = false; const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < block_size; ++i) { float w = weight[i]; float q = 2*Laux[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { scale = sumqx/sumq2; best = scale*sumqx; for (int i = 0; i < block_size; ++i) L[i] = Laux[i]; for (int k = 0; k < bs4; ++k) is_on_grid[k] = is_on_grid_aux[k]; } } int n_not_ongrid = 0; for (int k = 0; k < bs4; ++k) if (!is_on_grid[k]) ++n_not_ongrid; if (n_not_ongrid > 0 && scale > 0) { float id = 1/scale; for (int k = 0; k < bs4; ++k) { //if (is_on_grid[k]) continue; uint16_t u = 0; for (int i = 0; i < 4; ++i) { int l = nearest_int(0.5f*(id*xval[4*k+i]-1)); l = MAX(0, MIN(kMaxQ-1, l)); u |= (l << 3*i); } int grid_index = kmap_q3xs[u]; if (grid_index < 0) { const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1; grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k); } const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index); for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2; } float sumqx = 0, sumq2 = 0; for (int i = 0; i < block_size; ++i) { float w = weight[i]; float q = 2*L[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0) scale = sumqx/sumq2; } if (scale < 0) { // This should never happen, but just in case, flip scale so that it is positive (we use uint's to encode the scale) // and correspondingly flip quant signs. 
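                // Unlike iq2_xxs/iq3_xxs, iq3_s stores all 8 sign bits of each group explicitly,
                // so the whole byte is complemented here (no 7-bit mask or parity-implied sign).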
scale = -scale; for (int k = 0; k < bs8; ++k) block_signs[k] = ~block_signs[k]; } for (int k = 0; k < bs4; ++k) { uint16_t u = 0; for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i); int grid_index = kmap_q3xs[u]; if (grid_index < 0) { printf("Oops: found point %u not on grid:", u); for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]); printf("\n"); GGML_ABORT("fatal error"); } qs[k] = grid_index & 255; qh[(ib*bs4+k)/8] |= ((grid_index >> 8) << ((ib*bs4+k)%8)); } qs += bs4; for (int k = 0; k < bs8; ++k) signs[k] = block_signs[k]; signs += bs8; GGML_ASSERT(scale >= 0); scales[ib] = scale; max_scale = MAX(max_scale, scale); } if (!max_scale) { continue; } float d = max_scale/31; y[ibl].d = GGML_FP32_TO_FP16(d * 1.033f); float id = 1/d; for (int ib = 0; ib < QK_K/block_size; ib += 2) { int l1 = nearest_int(0.5f*(id*scales[ib+0]-1)); l1 = MAX(0, MIN(15, l1)); int l2 = nearest_int(0.5f*(id*scales[ib+1]-1)); l2 = MAX(0, MIN(15, l2)); y[ibl].scales[ib/2] = l1 | (l2 << 4); } } } #define IQ3S_BLOCK_SIZE 32 size_t quantize_iq3_s(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); int64_t nblock = n_per_row/QK_K; float scales[QK_K/IQ3S_BLOCK_SIZE]; float weight[IQ3S_BLOCK_SIZE]; float xval[IQ3S_BLOCK_SIZE]; int8_t L[IQ3S_BLOCK_SIZE]; int8_t Laux[IQ3S_BLOCK_SIZE]; float waux[IQ3S_BLOCK_SIZE]; bool is_on_grid[IQ3S_BLOCK_SIZE/4]; bool is_on_grid_aux[IQ3S_BLOCK_SIZE/4]; uint8_t block_signs[IQ3S_BLOCK_SIZE/8]; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq3_s_impl(IQ3S_BLOCK_SIZE, src, qrow, n_per_row, quant_weights, scales, weight, xval, L, Laux, waux, is_on_grid, is_on_grid_aux, block_signs); src += n_per_row; qrow += nblock*sizeof(block_iq3_s); } return nrow * nblock * sizeof(block_iq3_s); } void quantize_row_iq3_s_ref(const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); quantize_iq3_s(x, y, 1, k, NULL); } // =================================== 1.5 bpw =================================================== static int iq1_find_best_neighbour(const uint16_t * GGML_RESTRICT neighbours, const uint64_t * GGML_RESTRICT grid, const float * GGML_RESTRICT xval, const float * GGML_RESTRICT weight, float * scale, int8_t * GGML_RESTRICT L, int ngrid) { int num_neighbors = neighbours[0]; GGML_ASSERT(num_neighbors > 0); float best_score = -FLT_MAX; int grid_index = -1; for (int j = 1; j <= num_neighbors; ++j) { const int8_t * pg = (const int8_t *)(grid + neighbours[j]); float sumqx = 0, sumq2 = 0; for (int i = 0; i < 8; ++i) { float q = (pg[i] - 3)/2; float w = weight[i]; sumqx += w*q*xval[i]; sumq2 += w*q*q; } if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { *scale = sumqx/sumq2; best_score = *scale * sumqx; grid_index = neighbours[j]; } } if (grid_index < 0) { for (int i = 0; i < ngrid; ++i) { const int8_t * grid_i = (const int8_t *)(grid + i); float sumqx = 0, sumq2 = 0; for (int j = 0; j < 8; ++j) { float w = weight[j]; float q = (grid_i[j] - 3)/2; sumqx += w*q*xval[j]; sumq2 += w*q*q; } if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { *scale = sumqx/sumq2; best_score = *scale*sumqx; grid_index = i; } } } if (grid_index < 0) { printf("Oops, did not find grid point\n"); printf("Have %d neighbours\n", num_neighbors); for (int j = 1; j <= num_neighbors; ++j) { const int8_t * pg = (const int8_t *)(grid + neighbours[j]); float sumqx = 0, sumq2 = 0; for (int i = 0; i < 8; ++i) { float q = (pg[i] - 3)/2; 
float w = weight[i]; sumqx += w*q*xval[i]; sumq2 += w*q*q; } printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2); } } GGML_ASSERT(grid_index >= 0); //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! const int8_t * pg = (const int8_t *)(grid + grid_index); for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2; return grid_index; } static int iq1_find_best_neighbour2(const uint16_t * GGML_RESTRICT neighbours, const uint64_t * GGML_RESTRICT grid, const float * GGML_RESTRICT xval, const float * GGML_RESTRICT weight, float scale, const float * GGML_RESTRICT xg, int8_t * GGML_RESTRICT L, int ngrid) { int num_neighbors = neighbours[0]; GGML_ASSERT(num_neighbors > 0); float best_score = FLT_MAX; int grid_index = -1; for (int j = 1; j <= num_neighbors; ++j) { const int8_t * pg = (const int8_t *)(grid + neighbours[j]); float d2 = 0; for (int i = 0; i < 8; ++i) { float q = xg[(pg[i] - 1)/2]; float w = weight[i]; float diff = scale*q - xval[i]; d2 += w*diff*diff; } if (d2 < best_score) { best_score = d2; grid_index = neighbours[j]; } } if (grid_index < 0) { for (int i = 0; i < ngrid; ++i) { const int8_t * grid_i = (const int8_t *)(grid + i); float d2 = 0; for (int j = 0; j < 8; ++j) { float w = weight[j]; float q = xg[(grid_i[j] - 1)/2]; float diff = scale*q - xval[i]; d2 += w*diff*diff; } if (d2 < best_score) { best_score = d2; grid_index = i; } } } if (grid_index < 0) { printf("Oops, did not find grid point\n"); printf("Have %d neighbours\n", num_neighbors); for (int j = 1; j <= num_neighbors; ++j) { const int8_t * pg = (const int8_t *)(grid + neighbours[j]); float sumqx = 0, sumq2 = 0; for (int i = 0; i < 8; ++i) { float q = xg[(pg[i] - 1)/2]; float w = weight[i]; sumqx += w*q*xval[i]; sumq2 += w*q*q; } printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2); } } GGML_ASSERT(grid_index >= 0); const int8_t * pg = (const int8_t *)(grid + grid_index); for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2; return grid_index; } static int iq1_sort_helper(const void * left, const void * right) { const float * l = left; const float * r = right; return *l < *r ? -1 : *l > *r ? 
1 : 0; } #define IQ1S_BLOCK_SIZE 32 #define IQ1M_BLOCK_SIZE 16 static void quantize_row_iq1_s_impl(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t n, const float * GGML_RESTRICT quant_weights, float * scales, float * weight, float * sumx, float * sumw, float * pairs, int8_t * L, uint16_t * index, int8_t * shifts) { const int gindex = iq2_data_index(GGML_TYPE_IQ1_S); const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; const int * kmap_q2xs = iq2_data[gindex].map; const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; GGML_ASSERT(quant_weights && "missing quantization weights"); GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); block_iq1_s * y = vy; const int64_t nbl = n/QK_K; const int block_size = IQ1S_BLOCK_SIZE; const float x_p[3] = {-1 + IQ1S_DELTA, IQ1S_DELTA, 1 + IQ1S_DELTA}; const float x_m[3] = {-1 - IQ1S_DELTA, -IQ1S_DELTA, 1 - IQ1S_DELTA}; int * idx = (int *)(pairs + 1); for (int ibl = 0; ibl < nbl; ++ibl) { y[ibl].d = GGML_FP32_TO_FP16(0.f); memset(y[ibl].qs, 0, QK_K/8); memset(y[ibl].qh, 0, QK_K/16); float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = 2*sumx2/QK_K; for (int ib = 0; ib < QK_K/block_size; ++ib) { const float * xb = xbl + block_size*ib; const float * qw = quant_weights + QK_K*ibl + block_size*ib; for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); float max = fabsf(xb[0]); for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i])); if (max < GROUP_MAX_EPS_IQ1_S) { scales[ib] = 0; memset(L, 1, block_size); continue; } // Here we solve exactly the sum of squared difference (SSD) weighted minimization problem. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two // boundaries that split the weights xb[i] into 3 groups. To do so, we sort the weights // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale // and score for each possible split.
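// Illustrative sketch (using the Si/Wi notation above): for a candidate split (i1, i2) the quants take the value x_p[0] for the i1 smallest entries, x_p[1] for the middle group and x_p[2] for the rest, so
//     sumqx = S_i1*x_p[0] + (S_i2 - S_i1)*x_p[1] + (S_n - S_i2)*x_p[2]
//     sumq2 = W_i1*x_p[0]^2 + (W_i2 - W_i1)*x_p[1]^2 + (W_n - W_i2)*x_p[2]^2
// and the weighted least-squares scale is sumqx/sumq2 with score sumqx^2/sumq2 (the same is done with x_m for the opposite shift).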
for (int j = 0; j < block_size; ++j) { pairs[2*j] = xb[j]; idx[2*j] = j; } qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper); { sumx[0] = sumw[0] = 0; for (int j = 0; j < block_size; ++j) { int i = idx[2*j]; sumx[j+1] = sumx[j] + weight[i]*xb[i]; sumw[j+1] = sumw[j] + weight[i]; } } float best_score = -FLT_MAX, scale = max; int besti1 = -1, besti2 = -1, best_shift = 0; for (int i1 = 0; i1 <= block_size; ++i1) { for (int i2 = i1; i2 <= block_size; ++i2) { float sumqx = (sumx[i1] - sumx[0])*x_p[0] + (sumx[i2] - sumx[i1])*x_p[1] + (sumx[block_size] - sumx[i2])*x_p[2]; float sumq2 = (sumw[i1] - sumw[0])*x_p[0]*x_p[0] + (sumw[i2] - sumw[i1])*x_p[1]*x_p[1] + (sumw[block_size] - sumw[i2])*x_p[2]*x_p[2]; if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { scale = sumqx/sumq2; best_score = scale*sumqx; besti1 = i1; besti2 = i2; best_shift = 1; } sumqx = (sumx[i1] - sumx[0])*x_m[0] + (sumx[i2] - sumx[i1])*x_m[1] + (sumx[block_size] - sumx[i2])*x_m[2]; sumq2 = (sumw[i1] - sumw[0])*x_m[0]*x_m[0] + (sumw[i2] - sumw[i1])*x_m[1]*x_m[1] + (sumw[block_size] - sumw[i2])*x_m[2]*x_m[2]; if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) { scale = sumqx/sumq2; best_score = scale*sumqx; besti1 = i1; besti2 = i2; best_shift = -1; } } } GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_shift != 0); for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0; for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1; for (int j = besti2; j < block_size; ++j) L[idx[2*j]] = 2; if (scale < 0) { for (int j = 0; j < block_size; ++j) L[j] = 2 - L[j]; scale = -scale; best_shift = -best_shift; } bool all_on_grid = true; const float * xx = best_shift == 1 ? x_p : x_m; for (int k = 0; k < block_size/8; ++k) { uint16_t u = 0; for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j); int grid_index = kmap_q2xs[u]; if (grid_index < 0) { all_on_grid = false; const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S); GGML_ASSERT(grid_index >= 0); } index[k] = grid_index; } if (!all_on_grid) { float sumqx = 0, sumq2 = 0; for (int k = 0; k < block_size/8; ++k) { const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]); for (int j = 0; j < 8; ++j) { float w = weight[8*k + j]; float q = xx[(pg[j] - 1)/2]; sumqx += w*q*xb[8*k+j]; sumq2 += w*q*q; } } if (sumqx > 0 && sumq2 > 0) scale = sumqx/sumq2; } uint16_t h = 0; for (int k = 0; k < block_size/8; ++k) { y[ibl].qs[(block_size/8)*ib + k] = index[k] & 255; h |= (index[k] >> 8) << 3*k; } y[ibl].qh[ib] = h; GGML_ASSERT(scale >= 0); scales[ib] = scale; shifts[ib] = best_shift; max_scale = MAX(max_scale, scale); } if (!max_scale) { continue; } float d = max_scale/15; y[ibl].d = GGML_FP32_TO_FP16(d*1.125f); // 1.125f is another fudge factor. Don't ask me why it is needed. 
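// Illustrative example of the packing below: inverting the encoding l = nearest_int(0.5*(scales[ib]/d - 1)) means a block scale is recovered as roughly d*(2*l + 1); e.g. scales[ib] == 9*d gives l = 4 and reconstructs to 9*d. The shift choice goes into bit 3 and the 4-bit field is stored in the top bits of qh[ib].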
float id = 1/d; for (int ib = 0; ib < QK_K/block_size; ++ib) { int l = nearest_int(0.5f*(id*scales[ib]-1)); l = MAX(0, MIN(7, l)); if (shifts[ib] == -1) l |= 8; y[ibl].qh[ib] |= (l << 12); } } } size_t quantize_iq1_s(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); float scales[QK_K/IQ1S_BLOCK_SIZE]; float weight[IQ1S_BLOCK_SIZE]; int8_t L[IQ1S_BLOCK_SIZE]; float sumx[IQ1S_BLOCK_SIZE+1]; float sumw[IQ1S_BLOCK_SIZE+1]; float pairs[2*IQ1S_BLOCK_SIZE]; uint16_t index[IQ1S_BLOCK_SIZE/8]; int8_t shifts[QK_K/IQ1S_BLOCK_SIZE]; int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights, scales, weight, sumx, sumw, pairs, L, index, shifts); src += n_per_row; qrow += nblock*sizeof(block_iq1_s); } return nrow * nblock * sizeof(block_iq1_s); } static void quantize_row_iq1_m_impl(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t n, const float * GGML_RESTRICT quant_weights, float * scales, float * weight, float * pairs, int8_t * L, uint16_t * index, int8_t * shifts) { const int gindex = iq2_data_index(GGML_TYPE_IQ1_M); const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; const int * kmap_q2xs = iq2_data[gindex].map; const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; //GGML_ASSERT(quant_weights && "missing quantization weights"); GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); block_iq1_m * y = vy; const int64_t nbl = n/QK_K; const int block_size = IQ1M_BLOCK_SIZE; const float x_p[3] = {-1 + IQ1M_DELTA, IQ1M_DELTA, 1 + IQ1M_DELTA}; const float x_m[3] = {-1 - IQ1M_DELTA, -IQ1M_DELTA, 1 - IQ1M_DELTA}; const uint8_t masks[4] = {0x00, 0x80, 0x08, 0x88}; int * idx = (int *)(pairs + 1); float sumqx[4], sumq2[4]; iq1m_scale_t s; const float * xx; for (int ibl = 0; ibl < nbl; ++ibl) { memset(y[ibl].qs, 0, QK_K/8); memset(y[ibl].qh, 0, QK_K/16); memset(y[ibl].scales, 0, QK_K/32); float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = 2*sumx2/QK_K; for (int ib = 0; ib < QK_K/block_size; ++ib) { const float * xb = xbl + block_size*ib; if (quant_weights) { const float * qw = quant_weights + QK_K*ibl + block_size*ib; for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); } else { for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i]; } float max = fabsf(xb[0]); for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i])); if (max < GROUP_MAX_EPS_IQ1_M) { scales[ib] = 0; memset(L, 1, block_size); continue; } // Here we solve exactly the sum of squared difference (SSD) weighted minimization problem. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two // boundaries that split the weights xb[i] into 3 groups. To do so, we sort the weights // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale // and score for each possible split.
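// Unlike the IQ1_S search above, each half of a block may independently use the shifted-up (x_p) or shifted-down (x_m) values, so the loop below keeps four (sumqx, sumq2) accumulators, one per combination of per-half shifts, and scores all four for every candidate split (i1, i2).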
for (int j = 0; j < block_size; ++j) { pairs[2*j] = xb[j]; idx[2*j] = j; } qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper); float best_score = -FLT_MAX, scale = max; int besti1 = -1, besti2 = -1, best_k = -1; // 0: +, + // 1: +, - // 2: -, + // 3: -, - for (int i1 = 0; i1 <= block_size; ++i1) { for (int i2 = i1; i2 <= block_size; ++i2) { memset(sumqx, 0, 4*sizeof(float)); memset(sumq2, 0, 4*sizeof(float)); for (int j = 0; j < i1; ++j) { int i = idx[2*j]; if (i < block_size/2) { sumqx[0] += weight[i]*x_p[0]*xb[i]; sumqx[1] += weight[i]*x_p[0]*xb[i]; sumqx[2] += weight[i]*x_m[0]*xb[i]; sumqx[3] += weight[i]*x_m[0]*xb[i]; sumq2[0] += weight[i]*x_p[0]*x_p[0]; sumq2[1] += weight[i]*x_p[0]*x_p[0]; sumq2[2] += weight[i]*x_m[0]*x_m[0]; sumq2[3] += weight[i]*x_m[0]*x_m[0]; } else { sumqx[0] += weight[i]*x_p[0]*xb[i]; sumqx[2] += weight[i]*x_p[0]*xb[i]; sumqx[1] += weight[i]*x_m[0]*xb[i]; sumqx[3] += weight[i]*x_m[0]*xb[i]; sumq2[0] += weight[i]*x_p[0]*x_p[0]; sumq2[2] += weight[i]*x_p[0]*x_p[0]; sumq2[1] += weight[i]*x_m[0]*x_m[0]; sumq2[3] += weight[i]*x_m[0]*x_m[0]; } } for (int j = i1; j < i2; ++j) { int i = idx[2*j]; if (i < block_size/2) { sumqx[0] += weight[i]*x_p[1]*xb[i]; sumqx[1] += weight[i]*x_p[1]*xb[i]; sumqx[2] += weight[i]*x_m[1]*xb[i]; sumqx[3] += weight[i]*x_m[1]*xb[i]; sumq2[0] += weight[i]*x_p[1]*x_p[1]; sumq2[1] += weight[i]*x_p[1]*x_p[1]; sumq2[2] += weight[i]*x_m[1]*x_m[1]; sumq2[3] += weight[i]*x_m[1]*x_m[1]; } else { sumqx[0] += weight[i]*x_p[1]*xb[i]; sumqx[2] += weight[i]*x_p[1]*xb[i]; sumqx[1] += weight[i]*x_m[1]*xb[i]; sumqx[3] += weight[i]*x_m[1]*xb[i]; sumq2[0] += weight[i]*x_p[1]*x_p[1]; sumq2[2] += weight[i]*x_p[1]*x_p[1]; sumq2[1] += weight[i]*x_m[1]*x_m[1]; sumq2[3] += weight[i]*x_m[1]*x_m[1]; } } for (int j = i2; j < block_size; ++j) { int i = idx[2*j]; if (i < block_size/2) { sumqx[0] += weight[i]*x_p[2]*xb[i]; sumqx[1] += weight[i]*x_p[2]*xb[i]; sumqx[2] += weight[i]*x_m[2]*xb[i]; sumqx[3] += weight[i]*x_m[2]*xb[i]; sumq2[0] += weight[i]*x_p[2]*x_p[2]; sumq2[1] += weight[i]*x_p[2]*x_p[2]; sumq2[2] += weight[i]*x_m[2]*x_m[2]; sumq2[3] += weight[i]*x_m[2]*x_m[2]; } else { sumqx[0] += weight[i]*x_p[2]*xb[i]; sumqx[2] += weight[i]*x_p[2]*xb[i]; sumqx[1] += weight[i]*x_m[2]*xb[i]; sumqx[3] += weight[i]*x_m[2]*xb[i]; sumq2[0] += weight[i]*x_p[2]*x_p[2]; sumq2[2] += weight[i]*x_p[2]*x_p[2]; sumq2[1] += weight[i]*x_m[2]*x_m[2]; sumq2[3] += weight[i]*x_m[2]*x_m[2]; } } for (int k = 0; k < 4; ++k) { if (sumq2[k] > 0 && sumqx[k]*sumqx[k] > best_score*sumq2[k]) { scale = sumqx[k]/sumq2[k]; best_score = scale*sumqx[k]; besti1 = i1; besti2 = i2; best_k = k; } } } } GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_k >= 0); for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0; for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1; for (int j = besti2; j < block_size; ++j) L[idx[2*j]] = 2; if (scale < 0) { for (int j = 0; j < block_size; ++j) L[j] = 2 - L[j]; scale = -scale; best_k = best_k == 0 ? 3 : best_k == 1 ? 2 : best_k == 2 ? 1 : 0; } bool all_on_grid = true; for (int k = 0; k < block_size/8; ++k) { if (k == 0) xx = best_k < 2 ? x_p : x_m; else xx = best_k%2 == 0 ? 
x_p : x_m; uint16_t u = 0; for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j); int grid_index = kmap_q2xs[u]; if (grid_index < 0) { all_on_grid = false; const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S); GGML_ASSERT(grid_index >= 0); } index[k] = grid_index; } if (!all_on_grid) { float sumqx_f = 0, sumq2_f = 0; for (int k = 0; k < block_size/8; ++k) { if (k == 0) xx = best_k < 2 ? x_p : x_m; else xx = best_k%2 == 0 ? x_p : x_m; const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]); for (int j = 0; j < 8; ++j) { float w = weight[8*k + j]; float q = xx[(pg[j] - 1)/2]; sumqx_f += w*q*xb[8*k+j]; sumq2_f += w*q*q; } } if (sumqx_f > 0 && sumq2_f > 0) scale = sumqx_f/sumq2_f; } y[ibl].qs[2*ib + 0] = index[0] & 255; y[ibl].qs[2*ib + 1] = index[1] & 255; y[ibl].qh[ib] = (index[0] >> 8) | ((index[1] >> 8) << 4); GGML_ASSERT(scale >= 0); scales[ib] = scale; shifts[ib] = best_k; max_scale = MAX(max_scale, scale); } if (!max_scale) { continue; } uint16_t * sc = (uint16_t *)y[ibl].scales; float d = max_scale/15; float id = 1/d; float sumqx_f = 0, sumq2_f = 0; for (int ib = 0; ib < QK_K/block_size; ++ib) { int l = nearest_int(0.5f*(id*scales[ib+0]-1)); l = MAX(0, MIN(7, l)); sc[ib/4] |= (l << 3*(ib%4)); y[ibl].qh[ib] |= masks[shifts[ib]]; const float * xb = xbl + block_size*ib; if (quant_weights) { const float * qw = quant_weights + QK_K*ibl + block_size*ib; for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); } else { for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i]; } for (int k = 0; k < block_size/8; ++k) { if (k == 0) xx = shifts[ib] < 2 ? x_p : x_m; else xx = shifts[ib]%2 == 0 ? x_p : x_m; const int8_t * pg = (const int8_t *)(kgrid_q2xs + y[ibl].qs[2*ib+k] + ((y[ibl].qh[ib] << (8 - 4*k)) & 0x700)); for (int j = 0; j < 8; ++j) { float w = weight[8*k + j]; float q = xx[(pg[j] - 1)/2]*(2*l+1); sumqx_f += w*q*xb[8*k+j]; sumq2_f += w*q*q; } } } if (sumq2_f > 0) d = sumqx_f/sumq2_f; s.f16 = GGML_FP32_TO_FP16(d*1.1125f); // 1.1125f is another fudge factor. Don't ask me why it is needed. 
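// For illustration: the fp16 super-block scale s is scattered nibble-wise into the top 4 bits of the four 16-bit scale words below (nibble i of s.u16 ends up in bits 12..15 of sc[i]), while the 3-bit per-block scales already occupy the lower 12 bits of each word.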
sc[0] |= ((s.u16 & 0x000f) << 12); sc[1] |= ((s.u16 & 0x00f0) << 8); sc[2] |= ((s.u16 & 0x0f00) << 4); sc[3] |= ((s.u16 & 0xf000) << 0); } } size_t quantize_iq1_m(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); float scales[QK_K/IQ1M_BLOCK_SIZE]; float weight[IQ1M_BLOCK_SIZE]; int8_t L[IQ1M_BLOCK_SIZE]; float pairs[2*IQ1M_BLOCK_SIZE]; uint16_t index[IQ1M_BLOCK_SIZE/8]; int8_t shifts[QK_K/IQ1M_BLOCK_SIZE]; int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq1_m_impl(src, qrow, n_per_row, quant_weights, scales, weight, pairs, L, index, shifts); src += n_per_row; qrow += nblock*sizeof(block_iq1_m); } return nrow * nblock * sizeof(block_iq1_m); } // ============================ 4-bit non-linear quants static void quantize_row_iq4_nl_impl(const int super_block_size, const int block_size, const float * GGML_RESTRICT x, ggml_fp16_t * dh, uint8_t * q4, uint16_t * scales_h, uint8_t * scales_l, float * scales, float * weight, uint8_t * L, const int8_t * values, const float * quant_weights, const int ntry) { float sigma2 = 0; for (int j = 0; j < super_block_size; ++j) sigma2 += x[j]*x[j]; sigma2 *= 2.f/super_block_size; memset(q4, 0, super_block_size/2); dh[0] = GGML_FP32_TO_FP16(0.f); float max_scale = 0, amax_scale = 0; for (int ib = 0; ib < super_block_size/block_size; ++ib) { const float * xb = x + ib*block_size; uint8_t * Lb = L + ib*block_size; if (quant_weights) { const float * qw = quant_weights + ib*block_size; for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]); } else { for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j]; } float amax = 0, max = 0; for (int j = 0; j < block_size; ++j) { float ax = fabsf(xb[j]); if (ax > amax) { amax = ax; max = xb[j]; } } if (amax < GROUP_MAX_EPS) { scales[ib] = 0; continue; } float d = ntry > 0 ? -max/values[0] : max/values[0]; float id = 1/d; float sumqx = 0, sumq2 = 0; for (int j = 0; j < block_size; ++j) { float al = id*xb[j]; int l = best_index_int8(16, values, al); Lb[j] = l; float q = values[l]; float w = weight[j]; sumqx += w*q*xb[j]; sumq2 += w*q*q; } d = sumqx/sumq2; float best = d*sumqx; for (int itry = -ntry; itry <= ntry; ++itry) { id = (itry + values[0])/max; sumqx = sumq2 = 0; for (int j = 0; j < block_size; ++j) { float al = id*xb[j]; int l = best_index_int8(16, values, al); float q = values[l]; float w = weight[j]; sumqx += w*q*xb[j]; sumq2 += w*q*q; } if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { d = sumqx/sumq2; best = d * sumqx; } } scales[ib] = d; float abs_d = fabsf(d); if (abs_d > amax_scale) { amax_scale = abs_d; max_scale = d; } } if (super_block_size/block_size > 1) { int nb = super_block_size/block_size; memset(scales_h, 0, ((nb+7)/8)*sizeof(uint16_t)); float d = -max_scale/32; dh[0] = GGML_FP32_TO_FP16(d); float id = d ? 1/d : 0.f; for (int ib = 0; ib < super_block_size/block_size; ++ib) { int l = nearest_int(id*scales[ib]); l = MAX(-32, MIN(31, l)); float dl = d * l; float idl = dl ? 1/dl : 0.f; uint8_t * Lb = L + ib*block_size; const float * xb = x + ib*block_size; for (int j = 0; j < block_size; ++j) { Lb[j] = best_index_int8(16, values, idl*xb[j]); } l += 32; uint8_t l_l = l & 0xf; uint8_t l_h = l >> 4; if (ib%2 == 0) scales_l[ib/2] = l_l; else scales_l[ib/2] |= (l_l << 4); scales_h[ib/8] |= (l_h << 2*(ib%8)); } } else { dh[0] = GGML_FP32_TO_FP16(scales[0]); if (ntry > 0) { float id = scales[0] ? 
1/scales[0] : 0; for (int j = 0; j < super_block_size; ++j) { L[j] = best_index_int8(16, values, id*x[j]); } } } for (int i = 0; i < super_block_size/32; ++i) { for (int j = 0; j < 16; ++j) { q4[16*i + j] = L[32*i + j] | (L[32*i + 16 + j] << 4); } } } size_t quantize_iq4_nl(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK4_NL == 0); int64_t nblock = n_per_row/QK4_NL; char * qrow = (char *)dst; uint8_t L[QK4_NL]; float weight[QK4_NL]; uint16_t unused_h; uint8_t * unused_l = NULL; float scale; for (int64_t row = 0; row < nrow; ++row) { block_iq4_nl * iq4 = (block_iq4_nl *)qrow; for (int ibl = 0; ibl < nblock; ++ibl) { const float * qw = quant_weights ? quant_weights + QK4_NL*ibl : NULL; quantize_row_iq4_nl_impl(QK4_NL, 32, src + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l, &scale, weight, L, kvalues_iq4nl, qw, 7); } src += n_per_row; qrow += nblock*sizeof(block_iq4_nl); } return nrow * nblock * sizeof(block_iq4_nl); } //void quantize_row_iq4_nl_ref(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { void quantize_row_iq4_nl_ref(const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k) { GGML_ASSERT(k%QK4_NL == 0); int64_t nblock = k/QK4_NL; uint8_t L[QK4_NL]; float weight[QK4_NL]; uint16_t unused_h; uint8_t * unused_l = NULL; float scale; block_iq4_nl * iq4 = y; for (int ibl = 0; ibl < nblock; ++ibl) { quantize_row_iq4_nl_impl(QK4_NL, 32, x + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l, &scale, weight, L, kvalues_iq4nl, NULL, -1); } } size_t quantize_iq4_xs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; uint8_t L[QK_K]; float weight[32]; float scales[QK_K/32]; for (int64_t row = 0; row < nrow; ++row) { block_iq4_xs * iq4 = (block_iq4_xs *)qrow; for (int ibl = 0; ibl < nblock; ++ibl) { const float * qw = quant_weights ? 
quant_weights + QK_K*ibl : NULL; quantize_row_iq4_nl_impl(QK_K, 32, src + QK_K*ibl, &iq4[ibl].d, iq4[ibl].qs, &iq4[ibl].scales_h, iq4[ibl].scales_l, scales, weight, L, kvalues_iq4nl, qw, 7); } src += n_per_row; qrow += nblock*sizeof(block_iq4_xs); } return nrow * nblock * sizeof(block_iq4_xs); } void quantize_row_iq4_xs_ref(const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); quantize_iq4_xs(x, y, 1, k, NULL); } // =============================== 2.5625 bpw static void quantize_row_iq2_s_impl(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t n, const float * GGML_RESTRICT quant_weights) { const int gindex = iq2_data_index(GGML_TYPE_IQ2_S); const uint64_t * kgrid_q2xs = iq2_data[gindex].grid; const int * kmap_q2xs = iq2_data[gindex].map; const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours; GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?"); GGML_ASSERT(n%QK_K == 0); const int kMaxQ = 3; const int64_t nbl = n/QK_K; block_iq2_s * y = vy; float scales[QK_K/16]; float weight[16]; float xval[16]; int8_t L[16]; int8_t Laux[16]; float waux[16]; bool is_on_grid[2]; bool is_on_grid_aux[2]; uint8_t block_signs[2]; for (int ibl = 0; ibl < nbl; ++ibl) { memset(&y[ibl], 0, sizeof(block_iq2_s)); y[ibl].d = GGML_FP32_TO_FP16(0.f); float max_scale = 0; const float * xbl = x + QK_K*ibl; float sumx2 = 0; for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i]; float sigma2 = 2*sumx2/QK_K; for (int ib = 0; ib < QK_K/16; ++ib) { const float * xb = xbl + 16*ib; if (quant_weights) { const float * qw = quant_weights + QK_K*ibl + 16*ib; for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]); } else { for (int i = 0; i < 16; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i]; } for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]); for (int k = 0; k < 2; ++k) { uint8_t s = 0; for (int i = 0; i < 8; ++i) { if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i]; else { xval[8*k + i] = -xb[8*k + i]; s |= (1 << i); } } block_signs[k] = s; } float max = xval[0]; for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]); if (max < GROUP_MAX_EPS_IQ2_S) { scales[ib] = 0; continue; } float best = 0; float scale = max/(2*kMaxQ-1); is_on_grid[0] = is_on_grid[1] = true; for (int is = -9; is <= 9; ++is) { float id = (2*kMaxQ-1+is*0.1f)/max; float this_scale = 1/id; for (int k = 0; k < 2; ++k) { for (int i = 0; i < 8; ++i) { int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l)); } uint16_t u = 0; for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i); int grid_index = kmap_q2xs[u]; is_on_grid_aux[k] = true; if (grid_index < 0) { is_on_grid_aux[k] = false; const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 16; ++i) { float w = weight[i]; float q = 2*Laux[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0 && sumqx*sumqx > best*sumq2) { scale = sumqx/sumq2; best = scale*sumqx; for (int i = 0; i < 16; ++i) L[i] = Laux[i]; for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k]; } } int n_not_ongrid = 0; for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid; if (n_not_ongrid > 0 && scale > 0) { float id = 1/scale; for (int k = 0; k < 2; ++k) { if (is_on_grid[k]) continue; uint16_t 
u = 0; for (int i = 0; i < 8; ++i) { int l = nearest_int(0.5f*(id*xval[8*k+i]-1)); l = MAX(0, MIN(kMaxQ-1, l)); u |= (l << 2*i); L[8*k + i] = l; } int grid_index = kmap_q2xs[u]; if (grid_index < 0) { const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1; grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k); } } float sumqx = 0, sumq2 = 0; for (int i = 0; i < 16; ++i) { float w = weight[i]; float q = 2*L[i] + 1; sumqx += w*xval[i]*q; sumq2 += w*q*q; } if (sumq2 > 0) scale = sumqx/sumq2; } if (scale < 0) { scale = -scale; for (int k = 0; k < 2; ++k) block_signs[k] = ~block_signs[k]; } for (int k = 0; k < 2; ++k) { uint16_t u = 0; for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i); int grid_index = kmap_q2xs[u]; if (grid_index < 0) { printf("Oops: found point %u not on grid:", u); for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]); printf("\n"); GGML_ABORT("fatal error"); } const int i8 = 2*ib + k; y[ibl].qs[i8] = grid_index & 255; y[ibl].qh[i8/4] |= ((grid_index >> 8) << 2*(i8%4)); y[ibl].qs[QK_K/8 + i8] = block_signs[k]; } GGML_ASSERT(scale >= 0); scales[ib] = scale; max_scale = MAX(max_scale, scale); } if (!max_scale) { continue; } float d = max_scale/31; y[ibl].d = GGML_FP32_TO_FP16(d * 0.9875f); float id = 1/d; for (int ib = 0; ib < QK_K/16; ++ib) { int l = nearest_int(0.5f*(id*scales[ib]-1)); l = MAX(0, MIN(15, l)); if (ib%2 == 0) y[ibl].scales[ib/2] = l; else y[ibl].scales[ib/2] |= (l << 4); } } } size_t quantize_iq2_s(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { GGML_ASSERT(n_per_row%QK_K == 0); int64_t nblock = n_per_row/QK_K; char * qrow = (char *)dst; for (int64_t row = 0; row < nrow; ++row) { quantize_row_iq2_s_impl(src, qrow, n_per_row, quant_weights); src += n_per_row; qrow += nblock*sizeof(block_iq2_s); } return nrow * nblock * sizeof(block_iq2_s); } void quantize_row_iq2_s_ref(const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k) { assert(k % QK_K == 0); quantize_iq2_s(x, y, 1, k, NULL); } // =============================== data validation static bool validate_float(float f, size_t i) { if (isinf(f)) { fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i); return false; } if (isnan(f)) { fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i); return false; } return true; } static bool isinf_fp16(ggml_fp16_t f) { return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0; } static bool isnan_fp16(ggml_fp16_t f) { return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0; } static bool validate_fp16(ggml_fp16_t f, size_t i) { if (isinf_fp16(f)) { fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i); return false; } if (isnan_fp16(f)) { fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i); return false; } return true; } static bool validate_e_e8m0(uint8_t e, size_t i) { if (e == 0xff) { fprintf(stderr, "ggml_validate_row_data: found invalid e value %d at block %zu\n", e, i); return false; } return true; } #define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \ const type * q = (const type *) (data); \ for (size_t i = 0; i < (nb); ++i) { \ if (!validate_fp16(q[i].d, i)) { \ return false; \ } \ } #define VALIDATE_ROW_DATA_DM_F16_IMPL(type, data, nb, d, m) \ const type * q = (const type *) (data); \ for (size_t i = 0; i < (nb); ++i) { \ if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].m, i)) { \ return false; \ } \ } #define 
VALIDATE_ROW_DATA_E_E8M0_IMPL(type, data, nb) \ const type * q = (const type *) (data); \ for (size_t i = 0; i < (nb); ++i) { \ if (!validate_e_e8m0(q[i].e, i)) { \ return false; \ } \ } #define VALIDATE_ROW_DATA_DVEC_F16_IMPL(type, data, nb, nr) \ const type * q = (const type *) (data); \ for (size_t i = 0; i < (nb); ++i) { \ for (size_t j = 0; j < (nr); ++j) { \ if (!validate_fp16(q[i].d[j], i)) { \ return false; \ } \ } \ } bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) { if (type < 0 || type >= GGML_TYPE_COUNT) { fprintf(stderr, "%s: invalid type %d\n", __func__, type); return false; } if (nbytes % ggml_type_size(type) != 0) { fprintf(stderr, "%s: invalid size %zu for type %s (type size = %zu)\n", __func__, nbytes, ggml_type_name(type), ggml_type_size(type)); return false; } const size_t nb = nbytes/ggml_type_size(type); switch (type) { case GGML_TYPE_BF16: { int nans = 0; int infs = 0; const unsigned short * f = (const unsigned short *) data; for (size_t i = 0; i < nb; ++i) { nans += (f[i] & 0x7fff) > 0x7f80; infs += (f[i] & 0x7fff) == 0x7f80; } if (nans) { fprintf(stderr, "%s: found %d NaNs in row of %zu BF16 values\n", __func__, nans, nb); return false; } if (infs) { fprintf(stderr, "%s: found %d infinities in row of %zu BF16 values\n", __func__, infs, nb); return false; } } break; case GGML_TYPE_F16: { const ggml_fp16_t * f = (const ggml_fp16_t *) data; size_t i = 0; #if defined(__AVX2__) for (; i + 15 < nb; i += 16) { __m256i v = _mm256_loadu_si256((const __m256i *)(f + i)); __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi16(0x7c00)); __m256i cmp = _mm256_cmpeq_epi16(vexp, _mm256_set1_epi16(0x7c00)); int mask = _mm256_movemask_epi8(cmp); if (mask) { for (size_t j = 0; j < 16; ++j) { if (!validate_fp16(f[i + j], i + j)) { return false; } } GGML_UNREACHABLE(); } } #elif defined(__ARM_NEON) for (; i + 7 < nb; i += 8) { uint16x8_t v = vld1q_u16(f + i); uint16x8_t vexp = vandq_u16(v, vdupq_n_u16(0x7c00)); uint16x8_t cmp = vceqq_u16(vexp, vdupq_n_u16(0x7c00)); uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(cmp, 4)), 0); if (mask) { for (size_t j = 0; j < 8; ++j) { if (!validate_fp16(f[i + j], i + j)) { return false; } } GGML_UNREACHABLE(); } } #endif for (; i < nb; ++i) { if (!validate_fp16(f[i], i)) { return false; } } } break; case GGML_TYPE_F32: { const float * f = (const float *) data; size_t i = 0; #if defined(__AVX2__) for (; i + 7 < nb; i += 8) { __m256i v = _mm256_loadu_si256((const __m256i *)(f + i)); __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi32(0x7f800000)); __m256i cmp = _mm256_cmpeq_epi32(vexp, _mm256_set1_epi32(0x7f800000)); int mask = _mm256_movemask_epi8(cmp); if (mask) { for (size_t j = 0; j < 8; ++j) { if (!validate_float(f[i + j], i + j)) { return false; } } GGML_UNREACHABLE(); } } #elif defined(__ARM_NEON) for (; i + 3 < nb; i += 4) { uint32x4_t v = vld1q_u32((const uint32_t *)f + i); uint32x4_t vexp = vandq_u32(v, vdupq_n_u32(0x7f800000)); uint32x4_t cmp = vceqq_u32(vexp, vdupq_n_u32(0x7f800000)); uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(cmp, 8)), 0); if (mask) { for (size_t j = 0; j < 4; ++j) { if (!validate_float(f[i + j], i + j)) { return false; } } GGML_UNREACHABLE(); } } #endif for (; i < nb; ++i) { if (!validate_float(f[i], i)) { return false; } } } break; case GGML_TYPE_F64: { const double * f = (const double *) data; for (size_t i = 0; i < nb; ++i) { if (!validate_float(f[i], i)) { return false; } } } break; case GGML_TYPE_Q4_0: { VALIDATE_ROW_DATA_D_F16_IMPL(block_q4_0, data, 
nb); } break; case GGML_TYPE_Q4_1: { VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_1, data, nb, d, m); } break; case GGML_TYPE_Q5_0: { VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_0, data, nb); } break; case GGML_TYPE_Q5_1: { VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_1, data, nb, d, m); } break; case GGML_TYPE_Q8_0: { VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb); } break; case GGML_TYPE_MXFP4: { VALIDATE_ROW_DATA_E_E8M0_IMPL(block_mxfp4, data, nb); } break; case GGML_TYPE_Q2_K: { VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin); } break; case GGML_TYPE_Q3_K: { VALIDATE_ROW_DATA_D_F16_IMPL(block_q3_K, data, nb); } break; case GGML_TYPE_Q4_K: { VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d, dmin); } break; case GGML_TYPE_Q5_K: { VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_K, data, nb, d, dmin); } break; case GGML_TYPE_Q6_K: { VALIDATE_ROW_DATA_D_F16_IMPL(block_q6_K, data, nb); } break; case GGML_TYPE_Q8_K: { const block_q8_K * q = (const block_q8_K *) data; for (size_t i = 0; i < nb; ++i) { if (!validate_float(q[i].d, i)) { return false; } } } break; case GGML_TYPE_TQ1_0: { VALIDATE_ROW_DATA_D_F16_IMPL(block_tq1_0, data, nb); } break; case GGML_TYPE_TQ2_0: { VALIDATE_ROW_DATA_D_F16_IMPL(block_tq2_0, data, nb); } break; case GGML_TYPE_IQ1_S: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq1_s, data, nb); } break; case GGML_TYPE_IQ1_M: { const block_iq1_m * q = (const block_iq1_m *) data; for (size_t i = 0; i < nb; ++i) { iq1m_scale_t scale; const uint16_t * sc = (const uint16_t *)q[i].scales; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); if (!validate_fp16(scale.f16, i)) { return false; } } } break; case GGML_TYPE_IQ2_XXS: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xxs, data, nb); } break; case GGML_TYPE_IQ2_XS: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xs, data, nb); } break; case GGML_TYPE_IQ2_S: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_s, data, nb); } break; case GGML_TYPE_IQ3_XXS: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_xxs, data, nb); } break; case GGML_TYPE_IQ3_S: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_s, data, nb); } break; case GGML_TYPE_IQ4_XS: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_xs, data, nb); } break; case GGML_TYPE_IQ4_NL: { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb); } break; case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_I64: // nothing to validate break; default: { fprintf(stderr, "%s: invalid type %d\n", __func__, type); return false; } } return true; } ggml-org-ggml-3678254/src/ggml-quants.h000066400000000000000000000210051512524704700175610ustar00rootroot00000000000000#pragma once #define GGML_COMMON_DECL_C #include "ggml-common.h" #include "ggml.h" // GGML internal header #ifdef __cplusplus extern "C" { #endif // NOTE: these functions are defined as GGML_API because they used by the CPU backend // Quantization GGML_API void quantize_row_q4_0_ref(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q4_1_ref(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q5_0_ref(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q5_1_ref(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q8_0_ref(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q8_1_ref(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k); GGML_API void 
quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q2_K_ref(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q3_K_ref(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q4_K_ref(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q5_K_ref(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q6_K_ref(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q8_K_ref(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_tq1_0_ref(const float * GGML_RESTRICT x, block_tq1_0 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_tq2_0_ref(const float * GGML_RESTRICT x, block_tq2_0 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_iq3_xxs_ref(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_iq4_nl_ref (const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_iq4_xs_ref (const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_iq3_s_ref (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_iq2_s_ref (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k); // Dequantization GGML_API void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); //GGML_API void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_tq1_0(const block_tq1_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_tq2_0(const block_tq2_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq2_xs (const block_iq2_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq2_s (const block_iq2_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq3_xxs(const 
block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq1_s (const block_iq1_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq1_m (const block_iq1_m * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq3_s (const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); // Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization") GGML_API size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq2_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq1_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq1_m (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq4_nl (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq4_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq3_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_tq1_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_tq2_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t 
quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API void iq2xs_init_impl(enum ggml_type type); GGML_API void iq2xs_free_impl(enum ggml_type type); GGML_API void iq3xs_init_impl(int grid_size); GGML_API void iq3xs_free_impl(int grid_size); #ifdef __cplusplus } #endif ggml-org-ggml-3678254/src/ggml-rpc/000077500000000000000000000000001512524704700166635ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-rpc/CMakeLists.txt000066400000000000000000000003161512524704700214230ustar00rootroot00000000000000message(STATUS "Using RPC backend") ggml_add_backend_library(ggml-rpc ggml-rpc.cpp ) if (WIN32) target_link_libraries(ggml-rpc PRIVATE ws2_32) endif() ggml-org-ggml-3678254/src/ggml-rpc/ggml-rpc.cpp000066400000000000000000002306061512524704700211060ustar00rootroot00000000000000#include "ggml-rpc.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-cpp.h" #include #include #include #include #include #include #include #ifdef _WIN32 # define WIN32_LEAN_AND_MEAN # ifndef NOMINMAX # define NOMINMAX # endif # include # include #else # include # include # include # include # include # include # include #endif #include #include #include #include static const char * RPC_DEBUG = std::getenv("GGML_RPC_DEBUG"); #define LOG_DBG(...) \ do { if (RPC_DEBUG) GGML_LOG_DEBUG(__VA_ARGS__); } while (0) namespace fs = std::filesystem; static constexpr size_t MAX_CHUNK_SIZE = 1024ull * 1024ull * 1024ull; // 1 GiB #ifdef _WIN32 typedef SOCKET sockfd_t; using ssize_t = __int64; #else typedef int sockfd_t; #endif // cross-platform socket struct socket_t { sockfd_t fd; socket_t(sockfd_t fd) : fd(fd) {} ~socket_t() { LOG_DBG("[%s] closing socket %d\n", __func__, this->fd); #ifdef _WIN32 closesocket(this->fd); #else close(this->fd); #endif } }; // macro for nicer error messages on server crash #define RPC_STATUS_ASSERT(x) if (!(x)) GGML_ABORT("Remote RPC server crashed or returned malformed response") // all RPC structures must be packed #pragma pack(push, 1) // ggml_tensor is serialized into rpc_tensor struct rpc_tensor { uint64_t id; uint32_t type; uint64_t buffer; uint32_t ne[GGML_MAX_DIMS]; uint32_t nb[GGML_MAX_DIMS]; uint32_t op; int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)]; int32_t flags; uint64_t src[GGML_MAX_SRC]; uint64_t view_src; uint64_t view_offs; uint64_t data; char name[GGML_MAX_NAME]; char padding[4]; }; static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8"); // RPC commands enum rpc_cmd { RPC_CMD_ALLOC_BUFFER = 0, RPC_CMD_GET_ALIGNMENT, RPC_CMD_GET_MAX_SIZE, RPC_CMD_BUFFER_GET_BASE, RPC_CMD_FREE_BUFFER, RPC_CMD_BUFFER_CLEAR, RPC_CMD_SET_TENSOR, RPC_CMD_SET_TENSOR_HASH, RPC_CMD_GET_TENSOR, RPC_CMD_COPY_TENSOR, RPC_CMD_GRAPH_COMPUTE, RPC_CMD_GET_DEVICE_MEMORY, RPC_CMD_INIT_TENSOR, RPC_CMD_GET_ALLOC_SIZE, RPC_CMD_HELLO, RPC_CMD_DEVICE_COUNT, RPC_CMD_GRAPH_RECOMPUTE, RPC_CMD_COUNT, }; static_assert(RPC_CMD_HELLO == 14, "RPC_CMD_HELLO must be always 14"); // Try RPC_CMD_SET_TENSOR_HASH first when data size is larger than this threshold const size_t HASH_THRESHOLD = 10 * 1024 * 1024; struct rpc_msg_hello_rsp { uint8_t major; uint8_t minor; uint8_t patch; }; struct 
rpc_msg_device_count_rsp { uint32_t device_count; }; struct rpc_msg_get_alloc_size_req { uint32_t device; rpc_tensor tensor; rpc_tensor srcs[GGML_MAX_SRC]; }; struct rpc_msg_get_alloc_size_rsp { uint64_t alloc_size; }; struct rpc_msg_init_tensor_req { rpc_tensor tensor; }; struct rpc_msg_alloc_buffer_req { uint32_t device; uint64_t size; }; struct rpc_msg_alloc_buffer_rsp { uint64_t remote_ptr; uint64_t remote_size; }; struct rpc_msg_get_alignment_req { uint32_t device; }; struct rpc_msg_get_alignment_rsp { uint64_t alignment; }; struct rpc_msg_get_max_size_req { uint32_t device; }; struct rpc_msg_get_max_size_rsp { uint64_t max_size; }; struct rpc_msg_buffer_get_base_req { uint64_t remote_ptr; }; struct rpc_msg_buffer_get_base_rsp { uint64_t base_ptr; }; struct rpc_msg_free_buffer_req { uint64_t remote_ptr; }; struct rpc_msg_buffer_clear_req { uint64_t remote_ptr; uint8_t value; }; struct rpc_msg_set_tensor_hash_req { rpc_tensor tensor; uint64_t offset; uint64_t hash; }; struct rpc_msg_set_tensor_hash_rsp { uint8_t result; }; struct rpc_msg_get_tensor_req { rpc_tensor tensor; uint64_t offset; uint64_t size; }; struct rpc_msg_copy_tensor_req { rpc_tensor src; rpc_tensor dst; }; struct rpc_msg_copy_tensor_rsp { uint8_t result; }; struct rpc_msg_get_device_memory_req { uint32_t device; }; struct rpc_msg_get_device_memory_rsp { uint64_t free_mem; uint64_t total_mem; }; struct rpc_msg_graph_recompute_req { uint32_t device; }; #pragma pack(pop) // RPC data structures static ggml_guid_t ggml_backend_rpc_guid() { static ggml_guid guid = {0x99, 0x68, 0x5b, 0x6c, 0xd2, 0x83, 0x3d, 0x24, 0x25, 0x36, 0x72, 0xe1, 0x5b, 0x0e, 0x14, 0x03}; return &guid; } struct ggml_backend_rpc_buffer_type_context { std::string endpoint; uint32_t device; std::string name; size_t alignment; size_t max_size; }; struct graph_cache { bool is_cached(const ggml_cgraph * cgraph) { if ((int)last_graph.size() != cgraph->n_nodes) { return false; } for (int i = 0; i < cgraph->n_nodes; i++) { if (memcmp(&last_graph[i], cgraph->nodes[i], sizeof(ggml_tensor)) != 0) { return false; } } return true; } void add(const ggml_cgraph * cgraph) { last_graph.resize(cgraph->n_nodes); for (int i = 0; i < cgraph->n_nodes; i++) { memcpy(&last_graph[i], cgraph->nodes[i], sizeof(ggml_tensor)); } } std::vector last_graph; }; struct ggml_backend_rpc_context { std::string endpoint; uint32_t device; std::string name; graph_cache gc; }; struct ggml_backend_rpc_buffer_context { std::shared_ptr sock; void * base_ptr; uint64_t remote_ptr; }; // RPC helper functions // Computes FNV-1a hash of the data static uint64_t fnv_hash(const uint8_t * data, size_t len) { const uint64_t fnv_prime = 0x100000001b3ULL; uint64_t hash = 0xcbf29ce484222325ULL; for (size_t i = 0; i < len; ++i) { hash ^= data[i]; hash *= fnv_prime; } return hash; } static std::shared_ptr make_socket(sockfd_t fd) { #ifdef _WIN32 if (fd == INVALID_SOCKET) { return nullptr; } #else if (fd < 0) { return nullptr; } #endif return std::make_shared(fd); } static bool set_no_delay(sockfd_t sockfd) { int flag = 1; // set TCP_NODELAY to disable Nagle's algorithm int ret = setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, (char *)&flag, sizeof(int)); return ret == 0; } static bool set_reuse_addr(sockfd_t sockfd) { int flag = 1; int ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof(int)); return ret == 0; } static std::shared_ptr socket_connect(const char * host, int port) { struct sockaddr_in addr; auto sockfd = socket(AF_INET, SOCK_STREAM, 0); auto sock_ptr = make_socket(sockfd); if 
(sock_ptr == nullptr) { return nullptr; } if (!set_no_delay(sockfd)) { GGML_LOG_ERROR("Failed to set TCP_NODELAY\n"); return nullptr; } addr.sin_family = AF_INET; addr.sin_port = htons(port); struct hostent * server = gethostbyname(host); if (server == NULL) { GGML_LOG_ERROR("Cannot resolve host '%s'\n", host); return nullptr; } memcpy(&addr.sin_addr.s_addr, server->h_addr, server->h_length); if (connect(sock_ptr->fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { return nullptr; } return sock_ptr; } static std::shared_ptr socket_accept(sockfd_t srv_sockfd) { auto client_socket_fd = accept(srv_sockfd, NULL, NULL); auto client_socket = make_socket(client_socket_fd); if (client_socket == nullptr) { return nullptr; } if (!set_no_delay(client_socket_fd)) { GGML_LOG_ERROR("Failed to set TCP_NODELAY\n"); return nullptr; } return client_socket; } static std::shared_ptr create_server_socket(const char * host, int port) { auto sockfd = socket(AF_INET, SOCK_STREAM, 0); auto sock = make_socket(sockfd); if (sock == nullptr) { return nullptr; } if (!set_reuse_addr(sockfd)) { GGML_LOG_ERROR("Failed to set SO_REUSEADDR\n"); return nullptr; } if (inet_addr(host) == INADDR_NONE) { GGML_LOG_ERROR("Invalid host address: %s\n", host); return nullptr; } struct sockaddr_in serv_addr; serv_addr.sin_family = AF_INET; serv_addr.sin_addr.s_addr = inet_addr(host); serv_addr.sin_port = htons(port); if (bind(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) { return nullptr; } if (listen(sockfd, 1) < 0) { return nullptr; } return sock; } static bool send_data(sockfd_t sockfd, const void * data, size_t size) { size_t bytes_sent = 0; while (bytes_sent < size) { size_t size_to_send = std::min(size - bytes_sent, MAX_CHUNK_SIZE); ssize_t n = send(sockfd, (const char *)data + bytes_sent, size_to_send, 0); if (n < 0) { GGML_LOG_ERROR("send failed (bytes_sent=%zu, size_to_send=%zu)\n", bytes_sent, size_to_send); return false; } bytes_sent += (size_t)n; } return true; } static bool recv_data(sockfd_t sockfd, void * data, size_t size) { size_t bytes_recv = 0; while (bytes_recv < size) { size_t size_to_recv = std::min(size - bytes_recv, MAX_CHUNK_SIZE); ssize_t n = recv(sockfd, (char *)data + bytes_recv, size_to_recv, 0); if (n < 0) { GGML_LOG_ERROR("recv failed (bytes_recv=%zu, size_to_recv=%zu)\n", bytes_recv, size_to_recv); return false; } if (n == 0) { LOG_DBG("recv returned 0 (peer closed?)\n"); return false; } bytes_recv += (size_t)n; } return true; } static bool send_msg(sockfd_t sockfd, const void * msg, size_t msg_size) { if (!send_data(sockfd, &msg_size, sizeof(msg_size))) { return false; } return send_data(sockfd, msg, msg_size); } static bool recv_msg(sockfd_t sockfd, void * msg, size_t msg_size) { uint64_t size; if (!recv_data(sockfd, &size, sizeof(size))) { return false; } if (size != msg_size) { return false; } return recv_data(sockfd, msg, msg_size); } static bool recv_msg(sockfd_t sockfd, std::vector & input) { uint64_t size; if (!recv_data(sockfd, &size, sizeof(size))) { return false; } try { input.resize(size); } catch (const std::bad_alloc & e) { GGML_LOG_ERROR("Failed to allocate input buffer of size %" PRIu64 "\n", size); return false; } return recv_data(sockfd, input.data(), size); } static bool parse_endpoint(const std::string & endpoint, std::string & host, int & port) { size_t pos = endpoint.find(':'); if (pos == std::string::npos) { return false; } host = endpoint.substr(0, pos); port = std::stoi(endpoint.substr(pos + 1)); return true; } // RPC request : | rpc_cmd (1 byte) | request_size 
(8 bytes) | request_data (request_size bytes) | // No response static bool send_rpc_cmd(const std::shared_ptr & sock, enum rpc_cmd cmd, const void * input, size_t input_size) { uint8_t cmd_byte = cmd; if (!send_data(sock->fd, &cmd_byte, sizeof(cmd_byte))) { return false; } if (!send_data(sock->fd, &input_size, sizeof(input_size))) { return false; } if (!send_data(sock->fd, input, input_size)) { return false; } return true; } // RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) | // RPC response: | response_size (8 bytes) | response_data (response_size bytes) | static bool send_rpc_cmd(const std::shared_ptr & sock, enum rpc_cmd cmd, const void * input, size_t input_size, void * output, size_t output_size) { if (!send_rpc_cmd(sock, cmd, input, input_size)) { return false; } // TODO: currently the output_size is always known, do we need support for commands with variable output size? // even if we do, we can skip sending output_size from the server for commands with known output size uint64_t out_size; if (!recv_data(sock->fd, &out_size, sizeof(out_size))) { return false; } if (out_size != output_size) { return false; } if (!recv_data(sock->fd, output, output_size)) { return false; } return true; } // RPC client-side implementation static bool check_server_version(const std::shared_ptr & sock) { rpc_msg_hello_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_HELLO, nullptr, 0, &response, sizeof(response)); RPC_STATUS_ASSERT(status); if (response.major != RPC_PROTO_MAJOR_VERSION || response.minor > RPC_PROTO_MINOR_VERSION) { GGML_LOG_ERROR("RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); return false; } if (response.minor != RPC_PROTO_MINOR_VERSION || response.patch != RPC_PROTO_PATCH_VERSION) { GGML_LOG_INFO("WARNING: RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); } return true; } static std::shared_ptr get_socket(const std::string & endpoint) { static std::mutex mutex; std::lock_guard lock(mutex); static std::unordered_map> sockets; static bool initialized = false; auto it = sockets.find(endpoint); if (it != sockets.end()) { if (auto sock = it->second.lock()) { return sock; } } std::string host; int port; if (!parse_endpoint(endpoint, host, port)) { GGML_LOG_ERROR("Failed to parse endpoint: %s\n", endpoint.c_str()); return nullptr; } #ifdef _WIN32 if (!initialized) { WSADATA wsaData; int res = WSAStartup(MAKEWORD(2, 2), &wsaData); if (res != 0) { return nullptr; } initialized = true; } #else GGML_UNUSED(initialized); #endif auto sock = socket_connect(host.c_str(), port); if (sock == nullptr) { return nullptr; } if (!check_server_version(sock)) { return nullptr; } LOG_DBG("[%s] connected to %s, sockfd=%d\n", __func__, endpoint.c_str(), sock->fd); sockets[endpoint] = sock; return sock; } static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_msg_free_buffer_req request = {ctx->remote_ptr}; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, &request, sizeof(request), nullptr, 0); RPC_STATUS_ASSERT(status); delete ctx; } static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; if (ctx->base_ptr != nullptr) { return ctx->base_ptr; } rpc_msg_buffer_get_base_req request = {ctx->remote_ptr}; rpc_msg_buffer_get_base_rsp 
response; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); ctx->base_ptr = reinterpret_cast(response.base_ptr); return ctx->base_ptr; } static bool ggml_backend_buffer_is_rpc(ggml_backend_buffer_t buffer) { return buffer->iface.free_buffer == ggml_backend_rpc_buffer_free_buffer; } static rpc_tensor serialize_tensor(const ggml_tensor * tensor) { rpc_tensor result; if (!tensor) { memset(&result, 0, sizeof(result)); return result; } result.id = reinterpret_cast(tensor); result.type = tensor->type; if (tensor->buffer && ggml_backend_buffer_is_rpc(tensor->buffer)) { ggml_backend_buffer_t buffer = tensor->buffer; ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; result.buffer = ctx != nullptr ? ctx->remote_ptr : 0; } else { result.buffer = 0; } for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) { result.ne[i] = tensor->ne[i]; result.nb[i] = tensor->nb[i]; } result.op = tensor->op; for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) { result.op_params[i] = tensor->op_params[i]; } result.flags = tensor->flags; for (uint32_t i = 0; i < GGML_MAX_SRC; i++) { result.src[i] = reinterpret_cast(tensor->src[i]); } result.view_src = reinterpret_cast(tensor->view_src); result.view_offs = tensor->view_offs; result.data = reinterpret_cast(tensor->data); // Avoid sending uninitialized data over the wire memset(result.name, 0, sizeof(result.name)); memset(result.padding, 0, sizeof(result.padding)); snprintf(result.name, GGML_MAX_NAME, "%s", tensor->name); return result; } static enum ggml_status ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; // CUDA backend on the server pads everything to 512 due to CUDA limitations. // Due to bandwidth constraints, we only call the server init tensor functions if necessary. 
// In particular, only quantized tensors need padding if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) { rpc_msg_init_tensor_req request; request.tensor = serialize_tensor(tensor); bool status = send_rpc_cmd(ctx->sock, RPC_CMD_INIT_TENSOR, &request, sizeof(request), nullptr, 0); RPC_STATUS_ASSERT(status); } return GGML_STATUS_SUCCESS; } static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_tensor rpc_tensor = serialize_tensor(tensor); if (size > HASH_THRESHOLD) { rpc_msg_set_tensor_hash_req request; request.tensor = rpc_tensor; request.offset = offset; request.hash = fnv_hash((const uint8_t*)data, size); rpc_msg_set_tensor_hash_rsp response; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR_HASH, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); if (response.result) { // the server has the same data, no need to send it return; } } // input serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) size_t input_size = sizeof(rpc_tensor) + sizeof(uint64_t) + size; std::vector input(input_size, 0); memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input.data(), input.size()); RPC_STATUS_ASSERT(status); } static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_msg_get_tensor_req request; request.tensor = serialize_tensor(tensor); request.offset = offset; request.size = size; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, &request, sizeof(request), data, size); RPC_STATUS_ASSERT(status); } static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { if (ggml_backend_buffer_is_rpc(src->buffer)) { // check if src and dst are on the same server ggml_backend_buffer_t src_buffer = src->buffer; ggml_backend_rpc_buffer_context * src_ctx = (ggml_backend_rpc_buffer_context *)src_buffer->context; ggml_backend_buffer_t dst_buffer = dst->buffer; ggml_backend_rpc_buffer_context * dst_ctx = (ggml_backend_rpc_buffer_context *)dst_buffer->context; if (src_ctx->sock != dst_ctx->sock) { return false; } ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_msg_copy_tensor_req request; request.src = serialize_tensor(src); request.dst = serialize_tensor(dst); rpc_msg_copy_tensor_rsp response; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); return response.result; } return false; } static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_msg_buffer_clear_req request = {ctx->remote_ptr, value}; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, &request, sizeof(request), nullptr, 0); RPC_STATUS_ASSERT(status); } static ggml_backend_buffer_i ggml_backend_rpc_buffer_interface = { /* .free_buffer = */ 
ggml_backend_rpc_buffer_free_buffer, /* .get_base = */ ggml_backend_rpc_buffer_get_base, /* .init_tensor = */ ggml_backend_rpc_buffer_init_tensor, /* .memset_tensor = */ NULL, /* .set_tensor = */ ggml_backend_rpc_buffer_set_tensor, /* .get_tensor = */ ggml_backend_rpc_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_rpc_buffer_cpy_tensor, /* .clear = */ ggml_backend_rpc_buffer_clear, /* .reset = */ NULL, }; static const char * ggml_backend_rpc_buffer_type_name(ggml_backend_buffer_type_t buft) { ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; return buft_ctx->name.c_str(); } static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; rpc_msg_alloc_buffer_req request = {buft_ctx->device, size}; rpc_msg_alloc_buffer_rsp response; auto sock = get_socket(buft_ctx->endpoint); bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); if (response.remote_ptr != 0) { ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft, ggml_backend_rpc_buffer_interface, new ggml_backend_rpc_buffer_context{sock, nullptr, response.remote_ptr}, response.remote_size); return buffer; } else { return nullptr; } } static size_t get_alignment(const std::shared_ptr & sock, uint32_t device) { rpc_msg_get_alignment_req request = {device}; rpc_msg_get_alignment_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); return response.alignment; } static size_t ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; return buft_ctx->alignment; } static size_t get_max_size(const std::shared_ptr & sock, uint32_t device) { rpc_msg_get_max_size_req request = {device}; rpc_msg_get_max_size_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); return response.max_size; } static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) { ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; return buft_ctx->max_size; } static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { // should we query the remote server for the actual size bool rpc_get = false; // See comments in init_tensor. 
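// note: rpc_get accumulates the cases where the local ggml_nbytes() estimate can differ from what the remote backend
// actually allocates; only in those cases do we pay for an extra RPC_CMD_GET_ALLOC_SIZE round trip to the server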
rpc_get |= ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr); // ops that require additional memory for fleeting data on certain backends // ref: https://github.com/ggml-org/llama.cpp/pull/15966 rpc_get |= tensor->op == GGML_OP_FLASH_ATTN_EXT; rpc_get |= tensor->op == GGML_OP_MUL_MAT_ID; if (rpc_get) { ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; auto sock = get_socket(buft_ctx->endpoint); rpc_msg_get_alloc_size_req request = { /*.device =*/ buft_ctx->device, /*.tensor =*/ serialize_tensor(tensor), /*.srcs =*/ {}, }; // .get_alloc_size could be a function of the tensor's srcs, so we must serialize them as well for (int i = 0; i < GGML_MAX_SRC; i++) { request.srcs[i] = serialize_tensor(tensor->src[i]); } // TODO: cache the alloc responses to avoid extra RPC calls? rpc_msg_get_alloc_size_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALLOC_SIZE, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); return response.alloc_size; } return ggml_nbytes(tensor); } static ggml_backend_buffer_type_i ggml_backend_rpc_buffer_type_interface = { /* .get_name = */ ggml_backend_rpc_buffer_type_name, /* .alloc_buffer = */ ggml_backend_rpc_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_rpc_buffer_type_get_alignment, /* .get_max_size = */ ggml_backend_rpc_get_max_size, /* .get_alloc_size = */ ggml_backend_rpc_buffer_type_get_alloc_size, /* .is_host = */ NULL, }; static const char * ggml_backend_rpc_name(ggml_backend_t backend) { ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; return rpc_ctx->name.c_str(); } static void ggml_backend_rpc_free(ggml_backend_t backend) { ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; delete rpc_ctx; delete backend; } static void ggml_backend_rpc_synchronize(ggml_backend_t backend) { GGML_UNUSED(backend); // this is no-op because we don't have any async operations } static void add_tensor(ggml_tensor * tensor, std::vector & tensors, std::unordered_set & visited) { if (tensor == nullptr) { return; } if (visited.find(tensor) != visited.end()) { return; } visited.insert(tensor); for (int i = 0; i < GGML_MAX_SRC; i++) { add_tensor(tensor->src[i], tensors, visited); } add_tensor(tensor->view_src, tensors, visited); tensors.push_back(serialize_tensor(tensor)); } static void serialize_graph(uint32_t device, const ggml_cgraph * cgraph, std::vector & output) { uint32_t n_nodes = cgraph->n_nodes; std::vector tensors; std::unordered_set visited; for (uint32_t i = 0; i < n_nodes; i++) { add_tensor(cgraph->nodes[i], tensors, visited); } // serialization format: // | device (4 bytes) | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) | uint32_t n_tensors = tensors.size(); int output_size = 2*sizeof(uint32_t) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t) + n_tensors * sizeof(rpc_tensor); output.resize(output_size, 0); uint8_t * dest = output.data(); memcpy(dest, &device, sizeof(device)); dest += sizeof(device); memcpy(dest, &n_nodes, sizeof(n_nodes)); dest += sizeof(n_nodes); for (uint32_t i = 0; i < n_nodes; i++) { memcpy(dest + i * sizeof(uint64_t), &cgraph->nodes[i], sizeof(uint64_t)); } dest += n_nodes * sizeof(uint64_t); memcpy(dest, &n_tensors, sizeof(n_tensors)); dest += sizeof(n_tensors); rpc_tensor * out_tensors = (rpc_tensor *)dest; memcpy(out_tensors, tensors.data(), n_tensors * 
sizeof(rpc_tensor)); } static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; GGML_ASSERT(cgraph->n_nodes > 0); bool reuse = rpc_ctx->gc.is_cached(cgraph); if (reuse) { rpc_msg_graph_recompute_req request; request.device = rpc_ctx->device; auto sock = get_socket(rpc_ctx->endpoint); bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_RECOMPUTE, &request, sizeof(request)); RPC_STATUS_ASSERT(status); } else { rpc_ctx->gc.add(cgraph); std::vector input; serialize_graph(rpc_ctx->device, cgraph, input); auto sock = get_socket(rpc_ctx->endpoint); bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input.data(), input.size()); RPC_STATUS_ASSERT(status); } return GGML_STATUS_SUCCESS; } static ggml_backend_i ggml_backend_rpc_interface = { /* .get_name = */ ggml_backend_rpc_name, /* .free = */ ggml_backend_rpc_free, /* .set_tensor_async = */ NULL, /* .get_tensor_async = */ NULL, /* .cpy_tensor_async = */ NULL, /* .synchronize = */ ggml_backend_rpc_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_rpc_graph_compute, /* .event_record = */ NULL, /* .event_wait = */ NULL, /* .graph_optimize = */ NULL, }; ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint, uint32_t device) { static std::mutex mutex; std::lock_guard lock(mutex); std::string buft_name = "RPC" + std::to_string(device) + "[" + std::string(endpoint) + "]"; // NOTE: buffer types are allocated and never freed; this is by design static std::unordered_map buft_map; auto it = buft_map.find(buft_name); if (it != buft_map.end()) { return it->second; } auto sock = get_socket(endpoint); if (sock == nullptr) { GGML_LOG_ERROR("Failed to connect to %s\n", endpoint); return nullptr; } size_t alignment = get_alignment(sock, device); size_t max_size = get_max_size(sock, device); ggml_backend_rpc_buffer_type_context * buft_ctx = new ggml_backend_rpc_buffer_type_context { /* .endpoint = */ endpoint, /* .device = */ device, /* .name = */ buft_name, /* .alignment = */ alignment, /* .max_size = */ max_size }; auto reg = ggml_backend_rpc_add_server(endpoint); ggml_backend_buffer_type_t buft = new ggml_backend_buffer_type { /* .iface = */ ggml_backend_rpc_buffer_type_interface, /* .device = */ ggml_backend_reg_dev_get(reg, device), /* .context = */ buft_ctx }; buft_map[buft_name] = buft; return buft; } ggml_backend_t ggml_backend_rpc_init(const char * endpoint, uint32_t device) { std::string dev_name = "RPC" + std::to_string(device) + "[" + std::string(endpoint) + "]"; ggml_backend_rpc_context * ctx = new ggml_backend_rpc_context { /* .endpoint = */ endpoint, /* .device = */ device, /* .name = */ dev_name, /* .gc = */ {}, }; auto reg = ggml_backend_rpc_add_server(endpoint); ggml_backend_t backend = new ggml_backend { /* .guid = */ ggml_backend_rpc_guid(), /* .iface = */ ggml_backend_rpc_interface, /* .device = */ ggml_backend_reg_dev_get(reg, device), /* .context = */ ctx }; return backend; } bool ggml_backend_is_rpc(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_rpc_guid()); } static void get_device_memory(const std::shared_ptr & sock, uint32_t device, size_t * free, size_t * total) { rpc_msg_get_device_memory_req request; request.device = device; rpc_msg_get_device_memory_rsp response; bool status = send_rpc_cmd(sock, 
RPC_CMD_GET_DEVICE_MEMORY, &request, sizeof(request), &response, sizeof(response)); RPC_STATUS_ASSERT(status); *free = response.free_mem; *total = response.total_mem; } void ggml_backend_rpc_get_device_memory(const char * endpoint, uint32_t device, size_t * free, size_t * total) { auto sock = get_socket(endpoint); if (sock == nullptr) { *free = 0; *total = 0; return; } get_device_memory(sock, device, free, total); } // RPC server-side implementation class rpc_server { public: rpc_server(std::vector all_backends, const char * cache_dir) : backends(std::move(all_backends)), cache_dir(cache_dir) { stored_graphs.resize(backends.size()); } ~rpc_server(); void hello(rpc_msg_hello_rsp & response); bool alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response); bool get_alignment(const rpc_msg_get_alignment_req & request, rpc_msg_get_alignment_rsp & response); bool get_max_size(const rpc_msg_get_max_size_req & request, rpc_msg_get_max_size_rsp & response); bool buffer_get_base(const rpc_msg_buffer_get_base_req & request, rpc_msg_buffer_get_base_rsp & response); bool free_buffer(const rpc_msg_free_buffer_req & request); bool buffer_clear(const rpc_msg_buffer_clear_req & request); bool set_tensor(const std::vector & input); bool set_tensor_hash(const rpc_msg_set_tensor_hash_req & request, rpc_msg_set_tensor_hash_rsp & response); bool get_tensor(const rpc_msg_get_tensor_req & request, std::vector & response); bool copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_copy_tensor_rsp & response); bool graph_compute(const std::vector & input); bool graph_recompute(const rpc_msg_graph_recompute_req & request); bool init_tensor(const rpc_msg_init_tensor_req & request); bool get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response); bool get_device_memory(const rpc_msg_get_device_memory_req & request, rpc_msg_get_device_memory_rsp & response); struct stored_graph { ggml_context_ptr ctx_ptr; ggml_cgraph * graph; }; private: bool get_cached_file(uint64_t hash, std::vector & data); ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor); ggml_tensor * create_node(uint64_t id, struct ggml_context * ctx, const std::unordered_map & tensor_ptrs, std::unordered_map & tensor_map); std::vector backends; const char * cache_dir; std::unordered_set buffers; // store the last computed graph for each backend std::vector stored_graphs; }; void rpc_server::hello(rpc_msg_hello_rsp & response) { response.major = RPC_PROTO_MAJOR_VERSION; response.minor = RPC_PROTO_MINOR_VERSION; response.patch = RPC_PROTO_PATCH_VERSION; LOG_DBG("[%s] version: %d.%d.%d\n", __func__, response.major, response.minor, response.patch); } bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response) { uint32_t dev_id = request.device; if (dev_id >= backends.size()) { return false; } ggml_backend_buffer_type_t buft; struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead()*(1 + GGML_MAX_SRC), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr) { GGML_LOG_ERROR("Null tensor pointer passed to server get_alloc_size function.\n"); return false; } for (int i = 0; i < GGML_MAX_SRC; i++) { if (request.srcs[i].id != 0) { tensor->src[i] = deserialize_tensor(ctx, &request.srcs[i]); } } 
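// the source tensors are deserialized as well because some backends derive the allocation size from the op's inputs
// (e.g. scratch space for GGML_OP_FLASH_ATTN_EXT and GGML_OP_MUL_MAT_ID), so ggml_backend_buft_get_alloc_size()
// below may need to inspect tensor->src[i]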
LOG_DBG("[%s] device: %d, buffer: %p, data: %p\n", __func__, dev_id, (void*)tensor->buffer, tensor->data); if (tensor->buffer == nullptr) { //No buffer allocated. buft = ggml_backend_get_default_buffer_type(backends[dev_id]); } else { buft = tensor->buffer->buft; } response.alloc_size = ggml_backend_buft_get_alloc_size(buft, tensor); return true; } bool rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response) { uint32_t dev_id = request.device; if (dev_id >= backends.size()) { return false; } ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backends[dev_id]); ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, request.size); response.remote_ptr = 0; response.remote_size = 0; if (buffer != nullptr) { response.remote_ptr = reinterpret_cast(buffer); response.remote_size = buffer->size; LOG_DBG("[%s] device: %d, size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, dev_id, request.size, response.remote_ptr, response.remote_size); buffers.insert(buffer); } else { LOG_DBG("[%s] device: %d, size: %" PRIu64 " -> failed\n", __func__, dev_id, request.size); } return true; } bool rpc_server::get_alignment(const rpc_msg_get_alignment_req & request, rpc_msg_get_alignment_rsp & response) { uint32_t dev_id = request.device; if (dev_id >= backends.size()) { return false; } ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backends[dev_id]); size_t alignment = ggml_backend_buft_get_alignment(buft); LOG_DBG("[%s] device: %d, alignment: %lu\n", __func__, dev_id, alignment); response.alignment = alignment; return true; } bool rpc_server::get_max_size(const rpc_msg_get_max_size_req & request, rpc_msg_get_max_size_rsp & response) { uint32_t dev_id = request.device; if (dev_id >= backends.size()) { return false; } ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backends[dev_id]); size_t max_size = ggml_backend_buft_get_max_size(buft); LOG_DBG("[%s] device: %d, max_size: %lu\n", __func__, dev_id, max_size); response.max_size = max_size; return true; } bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req & request, rpc_msg_buffer_get_base_rsp & response) { LOG_DBG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr); ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr); if (buffers.find(buffer) == buffers.end()) { GGML_LOG_ERROR("[%s] buffer not found\n", __func__); return false; } void * base = ggml_backend_buffer_get_base(buffer); response.base_ptr = reinterpret_cast(base); return true; } bool rpc_server::free_buffer(const rpc_msg_free_buffer_req & request) { LOG_DBG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr); ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr); if (buffers.find(buffer) == buffers.end()) { GGML_LOG_ERROR("[%s] buffer not found\n", __func__); return false; } ggml_backend_buffer_free(buffer); buffers.erase(buffer); return true; } bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req & request) { LOG_DBG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value); ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr); if (buffers.find(buffer) == buffers.end()) { GGML_LOG_ERROR("[%s] buffer not found\n", __func__); return false; } ggml_backend_buffer_clear(buffer, request.value); return true; } ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) { // Validate tensor type 
before using it if (tensor->type >= GGML_TYPE_COUNT) { GGML_LOG_ERROR("[%s] invalid tensor type received: %u\n", __func__, tensor->type); return nullptr; } ggml_tensor * result = ggml_new_tensor_4d(ctx, (ggml_type) tensor->type, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]); // ggml_new_tensor_4d might fail if dimensions are invalid, although less likely to crash than invalid type if (result == nullptr) { GGML_LOG_ERROR("[%s] ggml_new_tensor_4d failed for type %u\\n", __func__, tensor->type); return nullptr; } for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) { result->nb[i] = tensor->nb[i]; } result->buffer = reinterpret_cast(tensor->buffer); if (result->buffer && buffers.find(result->buffer) == buffers.end()) { result->buffer = nullptr; } if (result->buffer) { // require that the tensor data does not go beyond the buffer end uint64_t tensor_size = (uint64_t) ggml_nbytes(result); uint64_t buffer_start = (uint64_t) ggml_backend_buffer_get_base(result->buffer); uint64_t buffer_size = (uint64_t) ggml_backend_buffer_get_size(result->buffer); GGML_ASSERT(tensor->data + tensor_size >= tensor->data); // check for overflow GGML_ASSERT(tensor->data >= buffer_start && tensor->data + tensor_size <= buffer_start + buffer_size); } result->op = (ggml_op) tensor->op; for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) { result->op_params[i] = tensor->op_params[i]; } result->flags = tensor->flags; result->data = reinterpret_cast(tensor->data); ggml_set_name(result, tensor->name); return result; } bool rpc_server::set_tensor(const std::vector & input) { // serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) | if (input.size() < sizeof(rpc_tensor) + sizeof(uint64_t)) { return false; } const rpc_tensor * in_tensor = (const rpc_tensor *)input.data(); uint64_t offset; memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset)); const size_t size = input.size() - sizeof(rpc_tensor) - sizeof(offset); struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); if (tensor == nullptr || tensor->buffer == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__); return false; } LOG_DBG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size); // sanitize tensor->data { const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer); const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) { GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu) out of buffer bounds [0x%zx, 0x%zx)\n", __func__, in_tensor->data, offset, size, p0, p1); return false; } } const void * data = input.data() + sizeof(rpc_tensor) + sizeof(offset); if (cache_dir && size > HASH_THRESHOLD) { uint64_t hash = fnv_hash((const uint8_t*)data, size); char hash_str[17]; snprintf(hash_str, sizeof(hash_str), "%016" PRIx64, hash); // save to cache_dir/hash_str fs::path cache_file = fs::path(cache_dir) / hash_str; std::ofstream ofs(cache_file, std::ios::binary); ofs.write((const char *)data, size); GGML_LOG_INFO("[%s] saved to '%s'\n", __func__, cache_file.c_str()); } ggml_backend_tensor_set(tensor, data, offset, size); 
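// the payload is now in the target buffer; when a cache directory is configured, large payloads were also saved
// above so that a later RPC_CMD_SET_TENSOR_HASH carrying the same FNV-1a hash can be served without re-sending the data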
return true; } bool rpc_server::get_cached_file(uint64_t hash, std::vector & data) { if (!cache_dir) { return false; } char hash_str[17]; snprintf(hash_str, sizeof(hash_str), "%016" PRIx64, hash); fs::path cache_file = fs::path(cache_dir) / hash_str; std::error_code ec; if (!fs::exists(cache_file, ec)) { return false; } std::ifstream ifs(cache_file, std::ios::binary); ifs.seekg(0, std::ios::end); size_t size = ifs.tellg(); ifs.seekg(0, std::ios::beg); data.resize(size); ifs.read((char *)data.data(), size); return true; } bool rpc_server::set_tensor_hash(const rpc_msg_set_tensor_hash_req & request, rpc_msg_set_tensor_hash_rsp & response) { std::vector cached_file; if (!get_cached_file(request.hash, cached_file)) { response.result = 0; return true; } size_t size = cached_file.size(); struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr || tensor->buffer == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__); return false; } LOG_DBG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu, hash: %" PRIx64 "\n", __func__, (void*)tensor->buffer, tensor->data, request.offset, size, request.hash); // sanitize tensor->data { const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer); const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (request.tensor.data + request.offset < p0 || request.tensor.data + request.offset >= p1 || size > (p1 - request.tensor.data - request.offset)) { GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu, hash=0x%" PRIx64 ") out of buffer bounds [0x%zx, 0x%zx)\n", __func__, request.tensor.data, request.offset, size, request.hash, p0, p1); return false; } } ggml_backend_tensor_set(tensor, cached_file.data(), request.offset, size); response.result = 1; return true; } bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) { struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr) { GGML_LOG_ERROR("Null tensor pointer passed to server init_tensor function.\n"); return false; } LOG_DBG("[%s] buffer: %p, data: %p\n", __func__, (void*)tensor->buffer, tensor->data); // Call the backend's buffer_init_tensor function ggml_backend_buffer_t buffer = tensor->buffer; if (buffer && buffer->iface.init_tensor) { buffer->iface.init_tensor(buffer, tensor); } else { GGML_LOG_ERROR("Null buffer for tensor passed to init_tensor function\n"); } if (tensor->extra != nullptr) { // This pointer can either be passed around client/server, or probably better stored server-side and kept track of. // Currently unimplemented. 
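// tensor->extra is backend-specific metadata and cannot be marshalled over the RPC protocol, so such tensors are rejected explicitly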
GGML_LOG_ERROR("tensor->extra populated by the backend, this is currently unsupported.\n"); return false; } return true; } bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector & response) { struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr || tensor->buffer == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__); return false; } LOG_DBG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, request.offset, request.size); // sanitize tensor->data { const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer); const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (request.tensor.data + request.offset < p0 || request.tensor.data + request.offset >= p1 || request.size > (p1 - request.tensor.data - request.offset)) { GGML_LOG_ERROR("[%s] requested tensor region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%" PRIu64 ") out of buffer bounds [0x%zx, 0x%zx)\n", __func__, request.tensor.data, request.offset, request.size, p0, p1); return false; } } response.resize(request.size, 0); ggml_backend_tensor_get(tensor, response.data(), request.offset, request.size); return true; } bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_copy_tensor_rsp & response) { struct ggml_init_params params { /*.mem_size =*/ 2*ggml_tensor_overhead(), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); ggml_tensor * src = deserialize_tensor(ctx, &request.src); ggml_tensor * dst = deserialize_tensor(ctx, &request.dst); if (src == nullptr || dst == nullptr || src->buffer == nullptr || dst->buffer == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__); return false; } uint64_t src_size = (uint64_t) ggml_nbytes(src); uint64_t dst_data = (uint64_t) dst->data; uint64_t dst_base = (uint64_t) ggml_backend_buffer_get_base(dst->buffer); uint64_t dst_buf_sz = (uint64_t) ggml_backend_buffer_get_size(dst->buffer); if (dst_data + src_size > dst_base + dst_buf_sz) { GGML_LOG_ERROR("[%s] out-of-bounds write in rpc_server::copy_tensor:\n" " write range : [0x%" PRIx64 ", 0x%" PRIx64 "]\n" " buffer base: [0x%" PRIx64 ", 0x%" PRIx64 "]\n", __func__, dst_data, dst_data + src_size, dst_base, dst_base + dst_buf_sz); return false; } LOG_DBG("[%s] src->buffer: %p, dst->buffer: %p\n", __func__, (void*) src->buffer, (void*) dst->buffer); response.result = ggml_backend_buffer_copy_tensor(src, dst); return true; } ggml_tensor * rpc_server::create_node(uint64_t id, struct ggml_context * ctx, const std::unordered_map & tensor_ptrs, std::unordered_map & tensor_map) { if (tensor_map.find(id) != tensor_map.end()) { return tensor_map[id]; } // Safely find the tensor pointer auto it_ptr = tensor_ptrs.find(id); if (it_ptr == tensor_ptrs.end()) { return nullptr; } const rpc_tensor * tensor = it_ptr->second; struct ggml_tensor * result = deserialize_tensor(ctx, tensor); if (result == nullptr) { return nullptr; } tensor_map[id] = result; for (int i = 0; i < GGML_MAX_SRC; i++) { // Check if the source ID is 0 before calling create_node recursively if (tensor->src[i] == 0) { result->src[i] = 
nullptr; } else { result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map); // If the recursive call failed for a non-zero ID, propagate the error if (result->src[i] == nullptr) { GGML_LOG_ERROR("[%s] failed to create source node %d (src_id=%" PRIu64 ") for node id %" PRIu64 "\n", __func__, i, tensor->src[i], id); // Must return nullptr to signal failure up the call stack return nullptr; } } } // Handle view_src similarly if (tensor->view_src == 0) { result->view_src = nullptr; } else { result->view_src = create_node(tensor->view_src, ctx, tensor_ptrs, tensor_map); // If the recursive call failed for a non-zero ID, propagate the error if (result->view_src == nullptr) { GGML_LOG_ERROR("[%s] failed to create view_src node (view_src_id=%" PRIu64 ") for node id %" PRIu64 "\n", __func__, tensor->view_src, id); // Must return nullptr to signal failure up the call stack return nullptr; } } result->view_offs = tensor->view_offs; return result; } bool rpc_server::graph_compute(const std::vector & input) { // serialization format: // | device (4 bytes) | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) | if (input.size() < 2*sizeof(uint32_t)) { return false; } const uint8_t * src = input.data(); uint32_t device; memcpy(&device, src, sizeof(device)); src += sizeof(device); if (device >= backends.size()) { return false; } uint32_t n_nodes; memcpy(&n_nodes, src, sizeof(n_nodes)); src += sizeof(n_nodes); if (input.size() < 2*sizeof(uint32_t) + n_nodes*sizeof(uint64_t) + sizeof(uint32_t)) { return false; } const uint64_t * nodes = (const uint64_t *)src; src += n_nodes*sizeof(uint64_t); uint32_t n_tensors; memcpy(&n_tensors, src, sizeof(n_tensors)); src += sizeof(n_tensors); if (input.size() < 2*sizeof(uint32_t) + n_nodes*sizeof(uint64_t) + sizeof(uint32_t) + n_tensors*sizeof(rpc_tensor)) { return false; } const rpc_tensor * tensors = (const rpc_tensor *)src; LOG_DBG("[%s] device: %u, n_nodes: %u, n_tensors: %u\n", __func__, device, n_nodes, n_tensors); size_t buf_size = ggml_tensor_overhead()*(n_nodes + n_tensors) + ggml_graph_overhead_custom(n_nodes, false); struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; ggml_context_ptr ctx_ptr { ggml_init(params) }; GGML_ASSERT(ctx_ptr != nullptr); ggml_context * ctx = ctx_ptr.get(); struct ggml_cgraph * graph = ggml_new_graph_custom(ctx, n_nodes, false); graph->n_nodes = n_nodes; std::unordered_map tensor_ptrs; for (uint32_t i = 0; i < n_tensors; i++) { tensor_ptrs[tensors[i].id] = &tensors[i]; } std::unordered_map tensor_map; for (uint32_t i = 0; i < n_nodes; i++) { int64_t id; memcpy(&id, &nodes[i], sizeof(id)); graph->nodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map); // Check if create_node failed for a *non-zero* ID. // If id was 0, create_node returning nullptr is expected. // If id was non-zero and create_node returned nullptr, it indicates a deserialization error. 
if (graph->nodes[i] == nullptr && id != 0) { GGML_LOG_ERROR("[%s] failed to create graph node %d (id=%" PRId64 ")\n", __func__, i, id); return false; } } ggml_status status = ggml_backend_graph_compute(backends[device], graph); GGML_ASSERT(status == GGML_STATUS_SUCCESS && "Unsuccessful graph computations are not supported with RPC"); stored_graphs[device].ctx_ptr.swap(ctx_ptr); stored_graphs[device].graph = graph; return true; } bool rpc_server::graph_recompute(const rpc_msg_graph_recompute_req & request) { uint32_t device = request.device; if (device >= backends.size()) { return false; } if (stored_graphs[device].graph == nullptr) { return false; } ggml_cgraph * graph = stored_graphs[device].graph; LOG_DBG("[%s] device: %u\n", __func__, device); ggml_status status = ggml_backend_graph_compute(backends[device], graph); GGML_ASSERT(status == GGML_STATUS_SUCCESS && "Unsuccessful graph computations are not supported with RPC"); return true; } bool rpc_server::get_device_memory(const rpc_msg_get_device_memory_req & request, rpc_msg_get_device_memory_rsp & response) { uint32_t dev_id = request.device; if (dev_id >= backends.size()) { return false; } size_t free, total; ggml_backend_dev_t dev = ggml_backend_get_device(backends[dev_id]); ggml_backend_dev_memory(dev, &free, &total); response.free_mem = free; response.total_mem = total; LOG_DBG("[%s] device: %u, free_mem: %" PRIu64 ", total_mem: %" PRIu64 "\n", __func__, dev_id, response.free_mem, response.total_mem); return true; } rpc_server::~rpc_server() { for (auto buffer : buffers) { ggml_backend_buffer_free(buffer); } } static void rpc_serve_client(const std::vector & backends, const char * cache_dir, sockfd_t sockfd) { rpc_server server(backends, cache_dir); uint8_t cmd; if (!recv_data(sockfd, &cmd, 1)) { return; } // the first command sent by the client must be HELLO if (cmd != RPC_CMD_HELLO) { GGML_LOG_ERROR("Expected HELLO command, update client\n"); return; } if (!recv_msg(sockfd, nullptr, 0)) { return; } rpc_msg_hello_rsp response; server.hello(response); if (!send_msg(sockfd, &response, sizeof(response))) { return; } while (true) { if (!recv_data(sockfd, &cmd, 1)) { break; } if (cmd >= RPC_CMD_COUNT) { // fail fast if the command is invalid GGML_LOG_ERROR("Unknown command: %d\n", cmd); break; } switch (cmd) { case RPC_CMD_HELLO: { // HELLO command is handled above return; } case RPC_CMD_DEVICE_COUNT: { if (!recv_msg(sockfd, nullptr, 0)) { return; } rpc_msg_device_count_rsp response; response.device_count = backends.size(); if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_ALLOC_BUFFER: { rpc_msg_alloc_buffer_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_alloc_buffer_rsp response; if (!server.alloc_buffer(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_GET_ALLOC_SIZE: { rpc_msg_get_alloc_size_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_get_alloc_size_rsp response; if (!server.get_alloc_size(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_GET_ALIGNMENT: { rpc_msg_get_alignment_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_get_alignment_rsp response; if (!server.get_alignment(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_GET_MAX_SIZE: { rpc_msg_get_max_size_req request; if 
(!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_get_max_size_rsp response; if (!server.get_max_size(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_BUFFER_GET_BASE: { rpc_msg_buffer_get_base_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_buffer_get_base_rsp response; if (!server.buffer_get_base(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_FREE_BUFFER: { rpc_msg_free_buffer_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } if (!server.free_buffer(request)) { return; } if (!send_msg(sockfd, nullptr, 0)) { return; } break; } case RPC_CMD_BUFFER_CLEAR: { rpc_msg_buffer_clear_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } if (!server.buffer_clear(request)) { return; } if (!send_msg(sockfd, nullptr, 0)) { return; } break; } case RPC_CMD_SET_TENSOR: { std::vector input; if (!recv_msg(sockfd, input)) { return; } if (!server.set_tensor(input)) { return; } break; } case RPC_CMD_SET_TENSOR_HASH: { rpc_msg_set_tensor_hash_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_set_tensor_hash_rsp response; if (!server.set_tensor_hash(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_INIT_TENSOR: { rpc_msg_init_tensor_req request; if (!recv_msg(sockfd, &request,sizeof(request))) { return; } if (!server.init_tensor(request)) { return; } if (!send_msg(sockfd, nullptr, 0)) { return; } break; } case RPC_CMD_GET_TENSOR: { rpc_msg_get_tensor_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } std::vector response; if (!server.get_tensor(request, response)) { return; } if (!send_msg(sockfd, response.data(), response.size())) { return; } break; } case RPC_CMD_COPY_TENSOR: { rpc_msg_copy_tensor_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_copy_tensor_rsp response; if (!server.copy_tensor(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } case RPC_CMD_GRAPH_COMPUTE: { std::vector input; if (!recv_msg(sockfd, input)) { return; } if (!server.graph_compute(input)) { return; } break; } case RPC_CMD_GRAPH_RECOMPUTE: { rpc_msg_graph_recompute_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } if (!server.graph_recompute(request)) { return; } break; } case RPC_CMD_GET_DEVICE_MEMORY: { rpc_msg_get_device_memory_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { return; } rpc_msg_get_device_memory_rsp response; if (!server.get_device_memory(request, response)) { return; } if (!send_msg(sockfd, &response, sizeof(response))) { return; } break; } default: { GGML_LOG_ERROR("Unknown command: %d\n", cmd); return; } } } } void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir, size_t n_threads, size_t n_devices, ggml_backend_dev_t * devices) { if (n_devices == 0 || devices == nullptr) { fprintf(stderr, "Invalid arguments to ggml_backend_rpc_start_server\n"); return; } std::vector backends; printf("Starting RPC server v%d.%d.%d\n", RPC_PROTO_MAJOR_VERSION, RPC_PROTO_MINOR_VERSION, RPC_PROTO_PATCH_VERSION); printf(" endpoint : %s\n", endpoint); printf(" local cache : %s\n", cache_dir ? 
cache_dir : "n/a"); printf("Devices:\n"); for (size_t i = 0; i < n_devices; i++) { auto dev = devices[i]; size_t free, total; ggml_backend_dev_memory(dev, &free, &total); printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024); auto backend = ggml_backend_dev_init(dev, nullptr); if (!backend) { fprintf(stderr, "Failed to create backend for device %s\n", dev->iface.get_name(dev)); return; } backends.push_back(backend); ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr; if (reg) { auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); if (ggml_backend_set_n_threads_fn) { ggml_backend_set_n_threads_fn(backend, n_threads); } } } std::string host; int port; if (!parse_endpoint(endpoint, host, port)) { return; } #ifdef _WIN32 { WSADATA wsaData; int res = WSAStartup(MAKEWORD(2, 2), &wsaData); if (res != 0) { fprintf(stderr, "WSAStartup failed: %d\n", res); return; } } #endif auto server_socket = create_server_socket(host.c_str(), port); if (server_socket == nullptr) { fprintf(stderr, "Failed to create server socket\n"); return; } while (true) { auto client_socket = socket_accept(server_socket->fd); if (client_socket == nullptr) { fprintf(stderr, "Failed to accept client connection\n"); return; } printf("Accepted client connection\n"); fflush(stdout); rpc_serve_client(backends, cache_dir, client_socket->fd); printf("Client connection closed\n"); fflush(stdout); } #ifdef _WIN32 WSACleanup(); #endif for (auto backend : backends) { ggml_backend_free(backend); } } // device interface struct ggml_backend_rpc_device_context { std::string endpoint; uint32_t device; std::string name; std::string description; }; static const char * ggml_backend_rpc_device_get_name(ggml_backend_dev_t dev) { ggml_backend_rpc_device_context * ctx = (ggml_backend_rpc_device_context *)dev->context; return ctx->name.c_str(); } static const char * ggml_backend_rpc_device_get_description(ggml_backend_dev_t dev) { ggml_backend_rpc_device_context * ctx = (ggml_backend_rpc_device_context *)dev->context; return ctx->description.c_str(); } static void ggml_backend_rpc_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { ggml_backend_rpc_device_context * ctx = (ggml_backend_rpc_device_context *)dev->context; ggml_backend_rpc_get_device_memory(ctx->endpoint.c_str(), ctx->device, free, total); } static enum ggml_backend_dev_type ggml_backend_rpc_device_get_type(ggml_backend_dev_t dev) { // TODO: obtain value from the server return GGML_BACKEND_DEVICE_TYPE_GPU; GGML_UNUSED(dev); } static void ggml_backend_rpc_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_rpc_device_get_name(dev); props->description = ggml_backend_rpc_device_get_description(dev); props->type = ggml_backend_rpc_device_get_type(dev); ggml_backend_rpc_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = { /* .async = */ false, /* .host_buffer = */ false, /* .buffer_from_host_ptr = */ false, /* .events = */ false, }; } static ggml_backend_t ggml_backend_rpc_device_init(ggml_backend_dev_t dev, const char * params) { ggml_backend_rpc_device_context * ctx = (ggml_backend_rpc_device_context *)dev->context; return ggml_backend_rpc_init(ctx->endpoint.c_str(), ctx->device); GGML_UNUSED(params); } static ggml_backend_buffer_type_t 
ggml_backend_rpc_device_get_buffer_type(ggml_backend_dev_t dev) { ggml_backend_rpc_device_context * ctx = (ggml_backend_rpc_device_context *)dev->context; return ggml_backend_rpc_buffer_type(ctx->endpoint.c_str(), ctx->device); GGML_UNUSED(dev); } static bool ggml_backend_rpc_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { GGML_UNUSED(dev); GGML_UNUSED(op); //TODO: call the remote backend and cache the results return true; } static bool ggml_backend_rpc_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { if (!buft || buft->iface.get_name != ggml_backend_rpc_buffer_type_name) { return false; } ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; ggml_backend_rpc_device_context * dev_ctx = (ggml_backend_rpc_device_context *)dev->context; return buft_ctx->endpoint == dev_ctx->endpoint && buft_ctx->device == dev_ctx->device; } static const struct ggml_backend_device_i ggml_backend_rpc_device_i = { /* .get_name = */ ggml_backend_rpc_device_get_name, /* .get_description = */ ggml_backend_rpc_device_get_description, /* .get_memory = */ ggml_backend_rpc_device_get_memory, /* .get_type = */ ggml_backend_rpc_device_get_type, /* .get_props = */ ggml_backend_rpc_device_get_props, /* .init_backend = */ ggml_backend_rpc_device_init, /* .get_buffer_type = */ ggml_backend_rpc_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, /* .buffer_from_host_ptr = */ NULL, /* .supports_op = */ ggml_backend_rpc_device_supports_op, /* .supports_buft = */ ggml_backend_rpc_device_supports_buft, /* .offload_op = */ NULL, /* .event_new = */ NULL, /* .event_free = */ NULL, /* .event_synchronize = */ NULL, }; // backend reg interface struct ggml_backend_rpc_reg_context { std::string name; std::vector devices; }; static const char * ggml_backend_rpc_reg_get_name(ggml_backend_reg_t reg) { ggml_backend_rpc_reg_context * ctx = (ggml_backend_rpc_reg_context *)reg->context; return ctx ? ctx->name.c_str() : "RPC"; } static size_t ggml_backend_rpc_reg_get_device_count(ggml_backend_reg_t reg) { ggml_backend_rpc_reg_context * ctx = (ggml_backend_rpc_reg_context *)reg->context; return ctx ? 
ctx->devices.size() : 0; } static ggml_backend_dev_t ggml_backend_rpc_reg_get_device(ggml_backend_reg_t reg, size_t index) { ggml_backend_rpc_reg_context * ctx = (ggml_backend_rpc_reg_context *)reg->context; if (ctx == nullptr) { GGML_ABORT("The RPC backend does not have enumerated devices - use ggml_backend_rpc_add_server instead"); } else { GGML_ASSERT(index < ctx->devices.size()); return ctx->devices[index]; } } static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const char * name) { if (std::strcmp(name, "ggml_backend_rpc_add_server") == 0) { return (void *)ggml_backend_rpc_add_server; } if (std::strcmp(name, "ggml_backend_rpc_start_server") == 0) { return (void *)ggml_backend_rpc_start_server; } return NULL; GGML_UNUSED(reg); } static const struct ggml_backend_reg_i ggml_backend_rpc_reg_i = { /* .get_name = */ ggml_backend_rpc_reg_get_name, /* .get_device_count = */ ggml_backend_rpc_reg_get_device_count, /* .get_device = */ ggml_backend_rpc_reg_get_device, /* .get_proc_address = */ ggml_backend_rpc_get_proc_address, }; ggml_backend_reg_t ggml_backend_rpc_reg(void) { static struct ggml_backend_reg ggml_backend_rpc_reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_rpc_reg_i, /* .context = */ NULL, }; return &ggml_backend_rpc_reg; } static uint32_t ggml_backend_rpc_get_device_count(const char * endpoint) { auto sock = get_socket(endpoint); if (sock == nullptr) { GGML_LOG_ERROR("Failed to connect to %s\n", endpoint); return 0; } rpc_msg_device_count_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_DEVICE_COUNT, nullptr, 0, &response, sizeof(response)); RPC_STATUS_ASSERT(status); return response.device_count; } static const ggml_backend_reg_i ggml_backend_rpc_reg_interface = { /* .get_name = */ ggml_backend_rpc_reg_get_name, /* .get_device_count = */ ggml_backend_rpc_reg_get_device_count, /* .get_device = */ ggml_backend_rpc_reg_get_device, /* .get_proc_address = */ ggml_backend_rpc_get_proc_address, }; ggml_backend_reg_t ggml_backend_rpc_add_server(const char * endpoint) { static std::unordered_map reg_map; static std::mutex mutex; static uint32_t dev_id = 0; std::lock_guard lock(mutex); if (reg_map.find(endpoint) != reg_map.end()) { return reg_map[endpoint]; } uint32_t dev_count = ggml_backend_rpc_get_device_count(endpoint); if (dev_count == 0) { return nullptr; } ggml_backend_rpc_reg_context * ctx = new ggml_backend_rpc_reg_context; ctx->name = "RPC[" + std::string(endpoint) + "]"; for (uint32_t ind = 0; ind < dev_count; ind++) { std::string dev_name = "RPC" + std::to_string(dev_id); std::string dev_desc = std::string(endpoint); ggml_backend_rpc_device_context * dev_ctx = new ggml_backend_rpc_device_context { /* .endpoint = */ endpoint, /* .device = */ ind, /* .name = */ dev_name, /* .description = */ dev_desc }; ggml_backend_dev_t dev = new ggml_backend_device { /* .iface = */ ggml_backend_rpc_device_i, /* .reg = */ ggml_backend_rpc_reg(), /* .context = */ dev_ctx, }; ctx->devices.push_back(dev); dev_id++; } ggml_backend_reg_t reg = new ggml_backend_reg { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_rpc_reg_interface, /* .context = */ ctx }; reg_map[endpoint] = reg; return reg; } GGML_BACKEND_DL_IMPL(ggml_backend_rpc_reg) ggml-org-ggml-3678254/src/ggml-sycl/000077500000000000000000000000001512524704700170515ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-sycl/CMakeLists.txt000066400000000000000000000225011512524704700216110ustar00rootroot00000000000000message(STATUS 
"GGML_SYCL_TARGET=${GGML_SYCL_TARGET}") if (NOT GGML_SYCL_TARGET MATCHES "^(INTEL|NVIDIA|AMD)$") message(FATAL_ERROR "Invalid backend chosen, supported options are INTEL, NVIDIA, or AMD") endif() check_cxx_compiler_flag("-fsycl" SUPPORTS_SYCL) if (DEFINED ENV{ONEAPI_ROOT}) message(STATUS "Using oneAPI Release SYCL compiler (icpx).") elseif(SUPPORTS_SYCL) message(WARNING "Using open-source SYCL compiler (clang++). Didn't detect ENV {ONEAPI_ROOT}. If you expected the oneAPI Release compiler, please install oneAPI & source it, like: source /opt/intel/oneapi/setvars.sh") else() message(FATAL_ERROR "C++ compiler lacks SYCL support.") endif() message(STATUS "SYCL found") #todo: AOT ggml_add_backend_library(ggml-sycl ggml-sycl.cpp ../../include/ggml-sycl.h ) file(GLOB GGML_HEADERS_SYCL "*.hpp") file(GLOB GGML_SOURCES_SYCL "*.cpp") target_sources(ggml-sycl PRIVATE ${GGML_HEADERS_SYCL} ${GGML_SOURCES_SYCL}) if (WIN32) # To generate a Visual Studio solution, using Intel C++ Compiler for ggml-sycl is mandatory if( ${CMAKE_GENERATOR} MATCHES "Visual Studio" AND NOT (${CMAKE_GENERATOR_TOOLSET} MATCHES "Intel C")) set_target_properties(ggml-sycl PROPERTIES VS_PLATFORM_TOOLSET "Intel C++ Compiler 2025") set(CMAKE_CXX_COMPILER "icx") set(CMAKE_CXX_COMPILER_ID "IntelLLVM") endif() endif() macro(detect_and_find_package package_name) set(test_source " cmake_minimum_required(VERSION ${CMAKE_VERSION}) project(check_package LANGUAGES CXX) find_package(${package_name} QUIET) ") set(test_dir "${CMAKE_CURRENT_BINARY_DIR}/check_package_${package_name}") file(WRITE "${test_dir}/CMakeLists.txt" "${test_source}") set(cmake_args "") if(CMAKE_GENERATOR) list(APPEND cmake_args "-G" "${CMAKE_GENERATOR}") endif() if(CMAKE_GENERATOR_PLATFORM) list(APPEND cmake_args "-A" "${CMAKE_GENERATOR_PLATFORM}") endif() if(CMAKE_GENERATOR_TOOLSET) list(APPEND cmake_args "-T" "${CMAKE_GENERATOR_TOOLSET}") endif() if(CMAKE_CXX_COMPILER) list(APPEND cmake_args "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}") endif() execute_process( COMMAND ${CMAKE_COMMAND} ${cmake_args} . WORKING_DIRECTORY "${test_dir}" RESULT_VARIABLE result OUTPUT_QUIET ERROR_QUIET ) if(result EQUAL 0) find_package(${package_name} ${ARGN}) else() message(WARNING "Detection of ${package_name} failed. 
The package might be broken or incompatible.") set(${package_name}_FOUND FALSE) endif() endmacro() detect_and_find_package(IntelSYCL) if (IntelSYCL_FOUND) # Use oneAPI CMake when possible target_link_libraries(ggml-sycl PRIVATE IntelSYCL::SYCL_CXX) else() # Fallback to the simplest way of enabling SYCL when using intel/llvm nightly for instance target_compile_options(ggml-sycl PRIVATE "-fsycl") target_link_options(ggml-sycl PRIVATE "-fsycl") endif() target_compile_options(ggml-sycl PRIVATE "-Wno-narrowing") # Link against oneDNN set(GGML_SYCL_DNNL 0) if(GGML_SYCL_DNN) find_package(DNNL) if(DNNL_FOUND) if (NOT DEFINED DNNL_GPU_VENDOR) # default to intel target set(DNNL_GPU_VENDOR "INTEL") if(NOT "${GGML_SYCL_TARGET}" STREQUAL "INTEL") message(WARNING "oneDNN builds bundled with oneapi release only support INTEL target") endif() endif() # Verify oneDNN was compiled for the same target as llama if("${GGML_SYCL_TARGET}" STREQUAL "${DNNL_GPU_VENDOR}") target_link_libraries(ggml-sycl PRIVATE DNNL::dnnl) set(GGML_SYCL_DNNL 1) get_target_property(CONFIGS DNNL::dnnl IMPORTED_CONFIGURATIONS) foreach(CONFIG ${CONFIGS}) get_target_property(DNNL_LIB DNNL::dnnl IMPORTED_LOCATION_${CONFIG}) message(STATUS "Found oneDNN: ${DNNL_LIB}") endforeach() else() message(WARNING "oneDNN must be compiled for the same target as llama.cpp. llama.cpp: ${GGML_SYCL_TARGET}, oneDNN: ${DNNL_GPU_VENDOR}. Disabling oneDNN support.") endif() else() message(STATUS "oneDNN not found, disabling oneDNN support") endif() else() message(STATUS "oneDNN support disabled by the user") endif() target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_DNNL=${GGML_SYCL_DNNL}) if (GGML_SYCL_F16) if (GGML_SYCL_TARGET STREQUAL "AMD") message(WARNING "AMD target does not entirely support FP16 in the SYCL backend.") endif() add_compile_definitions(GGML_SYCL_F16) endif() if (GGML_SYCL_TARGET STREQUAL "INTEL") add_compile_definitions(GGML_SYCL_WARP_SIZE=16) target_link_options(ggml-sycl PRIVATE -Xs -ze-intel-greater-than-4GB-buffer-required) elseif (GGML_SYCL_TARGET STREQUAL "NVIDIA") add_compile_definitions(GGML_SYCL_WARP_SIZE=32) elseif (GGML_SYCL_TARGET STREQUAL "AMD") # INFO: Allowed Sub_group_sizes are not consistent through all # hip targets. For example, 64 is used for certain models, but the backend # does not support it. 
# Target archs tested working: gfx1030, gfx1031, (Only tested sub_group_size = 32) add_compile_definitions(GGML_SYCL_WARP_SIZE=32) else() # default for other target add_compile_definitions(GGML_SYCL_WARP_SIZE=32) endif() if (GGML_SYCL_GRAPH) target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_GRAPH) endif() # Link against Intel oneMKL or oneMath if (GGML_SYCL_TARGET STREQUAL "INTEL") # Intel devices use Intel oneMKL directly instead of oneMath to avoid the limitation of linking Intel oneMKL statically # See https://github.com/uxlfoundation/oneMath/issues/654 if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set(SYCL_COMPILER ON) endif() find_package(MKL REQUIRED) target_link_libraries(ggml-sycl PRIVATE MKL::MKL_SYCL::BLAS) target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_USE_INTEL_ONEMKL) else() find_package(oneMath QUIET) if (NOT oneMath_FOUND) message(STATUS "oneMath not found: oneMath will be automatically downloaded") # Use FetchContent to automatically pull and build oneMath include(FetchContent) set(BUILD_FUNCTIONAL_TESTS False) set(BUILD_EXAMPLES False) set(TARGET_DOMAINS blas) if (GGML_SYCL_TARGET STREQUAL "NVIDIA") set(ENABLE_MKLCPU_BACKEND False) set(ENABLE_MKLGPU_BACKEND False) set(ENABLE_CUBLAS_BACKEND True) elseif (GGML_SYCL_TARGET STREQUAL "AMD") set(ENABLE_MKLCPU_BACKEND False) set(ENABLE_MKLGPU_BACKEND False) set(ENABLE_ROCBLAS_BACKEND True) # Ensure setting a string variable here is not overriden by oneMath CACHE variables cmake_policy(SET CMP0126 NEW) # Setting the device architecture is only needed and useful for AMD devices in oneMath set(HIP_TARGETS ${GGML_SYCL_DEVICE_ARCH} CACHE STRING "oneMath HIP target" FORCE) endif() FetchContent_Declare( ONEMATH GIT_REPOSITORY https://github.com/uxlfoundation/oneMath.git GIT_TAG 8efe85f5aaebb37f1d8c503b7af66315feabf142 ) FetchContent_MakeAvailable(ONEMATH) # Create alias to match with find_package targets name function(onemath_alias target) if (TARGET ${target}_obj) # Silence verbose warnings from external libraries target_compile_options(${target}_obj PRIVATE -w) endif() if (TARGET ${target}) add_library(ONEMATH::${target} ALIAS ${target}) endif() endfunction() onemath_alias(onemath) onemath_alias(onemath_blas_mklcpu) onemath_alias(onemath_blas_mklgpu) onemath_alias(onemath_blas_cublas) onemath_alias(onemath_blas_rocblas) endif() # Below oneMath compile-time dispatching is used for better performance if (GGML_SYCL_TARGET STREQUAL "NVIDIA") target_link_libraries(ggml-sycl PRIVATE ONEMATH::onemath_blas_cublas) target_compile_options(ggml-sycl PRIVATE "-fsycl-targets=nvptx64-nvidia-cuda") target_link_options(ggml-sycl PRIVATE "-fsycl-targets=nvptx64-nvidia-cuda") target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_NVIDIA) elseif (GGML_SYCL_TARGET STREQUAL "AMD") if (NOT GGML_SYCL_DEVICE_ARCH) message(FATAL_ERROR "Can't enable SYCL hip backend, GGML_SYCL_DEVICE_ARCH has not been set.") endif() target_link_libraries(ggml-sycl PRIVATE ONEMATH::onemath_blas_rocblas) target_compile_options(ggml-sycl PRIVATE "-fsycl-targets=amdgcn-amd-amdhsa") target_link_options(ggml-sycl PRIVATE "-fsycl-targets=amdgcn-amd-amdhsa") target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_AMD) else() # Fallback to oneMath runtime dispatcher target_link_libraries(ggml-sycl PRIVATE ONEMATH::onemath) target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_GENERIC) endif() endif() if (GGML_SYCL_DEVICE_ARCH) target_compile_options(ggml-sycl PRIVATE -Xsycl-target-backend --offload-arch=${GGML_SYCL_DEVICE_ARCH}) target_link_options(ggml-sycl PRIVATE 
-Xsycl-target-backend --offload-arch=${GGML_SYCL_DEVICE_ARCH}) endif() ggml-org-ggml-3678254/src/ggml-sycl/add-id.cpp000066400000000000000000000042061512524704700207010ustar00rootroot00000000000000#include #include "common.hpp" #include "add-id.hpp" static void add_id_kernel( const float* src0, const float* src1, const int32_t* src2, float* dst, int64_t ne0, int64_t ne1, size_t nb01, size_t nb02, size_t nb11, size_t nb21, sycl::nd_item<3> item_ct1) { const int64_t i1 = item_ct1.get_group(2); const int64_t i2 = item_ct1.get_group(1); const int i11 = *(const int32_t*)((const char*)src2 + i1 * sizeof(int32_t) + i2 * nb21); const size_t nb1 = ne0 * sizeof(float); const size_t nb2 = ne1 * nb1; float* dst_row = (float*)((char*)dst + i1 * nb1 + i2 * nb2); const float* src0_row = (const float*)((const char*)src0 + i1 * nb01 + i2 * nb02); const float* src1_row = (const float*)((const char*)src1 + i11 * nb11); for (int64_t i0 = item_ct1.get_local_id(2); i0 < ne0; i0 += item_ct1.get_local_range(2)) { dst_row[i0] = src0_row[i0] + src1_row[i0]; } } void ggml_sycl_add_id(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { const ggml_tensor* src0 = dst->src[0]; const ggml_tensor* src1 = dst->src[1]; const ggml_tensor* src2 = dst->src[2]; GGML_TENSOR_TERNARY_OP_LOCALS GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(src2->type == GGML_TYPE_I32); GGML_ASSERT(nb00 == sizeof(float)); GGML_ASSERT(nb10 == sizeof(float)); GGML_ASSERT(nb20 == sizeof(int32_t)); const float* src0_d = (const float*)src0->data; const float* src1_d = (const float*)src1->data; const int32_t* src2_d = (const int32_t*)src2->data; float* dst_d = (float*)dst->data; int threads = std::min((int)ne00, 768); // cols ctx.stream()->parallel_for( sycl::nd_range<3>( sycl::range<3>(1, ne02, ne01) * sycl::range<3>(1, 1, threads), sycl::range<3>(1, 1, threads)), [=](sycl::nd_item<3> item_ct1) { add_id_kernel( src0_d, src1_d, src2_d, dst_d, ne0, ne1, nb01, nb02, nb11, nb21, item_ct1); }); } ggml-org-ggml-3678254/src/ggml-sycl/add-id.hpp000066400000000000000000000002751512524704700207100ustar00rootroot00000000000000#ifndef GGML_SYCL_ADD_ID_HPP #define GGML_SYCL_ADD_ID_HPP #include "common.hpp" void ggml_sycl_add_id(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_ADD_ID_HPP ggml-org-ggml-3678254/src/ggml-sycl/backend.hpp000066400000000000000000000017471512524704700211620ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_BACKEND_HPP #define GGML_SYCL_BACKEND_HPP #include "binbcast.hpp" #include "common.hpp" #include "concat.hpp" #include "conv.hpp" #include "convert.hpp" #include "count-equal.hpp" #include "cpy.hpp" #include "dequantize.hpp" #include "dmmv.hpp" #include "element_wise.hpp" #include "gla.hpp" #include "im2col.hpp" #include "mmq.hpp" #include "mmvq.hpp" #include "norm.hpp" #include "outprod.hpp" #include "pad.hpp" #include "quantize.hpp" #include "quants.hpp" #include "roll.hpp" #include "rope.hpp" #include "set_rows.hpp" #include "ssm_conv.hpp" #include "softmax.hpp" #include "tsembd.hpp" #include "wkv.hpp" #include "pad_reflect_1d.hpp" #endif // GGML_SYCL_BACKEND_HPP ggml-org-ggml-3678254/src/ggml-sycl/binbcast.cpp000066400000000000000000000347231512524704700213530ustar00rootroot00000000000000#include "binbcast.hpp" #include #include #include #include "ggml.h" template static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst, int ne0, int ne1, int ne2, int ne3, int ne10, int ne11, int ne12, int ne13, /*int s0, */ int s1, int s2, int s3, /*int s00,*/ int s01, int s02, int s03, /*int s10,*/ int s11, int s12, int s13, const sycl::nd_item<3> &item_ct1) { const int i0s = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); const int i1 = (item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1)); const int i2 = (item_ct1.get_local_range(0) * item_ct1.get_group(0) + item_ct1.get_local_id(0)) / ne3; const int i3 = (item_ct1.get_local_range(0) * item_ct1.get_group(0) + item_ct1.get_local_id(0)) % ne3; if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { return; } const int i11 = i1 % ne11; const int i12 = i2 % ne12; const int i13 = i3 % ne13; const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; const size_t i_dst = i3*s3 + i2*s2 + i1*s1; const src0_t * src0_row = src0 + i_src0; const src1_t * src1_row = src1 + i_src1; dst_t * dst_row = dst + i_dst; for (int i0 = i0s; i0 < ne0; i0 += item_ct1.get_local_range(2) * item_ct1.get_group_range(2)) { const int i10 = i0 % ne10; dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]); } } template static void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst, int ne0, int ne1, int ne2, int ne3, int ne10, int ne11, int ne12, int ne13, /*int s0, */ int s1, int s2, int s3, /*int s00,*/ int s01, int s02, int s03, /*int s10,*/ int s11, int s12, int s13, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); const int i3 = i/(ne2*ne1*ne0); const int i2 = (i/(ne1*ne0)) % ne2; const int i1 = (i/ne0) % ne1; const int i0 = i % ne0; if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { return; } const int i11 = i1 % ne11; const int i12 = i2 % ne12; const int i13 = i3 % ne13; const size_t i_src0 = i3*s03 + i2*s02 + i1*s01; const size_t i_src1 = i13*s13 + i12*s12 + i11*s11; const size_t i_dst = i3*s3 + i2*s2 + i1*s1; const src0_t * src0_row = src0 + i_src0; const src1_t * src1_row = src1 + i_src1; dst_t * dst_row = dst + i_dst; const int i10 = i0 % ne10; dst_row[i0] = (dst_t)bin_op(src0 ? 
(float)src0_row[i0] : 0.0f, (float)src1_row[i10]); } template struct bin_bcast_sycl { template void operator()(const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, const size_t nb00, const size_t nb01, const size_t nb02, const size_t nb03, const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb13, const size_t nb0, const size_t nb1, const size_t nb2, const size_t nb3, const bool src0_is_contiguous, const bool src1_is_contiguous, const bool dst_is_contiguous, queue_ptr stream) { int nr0 = ne10 / ne0; int nr1 = ne11/ne1; int nr2 = ne12/ne2; int nr3 = ne13/ne3; int nr[4] = { nr0, nr1, nr2, nr3 }; // collapse dimensions until first broadcast dimension int64_t cne[] = {ne0, ne1, ne2, ne3}; int64_t cne0[] = {ne00, ne01, ne02, ne03}; int64_t cne1[] = {ne10, ne11, ne12, ne13}; size_t cnb[] = {nb0, nb1, nb2, nb3}; size_t cnb0[] = {nb00, nb01, nb02, nb03}; size_t cnb1[] = {nb10, nb11, nb12, nb13}; auto collapse = [](int64_t cne[]) { cne[0] *= cne[1]; cne[1] = cne[2]; cne[2] = cne[3]; cne[3] = 1; }; auto collapse_nb = [](size_t cnb[], int64_t cne[]) { cnb[1] *= cne[1]; cnb[2] *= cne[2]; cnb[3] *= cne[3]; }; if (src0_is_contiguous && src1_is_contiguous && dst_is_contiguous) { for (int i = 0; i < 4; i++) { if (nr[i] != 1) { break; } if (i > 0) { collapse_nb(cnb, cne); collapse_nb(cnb0, cne0); collapse_nb(cnb1, cne1); collapse(cne); collapse(cne0); collapse(cne1); } } } { int64_t ne0 = cne[0]; int64_t ne1 = cne[1]; int64_t ne2 = cne[2]; int64_t ne3 = cne[3]; int64_t ne10 = cne1[0]; int64_t ne11 = cne1[1]; int64_t ne12 = cne1[2]; int64_t ne13 = cne1[3]; size_t nb0 = cnb[0]; size_t nb1 = cnb[1]; size_t nb2 = cnb[2]; size_t nb3 = cnb[3]; size_t nb00 = cnb0[0]; size_t nb01 = cnb0[1]; size_t nb02 = cnb0[2]; size_t nb03 = cnb0[3]; size_t nb10 = cnb1[0]; size_t nb11 = cnb1[1]; size_t nb12 = cnb1[2]; size_t nb13 = cnb1[3]; size_t s0 = nb0 / sizeof(dst_t); size_t s1 = nb1 / sizeof(dst_t); size_t s2 = nb2 / sizeof(dst_t); size_t s3 = nb3 / sizeof(dst_t); size_t s10 = nb10 / sizeof(src1_t); size_t s11 = nb11 / sizeof(src1_t); size_t s12 = nb12 / sizeof(src1_t); size_t s13 = nb13 / sizeof(src1_t); size_t s00 = nb00 / sizeof(src0_t); size_t s01 = nb01 / sizeof(src0_t); size_t s02 = nb02 / sizeof(src0_t); size_t s03 = nb03 / sizeof(src0_t); GGML_UNUSED(s00); GGML_ASSERT(nb0 % sizeof(dst_t) == 0); GGML_ASSERT(nb1 % sizeof(dst_t) == 0); GGML_ASSERT(nb2 % sizeof(dst_t) == 0); GGML_ASSERT(nb3 % sizeof(dst_t) == 0); GGML_ASSERT(nb00 % sizeof(src0_t) == 0); GGML_ASSERT(nb01 % sizeof(src0_t) == 0); GGML_ASSERT(nb02 % sizeof(src0_t) == 0); GGML_ASSERT(nb03 % sizeof(src0_t) == 0); GGML_ASSERT(nb10 % sizeof(src1_t) == 0); GGML_ASSERT(nb11 % sizeof(src1_t) == 0); GGML_ASSERT(nb12 % sizeof(src1_t) == 0); GGML_ASSERT(nb13 % sizeof(src1_t) == 0); GGML_ASSERT(s0 == 1); GGML_ASSERT(s10 == 1); const int block_size = 128; int64_t hne0 = std::max(ne0/2LL, 1LL); sycl::range<3> block_dims(1, 1, 1); block_dims[2] = std::min(hne0, block_size); block_dims[1] = std::min( ne1, block_size / (unsigned int)block_dims[2]); block_dims[0] = std::min( std::min( ne2 * ne3, block_size / (unsigned int)block_dims[2] / (unsigned int)block_dims[1]), 64U); sycl::range<3> block_nums( (ne2 * ne3 + block_dims[0] - 1) / block_dims[0], (ne1 + block_dims[1] - 1) / block_dims[1], (hne0 + block_dims[2] - 
1) / block_dims[2]); if (block_nums[0] > 65535) { // this is the maximum number of blocks in z direction, fallback to 1D grid kernel int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, block_num) * sycl::range<3>(1, 1, block_size), sycl::range<3>(1, 1, block_size)), [=](sycl::nd_item<3> item_ct1) { k_bin_bcast_unravel( src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3, ne10, ne11, ne12, ne13, s1, s2, s3, s01, s02, s03, s11, s12, s13, item_ct1); }); } } else { /* DPCT1049:16: The work-group size passed to the SYCL kernel may exceed the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. */ dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_bin_bcast(src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3, ne10, ne11, ne12, ne13, s1, s2, s3, s01, s02, s03, s11, s12, s13, item_ct1); }); } } } }; template inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { dpct::queue_ptr main_stream = ctx.stream(); GGML_TENSOR_BINARY_OP_LOCALS if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { op()((const float *) src0->data, (const float *) src1->data, (float *) dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { op()((const sycl::half *) src0->data, (const sycl::half *) src1->data, (sycl::half *) dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) { op()((const sycl::half *) src0->data, (const float *) src1->data, (sycl::half *) dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream); } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) { op()((const int32_t *) src0->data, (const int32_t *) src1->data, (int32_t *) dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream); } else if (src0->type == GGML_TYPE_I16 && src1->type == GGML_TYPE_I16 && dst->type == GGML_TYPE_I16) { op()((const int16_t *) src0->data, (const int16_t *) src1->data, (int16_t *) dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream); } else { fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__, 
ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type)); GGML_ABORT("fatal error"); } } inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { ggml_sycl_op_bin_bcast>(ctx, dst->src[0], dst->src[1], dst); } inline void ggml_sycl_op_sub(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { ggml_sycl_op_bin_bcast>(ctx, dst->src[0], dst->src[1], dst); } inline void ggml_sycl_op_mul(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { ggml_sycl_op_bin_bcast>(ctx, dst->src[0], dst->src[1], dst); } inline void ggml_sycl_op_div(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { ggml_sycl_op_bin_bcast>(ctx, dst->src[0], dst->src[1], dst); } inline void ggml_sycl_op_repeat(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { ggml_sycl_op_bin_bcast>(ctx, dst, dst->src[0], dst); } void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); ggml_sycl_op_add(ctx, dst); } void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); ggml_sycl_op_sub(ctx, dst); } void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); ggml_sycl_op_mul(ctx, dst); } void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); ggml_sycl_op_div(ctx, dst); } void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_repeat(ctx, dst); } ggml-org-ggml-3678254/src/ggml-sycl/binbcast.hpp000066400000000000000000000016701512524704700213530ustar00rootroot00000000000000#ifndef GGML_SYCL_BINBCAST_HPP #define GGML_SYCL_BINBCAST_HPP #include "common.hpp" static __dpct_inline__ float op_repeat(const float a, const float b) { return b; GGML_UNUSED(a); } static __dpct_inline__ float op_add(const float a, const float b) { return a + b; } static __dpct_inline__ float op_sub(const float a, const float b) { return a - b; } static __dpct_inline__ float op_mul(const float a, const float b) { return a * b; } static __dpct_inline__ float op_div(const float a, const float b) { return a / b; } void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif //GGML_SYCL_BINBCAST_HPP ggml-org-ggml-3678254/src/ggml-sycl/common.cpp000066400000000000000000000052101512524704700210430ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #include "common.hpp" #include "ggml-backend-impl.h" #include "ggml-impl.h" int get_current_device_id() { return dpct::dev_mgr::instance().current_device_id(); } void* ggml_sycl_host_malloc(size_t size) try { if (getenv("GGML_SYCL_NO_PINNED") != nullptr) { return nullptr; } void* ptr = nullptr; // allow to use dpct::get_in_order_queue() for host malloc dpct::err0 err = CHECK_TRY_ERROR( ptr = (void*)sycl::malloc_host(size, dpct::get_in_order_queue())); if (err != 0) { // clear the error GGML_LOG_ERROR("WARNING: failed to allocate %.2f MB of pinned memory: %s\n", size / 1024.0 / 1024.0, "syclGetErrorString is not supported"); return nullptr; } return ptr; } catch (sycl::exception const& exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } void ggml_sycl_host_free(void* ptr) try { // allow to use dpct::get_in_order_queue() for host malloc SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(ptr, dpct::get_in_order_queue()))); } catch (sycl::exception const& exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } bool gpu_has_xmx(sycl::device &dev) { return dev.has(sycl::aspect::ext_intel_matrix); } int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size) { const int64_t max_range = std::numeric_limits::max(); int64_t sycl_down_blk_size = block_size; int64_t global_range = accumulate_block_num * sycl_down_blk_size; while(global_range > max_range) { sycl_down_blk_size /= 2; global_range = accumulate_block_num * sycl_down_blk_size; } return sycl_down_blk_size; } void release_extra_gpu(ggml_tensor_extra_gpu * extra, std::vector streams) { for (int i = 0; i < ggml_sycl_info().device_count; ++i) { for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) { if (extra->events[i][is] != nullptr) { SYCL_CHECK(CHECK_TRY_ERROR(dpct::destroy_event(extra->events[i][is]))); } } if (extra->data_device[i] != nullptr && streams.size()>0) { ggml_sycl_set_device(i); SYCL_CHECK( CHECK_TRY_ERROR(sycl::free(extra->data_device[i], *(streams[i])))); } } delete extra; } ggml-org-ggml-3678254/src/ggml-sycl/common.hpp000066400000000000000000000513311512524704700210550ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_COMMON_HPP #define GGML_SYCL_COMMON_HPP #include #include #include #include #include "dpct/helper.hpp" #include "ggml-sycl.h" #include "presets.hpp" #include "sycl_hw.hpp" #if GGML_SYCL_DNNL #include "dnnl.hpp" #include "dnnl_sycl.hpp" #endif #define GGML_COMMON_DECL_SYCL #define GGML_COMMON_IMPL_SYCL /* suppress warning spam */ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wnested-anon-types" #include "ggml-common.h" #pragma clang diagnostic pop #include "ggml-impl.h" void* ggml_sycl_host_malloc(size_t size); void ggml_sycl_host_free(void* ptr); extern int g_ggml_sycl_debug; extern int g_ggml_sycl_disable_optimize; extern int g_ggml_sycl_prioritize_dmmv; #if defined(__clang__) && __has_builtin(__builtin_expect) // Hint the optimizer to pipeline the more likely following instruction in branches # define LIKELY(expr) __builtin_expect(expr, true) # define UNLIKELY(expr) __builtin_expect(expr, false) #else # define LIKELY(expr) (expr) # define UNLIKELY(expr) (expr) #endif #define GGML_SYCL_DEBUG(...) \ do { \ if (UNLIKELY(g_ggml_sycl_debug)) \ fprintf(stderr, __VA_ARGS__); \ } while (0) #define CHECK_TRY_ERROR(expr) \ [&]() { \ try { \ expr; \ return dpct::success; \ } catch (std::exception const& e) { \ std::cerr << e.what() << "\nException caught at file:" << __FILE__ \ << ", line:" << __LINE__ << ", func:" << __func__ \ << std::endl; \ return dpct::default_error; \ } \ }() #define __SYCL_ARCH__ DPCT_COMPATIBILITY_TEMP #define VER_4VEC 610 // todo for hardward optimize. #define VER_GEN9 700 // todo for hardward optimize. #define VER_GEN12 1000000 // todo for hardward optimize. #define VER_GEN13 (VER_GEN12 + 1030) // todo for hardward optimize. #define GGML_SYCL_MAX_NODES 8192 // TODO: adapt to hardwares // define for XMX in Intel GPU // TODO: currently, it's not used for XMX really. 
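// Note: the guard below leaves SYCL_USE_XMX undefined whenever GGML_SYCL_FORCE_MMQ is
// defined at build time, i.e. defining GGML_SYCL_FORCE_MMQ is meant to force the MMQ
// kernels instead of the XMX path.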
#if !defined(GGML_SYCL_FORCE_MMQ) #define SYCL_USE_XMX #endif // max batch size to use MMQ kernels when tensor cores are available #define MMQ_MAX_BATCH_SIZE 32 // dmmv = dequantize_mul_mat_vec #ifndef GGML_SYCL_DMMV_X #define GGML_SYCL_DMMV_X 32 #endif #ifndef GGML_SYCL_MMV_Y #define GGML_SYCL_MMV_Y 1 #endif typedef sycl::queue *queue_ptr; enum ggml_sycl_backend_gpu_mode { SYCL_UNSET_GPU_MODE = -1, SYCL_SINGLE_GPU_MODE = 0, SYCL_MUL_GPU_MODE }; static_assert(sizeof(sycl::half) == sizeof(ggml_fp16_t), "wrong fp16 size"); static void crash() { int* ptr = NULL; *ptr = 0; } [[noreturn]] static void ggml_sycl_error( const char* stmt, const char* func, const char* file, const int line, const char* msg) { fprintf(stderr, "SYCL error: %s: %s\n", stmt, msg); fprintf(stderr, " in function %s at %s:%d\n", func, file, line); GGML_ABORT("SYCL error"); } #define SYCL_CHECK(err) \ do { \ auto err_ = (err); \ if (err_ != 0) \ ggml_sycl_error(#err, __func__, __FILE__, __LINE__, "Exception caught in this line of code."); \ } while (0) #if DPCT_COMPAT_RT_VERSION >= 11100 #define GGML_SYCL_ASSUME(x) __builtin_assume(x) #else #define GGML_SYCL_ASSUME(x) #endif // DPCT_COMPAT_RT_VERSION >= 11100 #ifdef GGML_SYCL_F16 typedef sycl::half dfloat; // dequantize float typedef sycl::half2 dfloat2; #else typedef float dfloat; // dequantize float typedef sycl::float2 dfloat2; #endif // GGML_SYCL_F16 #define MMVQ_MAX_BATCH_SIZE 8 static int g_all_sycl_device_count = -1; static bool g_ggml_backend_sycl_buffer_type_initialized = false; static ggml_sycl_backend_gpu_mode g_ggml_sycl_backend_gpu_mode = SYCL_UNSET_GPU_MODE; static void* g_scratch_buffer = nullptr; static size_t g_scratch_size = 0; // disabled by default static size_t g_scratch_offset = 0; [[noreturn]] static inline void bad_arch(const sycl::stream& stream_ct1) { stream_ct1 << "ERROR: ggml-sycl was compiled without support for the " "current GPU architecture.\n"; // __trap(); std::exit(1); (void)bad_arch; // suppress unused function warning } int get_current_device_id(); inline dpct::err0 ggml_sycl_set_device(const int device) try { int current_device_id; SYCL_CHECK(CHECK_TRY_ERROR(current_device_id = get_current_device_id())); // GGML_SYCL_DEBUG("ggml_sycl_set_device device_id=%d, // current_device_id=%d\n", device, current_device); if (device == current_device_id) { return 0; } return CHECK_TRY_ERROR(dpct::select_device(device)); } catch (sycl::exception const& exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; crash(); std::exit(1); } ////////////////////// struct optimize_feature { bool reorder=false; }; struct sycl_device_info { int cc; // compute capability int nsm; // number of streaming multiprocessors (CUDA) maps to the maximum // number of compute units on a SYCL device. // size_t smpb; // max. shared memory per block size_t smpbo; // max. 
shared memory per block (with opt-in) bool vmm; // virtual memory support size_t total_vram; //sycl_hw_info hw_info; \\ device id and aarch, currently not used optimize_feature opt_feature; }; struct ggml_sycl_device_info { int device_count; sycl_device_info devices[GGML_SYCL_MAX_DEVICES] = {}; std::array default_tensor_split = {}; int max_work_group_sizes[GGML_SYCL_MAX_DEVICES] = {0}; }; const ggml_sycl_device_info & ggml_sycl_info(); struct ggml_sycl_pool { virtual ~ggml_sycl_pool() = default; virtual void * alloc(size_t size, size_t * actual_size) = 0; virtual void free(void * ptr, size_t size) = 0; }; template struct ggml_sycl_pool_alloc { ggml_sycl_pool * pool = nullptr; T * ptr = nullptr; size_t actual_size = 0; explicit ggml_sycl_pool_alloc(ggml_sycl_pool & pool) : pool(&pool) { } ggml_sycl_pool_alloc(ggml_sycl_pool & pool, size_t size) : pool(&pool) { alloc(size); } ~ggml_sycl_pool_alloc() { if (ptr != nullptr) { pool->free(ptr, actual_size); } } T * realloc(size_t size) { GGML_ASSERT(pool != nullptr); if (ptr) pool->free(ptr, actual_size); ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size); return ptr; } // size is in number of elements T * alloc(size_t size) { GGML_ASSERT(pool != nullptr); GGML_ASSERT(ptr == nullptr); ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size); return ptr; } T * alloc(ggml_sycl_pool & pool, size_t size) { this->pool = &pool; return alloc(size); } T * get() { return ptr; } ggml_sycl_pool_alloc() = default; ggml_sycl_pool_alloc(const ggml_sycl_pool_alloc &) = delete; ggml_sycl_pool_alloc(ggml_sycl_pool_alloc &&) = delete; ggml_sycl_pool_alloc& operator=(const ggml_sycl_pool_alloc &) = delete; ggml_sycl_pool_alloc& operator=(ggml_sycl_pool_alloc &&) = delete; }; // backend interface struct ggml_tensor_extra_gpu { void* data_device[GGML_SYCL_MAX_DEVICES]; // 1 pointer for each device for split // tensors dpct::event_ptr events[GGML_SYCL_MAX_DEVICES] [GGML_SYCL_MAX_STREAMS]; // events for synchronizing multiple GPUs optimize_feature optimized_feature; }; void release_extra_gpu(ggml_tensor_extra_gpu * extra, std::vector streams={}); namespace sycl_ex = sycl::ext::oneapi::experimental; struct ggml_backend_sycl_context { int device; std::string name; optimize_feature opt_feature; queue_ptr qptrs[GGML_SYCL_MAX_DEVICES][GGML_SYCL_MAX_STREAMS] = { { nullptr } }; explicit ggml_backend_sycl_context(int device) : device(device), name(GGML_SYCL_NAME + std::to_string(device)) { opt_feature = ggml_sycl_info().devices[device].opt_feature; } queue_ptr stream(int device, int stream) { if (qptrs[device][stream] == nullptr) { qptrs[device][stream] = &(dpct::get_device(device).default_queue()); } return qptrs[device][stream]; } queue_ptr stream() { return stream(device, 0); } #if GGML_SYCL_DNNL dnnl::engine make_engine(sycl::queue* q) { // Get the device associated with the queue sycl::device dev = q->get_device(); // Get the context associated with the queue sycl::context ctx = q->get_context(); const dnnl::engine eng = dnnl::sycl_interop::make_engine(dev, ctx); return eng; } std::unordered_map stream_map; std::unordered_map engine_map; dnnl::stream stream_dnnl(int device, int _stream) { auto q = stream(device, _stream); return stream_dnnl(q); } dnnl::engine engine_dnnl(sycl::queue* qptr) { auto it = engine_map.find(qptr); if (it == engine_map.end()) { auto eng = make_engine(qptr); engine_map[qptr] = eng; return eng; } else { return it->second; } } dnnl::stream stream_dnnl(sycl::queue* qptr) { auto it = stream_map.find(qptr); if (it == stream_map.end()) { 
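            // no oneDNN stream cached for this queue yet: build one on the matching engine and memoize it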
auto eng = engine_dnnl(qptr); auto stream = dnnl::sycl_interop::make_stream(eng, *qptr); stream_map[qptr] = stream; return stream; } else { return it->second; } } dnnl::stream stream_dnnl() { return stream_dnnl(device, 0); } dnnl::memory get_scratchpad_mem(const dnnl::memory::desc & scratchpad_md, const dnnl::engine & eng, const queue_ptr q) { ggml_sycl_pool_alloc * pool; auto it = scratchpad_map.find(q); if (it == scratchpad_map.end()) { scratchpad_map[q] = std::make_unique>(this->pool()); pool = scratchpad_map[q].get(); } else { pool = it->second.get(); } size_t scratchpad_size = scratchpad_md.get_size(); if (scratchpad_size > pool->actual_size) { pool->realloc(scratchpad_size); } void * mem_ptr = pool->get(); return dnnl::memory(scratchpad_md, eng, mem_ptr); } #endif // pool std::unique_ptr pools[GGML_SYCL_MAX_DEVICES]; std::unordered_map>> scratchpad_map; std::unique_ptr host_pools[GGML_SYCL_MAX_DEVICES]; static std::unique_ptr new_pool_for_device(queue_ptr qptr, int device); static std::unique_ptr new_pool_for_host(queue_ptr qptr, int device); ggml_sycl_pool & pool(int device) { if (pools[device] == nullptr) { pools[device] = new_pool_for_device(stream(device,0), device); } return *pools[device]; } ggml_sycl_pool & pool() { return pool(device); } #ifdef GGML_SYCL_GRAPH std::unique_ptr> exec_graph = nullptr; #endif ggml_sycl_pool & host_pool(int device) { if (host_pools[device] == nullptr) { host_pools[device] = new_pool_for_host(stream(device, 0), device); } return *host_pools[device]; } ggml_sycl_pool & host_pool() { return host_pool(device); } }; // common device functions static __dpct_inline__ float warp_reduce_sum(float x, const sycl::nd_item<3>& item_ct1) { #pragma unroll for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { x += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), x, mask); } return x; } static __dpct_inline__ sycl::float2 warp_reduce_sum(sycl::float2 a, const sycl::nd_item<3>& item_ct1) { #pragma unroll for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { a.x() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.x(), mask); a.y() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.y(), mask); } return a; } template static __dpct_inline__ int warp_reduce_sum(int x) { return sycl::reduce_over_group( sycl::ext::oneapi::this_work_item::get_sub_group(), x, sycl::plus<>()); } template static __dpct_inline__ float warp_reduce_sum(float x) { #pragma unroll for (int offset = width / 2; offset > 0; offset >>= 1) { x += dpct::permute_sub_group_by_xor( sycl::ext::oneapi::this_work_item::get_sub_group(), x, offset, width); } return x; } template static __dpct_inline__ sycl::float2 warp_reduce_sum(sycl::float2 a) { #pragma unroll for (int offset = width / 2; offset > 0; offset >>= 1) { a.x() += dpct::permute_sub_group_by_xor( sycl::ext::oneapi::this_work_item::get_sub_group(), a.x(), offset, width); a.y() += dpct::permute_sub_group_by_xor( sycl::ext::oneapi::this_work_item::get_sub_group(), a.y(), offset, width); } return a; } template static __dpct_inline__ sycl::half2 warp_reduce_sum(sycl::half2 a) { #pragma unroll for (int offset = width / 2; offset > 0; offset >>= 1) { a = a + dpct::permute_sub_group_by_xor( sycl::ext::oneapi::this_work_item::get_sub_group(), a, offset, width); } return a; } static constexpr int ggml_sycl_get_physical_warp_size() { // todo: for old iGPU + dGPU case, need to be changed. 
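    // for now every device is assumed to use the compile-time WARP_SIZE constant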
return WARP_SIZE; } template static __dpct_inline__ float warp_reduce_max(float x) { #pragma unroll for (int offset = width / 2; offset > 0; offset >>= 1) { x = sycl::fmax(x, dpct::permute_sub_group_by_xor( sycl::ext::oneapi::this_work_item::get_sub_group(), x, offset, width)); } return x; } static __dpct_inline__ float warp_reduce_max(float x, const sycl::nd_item<3>& item_ct1) { #pragma unroll for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { x = sycl::fmax(x, dpct::permute_sub_group_by_xor( item_ct1.get_sub_group(), x, mask)); } return x; } /* Helper for Computing the linear offset of a ggml_tensor given per-dimension sizes, strides, and indices */ template __dpct_inline__ size_t calculate_offset(const std::array & strides, const std::array & indices) { size_t offset = 0; #pragma unroll for (int i = 0; i < N; i++) { auto index_i = indices[i]; offset += strides[i] * index_i; } return offset; } // Helper for vec loading aligned data template inline sycl::vec vec_aligned_load(const Tp* aligned_ptr) { return *reinterpret_cast*>(aligned_ptr); } // Helper for accessing pointers with no warnings template static __dpct_inline__ Tp* get_pointer(sycl::local_accessor acc) { return acc.template get_multi_ptr().get(); } int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size); constexpr size_t ceil_div(const size_t m, const size_t n) { return (m + n - 1) / n; } bool gpu_has_xmx(sycl::device &dev); template std::string debug_get_array_str(const std::string & prefix, const T array[N]) { if (LIKELY(!g_ggml_sycl_debug)) { return ""; } std::stringstream ss; ss << prefix << "=["; for (std::size_t i = 0; i < N - 1; ++i) { ss << array[i] << ", "; } if constexpr (N > 0) { ss << array[N - 1]; } ss << "]"; return ss.str(); } inline std::string debug_get_tensor_str(const std::string &prefix, const ggml_tensor *tensor, const std::string &suffix = "") { std::stringstream ss; if (LIKELY(!g_ggml_sycl_debug)) { return ss.str(); } ss << prefix.c_str() << "="; if (tensor) { ss << "'" << tensor->name << "':type=" << ggml_type_name(tensor->type); ss << debug_get_array_str(";ne", tensor->ne); ss << debug_get_array_str(";nb", tensor->nb); if (!ggml_is_contiguous(tensor)) { ss << ";strided"; } if (ggml_is_permuted(tensor)) { ss << ";permuted"; } } else { ss << "nullptr"; } ss << suffix; return ss.str(); } // Use scope_op_debug_print to log operations coming from running a model struct scope_op_debug_print { // Use string_views to avoid the cost of creating a string and concatenating them // string_views must be alive for as long as the object is alive // scope_op_debug_print are used with string literals in practice which are stored in constant space so always accessible scope_op_debug_print(const std::string_view & func, const std::string_view & func_suffix, const ggml_tensor * dst, std::size_t num_src, const std::string_view & suffix = "") : func(func), func_suffix(func_suffix) { if (LIKELY(!g_ggml_sycl_debug)) { return; } GGML_SYCL_DEBUG("[SYCL][OP] call %s%s:", func.data(), func_suffix.data()); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" dst", dst).c_str()); if (dst) { for (std::size_t i = 0; i < num_src; ++i) { GGML_SYCL_DEBUG("%s", debug_get_tensor_str("\tsrc" + std::to_string(i), dst->src[i]).c_str()); } } GGML_SYCL_DEBUG("%s\n", suffix.data()); } scope_op_debug_print(const std::string_view & func, const ggml_tensor * dst, std::size_t num_src, const std::string_view & suffix = "") : scope_op_debug_print(func, "", dst, num_src, suffix) {} ~scope_op_debug_print() { 
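        // emit the matching "done" trace for the call logged in the constructor (no-op unless debug logging is enabled)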
GGML_SYCL_DEBUG("[SYCL][OP] call %s%s done\n", func.data(), func_suffix.data()); } private: std::string_view func; std::string_view func_suffix; }; static __dpct_inline__ float get_alibi_slope(const float max_bias, const uint32_t h, const uint32_t n_head_log2, const float m0, const float m1) { if (max_bias <= 0.0f) { return 1.0f; } const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; return dpct::pow(base, exph); } static const sycl::uint3 init_fastdiv_values(uint32_t d) { GGML_ASSERT(d != 0); uint32_t L = 0; while (L < 32 && (uint32_t{ 1 } << L) < d) { L++; } uint32_t mp = (uint32_t) ((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d) / d + 1); return sycl::uint3(mp, L, d); } static __dpct_inline__ uint32_t fastdiv(uint32_t n, const sycl::uint3 fastdiv_values) { const uint32_t hi = sycl::mul_hi(n, fastdiv_values.x()); return (hi + n) >> fastdiv_values.y(); } static __dpct_inline__ sycl::uint2 fast_div_modulo(uint32_t n, const sycl::uint3 fastdiv_values) { const uint32_t div_val = fastdiv(n, fastdiv_values); const uint32_t mod_val = n - div_val * fastdiv_values.z(); return sycl::uint2(div_val, mod_val); } static __dpct_inline__ int ggml_sycl_dp4a(const int a, const int b, int c) { return dpct::dp4a(a, b, c); } static __dpct_inline__ float ggml_sycl_e8m0_to_fp32(uint8_t x) { uint32_t bits; if (x == 0) { bits = 0x00400000; } else { bits = (uint32_t) x << 23; } float result; memcpy(&result, &bits, sizeof(float)); return result; } #endif // GGML_SYCL_COMMON_HPP ggml-org-ggml-3678254/src/ggml-sycl/concat.cpp000066400000000000000000000202611512524704700210250ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #include "concat.hpp" static inline size_t elem_size(ggml_type t) { return ggml_type_size(t) / ggml_blck_size(t); } template static void concat_T_dim0(const T *x, const T *y, T *dst, const int ne0, const int ne00, const sycl::nd_item<3> &item_ct1) { int nidx = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); if (nidx >= ne0) { return; } // operation int offset_dst = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); if (nidx < ne00) { // src0 int offset_src = nidx + item_ct1.get_group(1) * ne00 + item_ct1.get_group(0) * ne00 * item_ct1.get_group_range(1); dst[offset_dst] = x[offset_src]; } else { int offset_src = nidx - ne00 + item_ct1.get_group(1) * (ne0 - ne00) + item_ct1.get_group(0) * (ne0 - ne00) * item_ct1.get_group_range(1); dst[offset_dst] = y[offset_src]; } } template static void concat_T_dim1(const T *x, const T *y, T *dst, const int ne0, const int ne01, const sycl::nd_item<3> &item_ct1) { int nidx = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); if (nidx >= ne0) { return; } // operation int offset_dst = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); if (item_ct1.get_group(1) < (size_t) ne01) { // src0 int offset_src = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * ne01; dst[offset_dst] = x[offset_src]; } else { int offset_src = nidx + (item_ct1.get_group(1) - ne01) * ne0 + item_ct1.get_group(0) * ne0 * (item_ct1.get_group_range(1) - ne01); dst[offset_dst] = y[offset_src]; } } template static void concat_T_dim2(const T *x, const T *y, T *dst, const int ne0, const int ne02, const sycl::nd_item<3> &item_ct1) { int nidx = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); if (nidx >= ne0) { return; } // operation int offset_dst = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); if (item_ct1.get_group(0) < (size_t) ne02) { // src0 int offset_src = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); dst[offset_dst] = x[offset_src]; } else { int offset_src = nidx + item_ct1.get_group(1) * ne0 + (item_ct1.get_group(0) - ne02) * ne0 * item_ct1.get_group_range(1); dst[offset_dst] = y[offset_src]; } } template static void concat_T_sycl(const T *x, const T *y, T *dst, int ne00, int ne01, int ne02, int ne0, int ne1, int ne2, int dim, queue_ptr stream) { int num_blocks = (ne0 + SYCL_CONCAT_BLOCK_SIZE - 1) / SYCL_CONCAT_BLOCK_SIZE; sycl::range<3> gridDim(ne2, ne1, num_blocks); switch (dim) { case 0: stream->parallel_for(sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { concat_T_dim0(x, y, dst, ne0, ne00, item_ct1); }); break; case 1: stream->parallel_for(sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { concat_T_dim1(x, y, dst, ne0, ne01, item_ct1); }); break; // dim >=2 will be dispatched to the default path default: stream->parallel_for(sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { concat_T_dim2(x, y, dst, ne0, ne02, item_ct1); }); break; } } // non-contiguous kernel (slow) template static void concat_T_sycl_non_cont( queue_ptr 
stream, const char *src0, const char *src1, char *dst, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, uint64_t nb00, uint64_t nb01, uint64_t nb02, uint64_t nb03, int64_t /*ne10*/, int64_t /*ne11*/, int64_t /*ne12*/, int64_t /*ne13*/, uint64_t nb10, uint64_t nb11, uint64_t nb12, uint64_t nb13, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3, uint64_t nb0, uint64_t nb1, uint64_t nb2, uint64_t nb3, int32_t dim) { sycl::range<3> gridDim(ne3, ne2, ne1); stream->parallel_for(sycl::nd_range<3>(gridDim, sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { int64_t i3 = item_ct1.get_group(0); int64_t i2 = item_ct1.get_group(1); int64_t i1 = item_ct1.get_group(2); int64_t o[4] = { 0, 0, 0, 0 }; o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? ne02 : ne03)); const T * x; for (int i0 = item_ct1.get_local_id(2); i0 < ne0; i0 += item_ct1.get_local_range(2)) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { x = (const T *) (src0 + (i3) *nb03 + (i2) *nb02 + (i1) *nb01 + (i0) *nb00); } else { x = (const T *) (src1 + (i3 - o[3]) * nb13 + (i2 - o[2]) * nb12 + (i1 - o[1]) * nb11 + (i0 - o[0]) * nb10); } T *y = (T *)(dst + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); *y = *x; } }); } template void concat_impl_sycl(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; queue_ptr stream = ctx.stream(); const int32_t dim = ((int32_t *) dst->op_params)[0]; if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) { const T * src0_d = (const T *) src0->data; const T * src1_d = (const T *) src1->data; T * dst_d = (T *) dst->data; size_t type_size = elem_size(dst->type); if (dim != 3) { for (int i3 = 0; i3 < dst->ne[3]; i3++) { concat_T_sycl(src0_d + i3 * (src0->nb[3] / type_size), src1_d + i3 * (src1->nb[3] / type_size), dst_d + i3 * (dst->nb[3] / type_size), src0->ne[0], src0->ne[1], src0->ne[2], dst->ne[0], dst->ne[1], dst->ne[2], dim, stream); } } else { const size_t size0 = ggml_nbytes(src0); const size_t size1 = ggml_nbytes(src1); SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(dst_d, src0_d, size0).wait())); SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(dst_d + size0 / type_size, src1_d, size1).wait())); } } else { concat_T_sycl_non_cont(stream, (const char *) src0->data, (const char *) src1->data, (char *) dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], dim); } } void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { switch (dst->type) { case GGML_TYPE_F32: concat_impl_sycl(ctx, dst); break; case GGML_TYPE_I32: concat_impl_sycl(ctx, dst); break; default: GGML_ASSERT(false && "ggml_sycl_op_concat: unsupported type"); break; } } ggml-org-ggml-3678254/src/ggml-sycl/concat.hpp000066400000000000000000000007551512524704700210400ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_CONCAT_HPP #define GGML_SYCL_CONCAT_HPP #include "common.hpp" void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst); #endif // GGML_SYCL_CONCAT_HPP ggml-org-ggml-3678254/src/ggml-sycl/conv.cpp000066400000000000000000000064111512524704700205240ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #include "conv.hpp" static void conv_transpose_1d_kernel( const int s0, const int output_size, const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src1_ne0, const int dst_ne0, const float * src0, const float * src1, float * dst, const sycl::nd_item<3> &item_ct1) { int global_index = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); if (global_index >= output_size) { return; } int out_index = global_index / dst_ne0; float accumulator = 0; for (int c = 0; c < src0_ne2; c++) { int idx = global_index % dst_ne0; int kernel_offset = (src0_ne0 * src0_ne1 * c) + (out_index * src0_ne0); int input_offset = src1_ne0 * c; for (int i = 0; i < src1_ne0; i++) { if (!(idx >= i*s0 && idx < i*s0 + src0_ne0)) { continue; } int weight_idx = idx - i*s0; float kernel_weight = src0[kernel_offset + weight_idx]; float input_value = src1[input_offset+i]; accumulator += kernel_weight * input_value; } } dst[global_index] = accumulator; } static void conv_transpose_1d_f32_f32_sycl( const int s0, const int output_size, const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src1_ne0, const int dst_ne0, const float *src0, const float *src1, float *dst, const queue_ptr& stream) { const int num_blocks = (output_size + SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE; const sycl::range<3> block_dims(1, 1, SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE); const sycl::range<3> block_nums(1, 1, num_blocks); stream->parallel_for( sycl::nd_range<3>( block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { conv_transpose_1d_kernel( s0, output_size, src0_ne0, src0_ne1, src0_ne2, src1_ne0, dst_ne0, src0, src1, dst, item_ct1); }); } void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); const ggml_tensor *src0 = dst->src[0]; const ggml_tensor *src1 = dst->src[1]; const float * src0_d = (const float *)src0->data; const float * src1_d = (const float *)src1->data; float * dst_d = (float *)dst->data; dpct::queue_ptr stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); const int32_t * opts = (const int32_t *)dst->op_params; const int s0 = opts[0]; const int64_t output_size = ggml_nelements(dst); conv_transpose_1d_f32_f32_sycl(s0, output_size, src0->ne[0], src0->ne[1], src0->ne[2], src1->ne[0], dst->ne[0], src0_d, src1_d, dst_d, stream); } ggml-org-ggml-3678254/src/ggml-sycl/conv.hpp000066400000000000000000000007621512524704700205340ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
// See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_CONV_HPP #define GGML_SYCL_CONV_HPP #include "common.hpp" void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst); #endif // GGML_SYCL_CONV_HPP ggml-org-ggml-3678254/src/ggml-sycl/convert.cpp000066400000000000000000000663331512524704700212500ustar00rootroot00000000000000#include "convert.hpp" #include "dequantize.hpp" #include "presets.hpp" #if defined(__INTEL_LLVM_COMPILER) #if __has_include() #include #define GGML_SYCL_HAS_BF16 #endif #endif template static void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, const sycl::nd_item<3> &item_ct1) { const int64_t i = 2 * (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)); if (i >= k) { return; } const int64_t ib = i/qk; // block index const int64_t iqs = (i%qk)/qr; // quant index const int64_t iybs = i - i%qk; // y block start index const int64_t y_offset = qr == 1 ? 1 : qk/2; // dequantize dfloat2 v; dequantize_kernel(vx, ib, iqs, v); y[iybs + iqs + 0] = v.x(); y[iybs + iqs + y_offset] = v.y(); } template static void dequantize_block_sycl(const void *__restrict__ vx, dst_t *__restrict__ y, const int64_t k, dpct::queue_ptr stream) { const int64_t num_blocks = (k + 2*SYCL_DEQUANTIZE_BLOCK_SIZE - 1) / (2*SYCL_DEQUANTIZE_BLOCK_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>( sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { dequantize_block(vx, y, k, item_ct1); }); } } template static void dequantize_row_q2_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; #if QK_K == 256 { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q2_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q2_K(vx, y, item_ct1); }); } #endif } template static void dequantize_row_q3_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; #if QK_K == 256 { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q3_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q3_K(vx, y, item_ct1); }); } #endif } template static void dequantize_row_q4_0_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb32 = k / 32; const int64_t nb = (k + 255) / 256; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * 
sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q4_0(vx, y, nb32, item_ct1); }); } } template static void dequantize_row_q4_0_sycl_reorder(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); int constexpr WARP_K = WARP_SIZE * QK4_0; const int n_warp = (k + WARP_K - 1) / WARP_K; GGML_ASSERT(k % 2 == 0); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, n_warp) * sycl::range<3>(1, 1, WARP_SIZE), sycl::range<3>(1, 1, WARP_SIZE)), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]]{ dequantize_block_q4_0_reorder(vx, y, k, item_ct1); }); } template static void dequantize_row_q4_1_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb32 = k / 32; const int64_t nb = (k + 255) / 256; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q4_1(vx, y, nb32, item_ct1); }); } } template static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { sycl::local_accessor scale_local_acc(sycl::range<1>(12), cgh); cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q4_K(vx, y, get_pointer(scale_local_acc), item_ct1); }); }); } } template static void dequantize_row_q4_K_sycl_reorder(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; const size_t local_size = 32; const size_t global_size = nb * local_size; dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); stream->submit([&](sycl::handler & cgh) { sycl::local_accessor scale_local_acc(sycl::range<1>(12), cgh); cgh.parallel_for(sycl::nd_range<1>(sycl::range<1>(global_size), sycl::range<1>(local_size)), [=](sycl::nd_item<1> item_ct1) { dequantize_block_q4_K_reorder(vx, y, get_pointer(scale_local_acc), item_ct1, nb); }); }); } template static void dequantize_row_q5_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; #if QK_K == 256 { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q5_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q5_K(vx, y, item_ct1); }); } #endif } template static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; #if QK_K == 256 { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K(vx, y, item_ct1); }); } #else { 
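        // QK_K != 256 fallback for q6_K: launch 32-thread work-groups instead of 64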
dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K(vx, y, item_ct1); }); } #endif } template static void dequantize_row_q6_K_sycl_reorder(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K_reorder(vx, y, item_ct1, nb); }); } template static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq1_s( vx, y, item_ct1, iq1s_grid_gpu ); }); }); } } template static void dequantize_row_iq1_m_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq1_m( vx, y, item_ct1, iq1s_grid_gpu ); }); }); } } template static void dequantize_row_iq2_xxs_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq2_xxs( vx, y, item_ct1, iq2xxs_grid, ksigns_iq2xs, kmask_iq2xs); }); }); } } template static void dequantize_row_iq2_xs_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq2_xs( vx, y, item_ct1, iq2xs_grid, ksigns_iq2xs, kmask_iq2xs); }); }); } } template static void dequantize_row_iq2_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq2_s(vx, y, item_ct1); }); }); } } template static void dequantize_row_iq3_xxs_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), 
[=](sycl::nd_item<3> item_ct1) { dequantize_block_iq3_xxs( vx, y, item_ct1, iq3xxs_grid, ksigns_iq2xs, kmask_iq2xs); }); }); } } template static void dequantize_row_iq3_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = k / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq3_s( vx, y, item_ct1, kmask_iq2xs, iq3s_grid); }); }); } } template static void dequantize_row_iq4_xs_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = (k + QK_K - 1) / QK_K; #if QK_K == 64 dequantize_row_iq4_nl_sycl(vx, y, k, stream); #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq4_xs(vx, y, item_ct1); }); }); } #endif } template static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { const int64_t nb = (k + QK_K - 1) / QK_K; { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq4_nl(vx, y, item_ct1); }); }); } } template static void dequantize_row_mxfp4_sycl(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) { const int nb = (k + QK_K - 1) / QK_K; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), [=](sycl::nd_item<3> item_ct1) { dequantize_block_mxfp4(vx, y, item_ct1); }); } template static void convert_unary_nc(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t s01, const int64_t s02, const int64_t s03, const sycl::nd_item<3> & item_ct1) { const int64_t work_group_size = item_ct1.get_local_range(2); const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2); const int64_t i01 = item_ct1.get_group(1); const int64_t i02 = item_ct1.get_group(0) % ne02; const int64_t i03 = item_ct1.get_group(0) / ne02; // make each work-item deal with more elements since sycl global range can not exceed max int const src_t * x = static_cast(vx); const int64_t ix = i03 * s03 + i02 * s02 + i01 * s01; const int64_t iy = ((i03 * ne02 + i02) * ne01 + i01) * ne00; #pragma unroll for (int64_t i00 = global_id; i00 < ne00; i00 += work_group_size * item_ct1.get_group_range(2)) { y[iy + i00] = static_cast(x[ix + i00]); } } template static void convert_unary_nc_sycl(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t s01, const int64_t s02, const int64_t s03, dpct::queue_ptr queue) { dpct::has_capability_or_fail(queue->get_device(), { sycl::aspect::fp16 }); sycl::range<3> global_size(ne02 * ne03, ne01, ceil_div(ne00, SYCL_DEQUANTIZE_BLOCK_SIZE)); // decrease global range when it exceeds the max int // TODO: Downsample logic is separated from the kernel, a rewrite is desirable int64_t downsized_workgroup = 
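// Why the launcher below shrinks the range: a SYCL nd_range extent has to fit in
// an int, so one work-item per ne00 element is not always possible for very large
// rows. convert_unary_nc above compensates with its stride loop
// (i00 += work_group_size * item_ct1.get_group_range(2)), so correctness does not
// depend on how aggressively downsample_sycl_global_range reduces the launch
// size; only the amount of work per work-item changes.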
downsample_sycl_global_range(global_size[0], SYCL_DEQUANTIZE_BLOCK_SIZE); sycl::range<3> workgroup_size(1, 1, downsized_workgroup); queue->parallel_for(sycl::nd_range<3>(global_size * workgroup_size, workgroup_size), [=](sycl::nd_item<3> item_ct1) { convert_unary_nc(vx, y, ne00, ne01, ne02, s01, s02, s03, item_ct1); }); } template static void convert_unary_sycl(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr queue) { convert_unary_nc_sycl(vx, y, k, 1, 1, 1, k, k, k, queue); } to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst) { switch (type) { case GGML_TYPE_Q4_0: if (dst->src[0]->extra && ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) { return dequantize_row_q4_0_sycl_reorder; } else { return dequantize_block_sycl; } case GGML_TYPE_Q4_1: return dequantize_block_sycl; case GGML_TYPE_Q5_0: return dequantize_block_sycl; case GGML_TYPE_Q5_1: return dequantize_block_sycl; case GGML_TYPE_Q8_0: return dequantize_block_sycl; case GGML_TYPE_Q2_K: return dequantize_row_q2_K_sycl; case GGML_TYPE_Q3_K: return dequantize_row_q3_K_sycl; case GGML_TYPE_Q4_K: if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { return dequantize_row_q4_K_sycl_reorder; } else { return dequantize_row_q4_K_sycl; } case GGML_TYPE_Q5_K: return dequantize_row_q5_K_sycl; case GGML_TYPE_Q6_K: if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { return dequantize_row_q6_K_sycl_reorder; } else { return dequantize_row_q6_K_sycl; } case GGML_TYPE_IQ1_S: return dequantize_row_iq1_s_sycl; case GGML_TYPE_IQ1_M: return dequantize_row_iq1_m_sycl; case GGML_TYPE_IQ2_XXS: return dequantize_row_iq2_xxs_sycl; case GGML_TYPE_IQ2_XS: return dequantize_row_iq2_xs_sycl; case GGML_TYPE_IQ2_S: return dequantize_row_iq2_s_sycl; case GGML_TYPE_IQ3_XXS: return dequantize_row_iq3_xxs_sycl; case GGML_TYPE_IQ3_S: return dequantize_row_iq3_s_sycl; case GGML_TYPE_IQ4_XS: return dequantize_row_iq4_xs_sycl; case GGML_TYPE_IQ4_NL: return dequantize_row_iq4_nl_sycl; case GGML_TYPE_MXFP4: return dequantize_row_mxfp4_sycl; case GGML_TYPE_F32: return convert_unary_sycl; #ifdef GGML_SYCL_HAS_BF16 case GGML_TYPE_BF16: return convert_unary_sycl; #endif default: return nullptr; } } to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type, ggml_tensor *dst) { switch (type) { case GGML_TYPE_Q4_0: if (dst->src[0]->extra && ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) { return dequantize_row_q4_0_sycl_reorder; } else { return dequantize_row_q4_0_sycl; } case GGML_TYPE_Q4_1: return dequantize_row_q4_1_sycl; case GGML_TYPE_Q5_0: return dequantize_block_sycl; case GGML_TYPE_Q5_1: return dequantize_block_sycl; case GGML_TYPE_Q8_0: return dequantize_block_sycl; case GGML_TYPE_Q2_K: return dequantize_row_q2_K_sycl; case GGML_TYPE_Q3_K: return dequantize_row_q3_K_sycl; case GGML_TYPE_Q4_K: if (dst->src[0]->extra && ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) { return dequantize_row_q4_K_sycl_reorder; } else { return dequantize_row_q4_K_sycl; } case GGML_TYPE_Q5_K: return dequantize_row_q5_K_sycl; case GGML_TYPE_Q6_K: if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { return dequantize_row_q6_K_sycl_reorder; } else { return dequantize_row_q6_K_sycl; } case GGML_TYPE_IQ1_S: return dequantize_row_iq1_s_sycl; case GGML_TYPE_IQ1_M: return dequantize_row_iq1_m_sycl; case GGML_TYPE_IQ2_XXS: return 
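// Illustrative (hypothetical) caller of the lookup above - the local variable
// names and the f16 scratch buffer are assumptions, only the getter and the
// to_fp16_sycl_t signature come from these files:
//
//   to_fp16_sycl_t to_fp16 = ggml_get_to_fp16_sycl(src0->type, dst);
//   GGML_ASSERT(to_fp16 != nullptr);                        // convertible type
//   to_fp16(src0->data, src0_f16_buf, ggml_nelements(src0), stream);
//
// The *_reorder variants are only returned when dst->src[0]->extra is tagged with
// optimized_feature.reorder, i.e. the weights were already rewritten into the
// quants-first layout those kernels expect.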
dequantize_row_iq2_xxs_sycl; case GGML_TYPE_IQ2_XS: return dequantize_row_iq2_xs_sycl; case GGML_TYPE_IQ2_S: return dequantize_row_iq2_s_sycl; case GGML_TYPE_IQ3_XXS: return dequantize_row_iq3_xxs_sycl; case GGML_TYPE_IQ3_S: return dequantize_row_iq3_s_sycl; case GGML_TYPE_IQ4_XS: return dequantize_row_iq4_xs_sycl; case GGML_TYPE_IQ4_NL: return dequantize_row_iq4_nl_sycl; case GGML_TYPE_MXFP4: return dequantize_row_mxfp4_sycl; case GGML_TYPE_F16: return convert_unary_sycl; #ifdef GGML_SYCL_HAS_BF16 case GGML_TYPE_BF16: return convert_unary_sycl; #endif default: return nullptr; } } to_fp16_nc_sycl_t get_to_fp16_nc_sycl(ggml_type type) { switch (type) { case GGML_TYPE_F32: return convert_unary_nc_sycl; #ifdef GGML_SYCL_HAS_BF16 case GGML_TYPE_BF16: return convert_unary_nc_sycl; #endif default: return nullptr; } } ggml-org-ggml-3678254/src/ggml-sycl/convert.hpp000066400000000000000000000022141512524704700212410ustar00rootroot00000000000000// // MIT license // Copyright (C) 2025 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_CONVERT_HPP #define GGML_SYCL_CONVERT_HPP #include "common.hpp" template using to_t_sycl_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int64_t k, dpct::queue_ptr stream); typedef to_t_sycl_t to_fp32_sycl_t; typedef to_t_sycl_t to_fp16_sycl_t; to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst); to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type, ggml_tensor * dst); // Nc = Non-contiguous template using to_t_nc_sycl_t = void (*)(const void * x, T * y, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, int64_t s01, int64_t s02, int64_t s03, dpct::queue_ptr queue); typedef to_t_nc_sycl_t to_fp16_nc_sycl_t; to_fp16_nc_sycl_t get_to_fp16_nc_sycl(ggml_type type); #endif // GGML_SYCL_CONVERT_HPP ggml-org-ggml-3678254/src/ggml-sycl/count-equal.cpp000066400000000000000000000050261512524704700220150ustar00rootroot00000000000000#include "count-equal.hpp" #include template static void count_equal(const T *__restrict__ x, const T *__restrict__ y, int64_t *__restrict__ dst, const int64_t dk, const int64_t k) { auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>(); const int64_t i0 = (int64_t)item_ct1.get_group(2) * dk; const int64_t i1 = sycl::min(i0 + dk, k); int nequal = 0; for (int64_t i = i0 + item_ct1.get_local_id(2); i < i1; i += WARP_SIZE) { const T xi = x[i]; const T yi = y[i]; nequal += xi == yi; } nequal = warp_reduce_sum(nequal); if (item_ct1.get_local_id(2) != 0) { return; } dpct::atomic_fetch_add( (int *)dst, nequal); } void ggml_sycl_count_equal(ggml_backend_sycl_context &ctx, ggml_tensor *dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(src0->type == src1->type); GGML_ASSERT( dst->type == GGML_TYPE_I64); GGML_ASSERT(ggml_are_same_shape(src0, src1)); GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(src1)); GGML_ASSERT(ggml_is_contiguous(dst)); int64_t * dst_d = (int64_t *) dst->data; dpct::queue_ptr stream = ctx.stream(); const int id = get_current_device_id(); const int nsm = ggml_sycl_info().devices[id].nsm; const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne < (1 << 30) && "atomicAdd implementation only supports int"); const int64_t dne = 
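// dne is the number of elements handled by one work-group:
// GGML_PAD(ceil(ne / (4 * nsm)), SYCL_COUNT_EQUAL_CHUNK_SIZE). Worked example
// with illustrative numbers ne = 1'000'000 and nsm = 32:
//   4 * nsm        = 128
//   ceil(ne / 128) = 7813
//   pad to 128     -> dne = 7936
// block_nums is then min(128, ceil(ne / SYCL_COUNT_EQUAL_CHUNK_SIZE)) = 128
// work-groups, so 128 * 7936 >= ne covers every element; in the kernel,
// i1 = min(i0 + dk, k) clips the last chunk, each WARP_SIZE-wide group strides
// over its own [i0, i1) range, and lane 0 adds the warp-reduced count atomically.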
GGML_PAD((ne + 4 * nsm - 1) / (4 * nsm), SYCL_COUNT_EQUAL_CHUNK_SIZE); SYCL_CHECK(CHECK_TRY_ERROR(stream->memset(dst_d, 0, ggml_nbytes(dst)))); const dpct::dim3 block_dims(WARP_SIZE, 1, 1); const dpct::dim3 block_nums( std::min((int64_t)4 * nsm, (ne + SYCL_COUNT_EQUAL_CHUNK_SIZE - 1) / SYCL_COUNT_EQUAL_CHUNK_SIZE), 1, 1); switch (src0->type) { case GGML_TYPE_I32: { const int *src0_d = (const int *)src0->data; const int *src1_d = (const int *)src1->data; stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { count_equal(src0_d, src1_d, dst_d, dne, ne); GGML_UNUSED(item_ct1); }); } break; default: GGML_ASSERT(false); break; } } ggml-org-ggml-3678254/src/ggml-sycl/count-equal.hpp000066400000000000000000000003701512524704700220170ustar00rootroot00000000000000#ifndef GGML_SYCL_COUNT_EQUAL_HPP #define GGML_SYCL_COUNT_EQUAL_HPP #include "common.hpp" #define SYCL_COUNT_EQUAL_CHUNK_SIZE 128 void ggml_sycl_count_equal(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif //GGML_SYCL_COUNT_EQUAL_HPP ggml-org-ggml-3678254/src/ggml-sycl/cpy.cpp000066400000000000000000001062031512524704700203520ustar00rootroot00000000000000#include "cpy.hpp" #include #include "dequantize.hpp" #include "ggml-sycl/common.hpp" #include "ggml-sycl/presets.hpp" #include "ggml.h" static void cpy_1_f32_f32(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; float * dsti = (float *) cdsti; *dsti = *xi; } static void cpy_1_f32_f16(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; sycl::half * dsti = (sycl::half *) cdsti; *dsti = sycl::vec(*xi).convert()[0]; } static void cpy_1_f16_f16(const char * cxi, char * cdsti) { const sycl::half * xi = (const sycl::half *) cxi; sycl::half * dsti = (sycl::half *) cdsti; *dsti = *xi; } static void cpy_1_f16_f32(const char * cxi, char * cdsti) { const sycl::half * xi = (const sycl::half *) cxi; float * dsti = (float *) cdsti; *dsti = *xi; } static void cpy_1_i16_i16(const char * cxi, char * cdsti) { const int16_t * xi = (const int16_t *) cxi; int16_t * dsti = (int16_t *) cdsti; *dsti = *xi; } static void cpy_1_i32_i32(const char * cxi, char * cdsti) { const int32_t * xi = (const int32_t *) cxi; int32_t * dsti = (int32_t *) cdsti; *dsti = *xi; } template static void cpy_f32_f16(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, const sycl::nd_item<3> & item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); if (i >= ne) { return; } // determine indices i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor // then combine those indices with the corresponding byte offsets to get the total offsets const int i03 = i / (ne00 * ne01 * ne02); const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; const int x_offset = i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; const int i13 = i / (ne10 * ne11 * ne12); const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; const int dst_offset = i10 * nb10 
+ i11 * nb11 + i12 * nb12 + i13 * nb13; cpy_1(cx + x_offset, cdst + dst_offset); } /* quantized type same copy */ template static void cpy_blck_q_q(const char * cxi, char * cdsti) { const T * xi = (const T *) cxi; T * dsti = (T *) cdsti; *dsti = *xi; } static void cpy_blck_q8_0_f32(const char * cxi, char * cdsti) { float * cdstf = (float *) (cdsti); for (int j = 0; j < QK8_0; j += 2) { dfloat2 dq; dequantize_q8_0(cxi, 0, j, dq); *(cdstf + j) = dq.x(); *(cdstf + j + 1) = dq.y(); } } template static void cpy_blck_q_f32(const char * cxi, char * cdsti) { float * cdstf = (float *) (cdsti); for (int j = 0; j < qk / 2; j++) { dfloat2 dq; dequant(cxi, 0, j, dq); *(cdstf + j) = dq.x(); *(cdstf + j + qk / 2) = dq.y(); } } template static void cpy_q_q(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, const sycl::nd_item<3> & item_ct1) { const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)) * qk; if (i >= ne) { return; } const int i03 = i / (ne00 * ne01 * ne02); const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; const int x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; const int i13 = i / (ne10 * ne11 * ne12); const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; const int dst_offset = (i10 / qk) * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; cpy_blck_q_q(cx + x_offset, cdst + dst_offset); } template static void cpy_f32_q(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, const sycl::nd_item<3> & item_ct1) { const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)) * qk; if (i >= ne) { return; } const int i03 = i / (ne00 * ne01 * ne02); const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; const int x_offset = i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; const int i13 = i / (ne10 * ne11 * ne12); const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; const int dst_offset = (i10 / qk) * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; cpy_blck(cx + x_offset, cdst + dst_offset); } template static void cpy_q_f32(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, const sycl::nd_item<3> & item_ct1) { const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)) * qk; if (i >= ne) { return; } 
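// The divisions below recover the 4-D coordinates (i00, i01, i02, i03) of the
// flattened element index i before scaling them by the byte strides. Worked
// example with ne00 = 4, ne01 = 3, ne02 = 2 and i = 17:
//   i03 = 17 / 24       = 0
//   i02 = (17 - 0) / 12 = 1
//   i01 = (17 - 12) / 4 = 1
//   i00 = 17 - 12 - 4   = 1     -> element (1, 1, 1, 0)
// On the quantized side the innermost index is additionally divided by the block
// size qk, because all qk values of one block share a single byte offset.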
const int i03 = i / (ne00 * ne01 * ne02); const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; const int x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; const int i13 = i / (ne10 * ne11 * ne12); const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; const int dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; cpy_blck(cx + x_offset, cdst + dst_offset); } static void ggml_cpy_f16_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE; { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_f16(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } } static void ggml_cpy_f32_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE; { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_f16(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } } static void ggml_cpy_f32_f16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE; { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_f16(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } } static void ggml_cpy_f32_q8_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK8_0 == 0); const int num_blocks = ne / QK8_0; 
stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q8_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_q_f32(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_f32_q4_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK4_0 == 0); const int num_blocks = ne / QK4_0; stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q4_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK4_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_f32_q4_1_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK4_1 == 0); const int num_blocks = ne / QK4_1; stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q4_1_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK4_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_f32_q5_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int 
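// Launch geometry of the quantizing copies: the f32 -> q*_0/q*_1/q8_0/iq4_nl
// launchers use a local range of 1 and one work-group per quantized block
// (num_blocks = ne / qk), so each work-item serially packs qk floats into one
// block via the cpy_blck_f32_q* helpers and no intra-block synchronization is
// needed. The q -> f32 direction starts ne work-groups instead, but cpy_q_f32
// multiplies the group index by qk and returns early for i >= ne, so only the
// first ne / qk groups actually dequantize a block.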
nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK5_0 == 0); const int num_blocks = ne / QK5_0; stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q5_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK5_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_f32_q5_1_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK5_1 == 0); const int num_blocks = ne / QK5_1; stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q5_1_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK5_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_f32_iq4_nl_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK4_NL == 0); const int num_blocks = ne / QK4_NL; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_f16_f16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE; { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * 
sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_f16(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } } static void ggml_cpy_i16_i16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE; { // dpct::has_capability_or_fail(stream->get_device(), // {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_f16(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } } static void ggml_cpy_i32_i32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE; { // dpct::has_capability_or_fail(stream->get_device(), // {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_f32_f16(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } } static void ggml_cpy_q8_0_q8_0(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q5_0_q5_0(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q5_1_q5_1(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, 
queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q4_0_q4_0(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } static void ggml_cpy_q4_1_q4_1(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); }); } void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1) try { // Unlike other operators ggml_sycl_cpy takes 2 distinct tensors instead of a dst ggml_tensor and rely on its src field scope_op_debug_print scope_dbg_print(__func__, src1, /*num_src=*/0, debug_get_tensor_str("\tsrc0", src0)); const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne == ggml_nelements(src1)); GGML_TENSOR_BINARY_OP_LOCALS01; SYCL_CHECK(ggml_sycl_set_device(ctx.device)); queue_ptr main_stream = ctx.stream(); char * src0_ddc = (char *) src0->data; char * src1_ddc = (char *) src1->data; if ((src0->type == src1->type) && (ggml_is_contiguous(src0) && ggml_is_contiguous(src1))) { GGML_SYCL_DEBUG("%s: memcpy path\n", __func__); main_stream->memcpy(src1_ddc, src0_ddc, ggml_nbytes(src0)); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { ggml_cpy_f32_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { ggml_cpy_f32_f16_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { ggml_cpy_f32_q8_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) { ggml_cpy_f32_q4_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) { 
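// ggml_sycl_cpy dispatch summary: when src0 and src1 share a type and both are
// contiguous, the copy collapses into a single device memcpy of
// ggml_nbytes(src0); otherwise the (src0->type, src1->type) pair selects one of
// the typed launchers above, and an unsupported combination falls through to
// GGML_ABORT("fatal error") at the end of the chain.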
ggml_cpy_f32_q4_1_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { ggml_cpy_f16_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { ggml_cpy_f16_f16_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_I16 && src1->type == GGML_TYPE_I16) { ggml_cpy_i16_i16_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) { ggml_cpy_i32_i32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q4_0 && src1->type == GGML_TYPE_F32) { ggml_cpy_q4_0_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q4_1 && src1->type == GGML_TYPE_F32) { ggml_cpy_q4_1_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_F32) { ggml_cpy_q8_0_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) { ggml_cpy_f32_q5_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q5_0 && src1->type == GGML_TYPE_F32) { ggml_cpy_q5_0_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) { ggml_cpy_f32_q5_1_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_F32) { ggml_cpy_q5_1_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) { ggml_cpy_f32_iq4_nl_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_Q8_0) { ggml_cpy_q8_0_q8_0(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q5_0 && src1->type == GGML_TYPE_Q5_0) { ggml_cpy_q5_0_q5_0(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_Q5_1) { ggml_cpy_q5_1_q5_1(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q4_0 && src1->type == GGML_TYPE_Q4_0) { ggml_cpy_q4_0_q4_0(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, 
nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else if (src0->type == GGML_TYPE_Q4_1 && src1->type == GGML_TYPE_Q4_1) { ggml_cpy_q4_1_q4_1(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream); } else { GGML_LOG_ERROR("%s: unsupported type combination (%s to %s)\n", __func__, ggml_type_name(src0->type), ggml_type_name(src1->type)); GGML_ABORT("fatal error"); } } catch (const sycl::exception & exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } void ggml_sycl_dup(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_cpy(ctx, dst->src[0], dst); } ggml-org-ggml-3678254/src/ggml-sycl/cpy.hpp000066400000000000000000000141361512524704700203620ustar00rootroot00000000000000#ifndef GGML_SYCL_CPY_HPP #define GGML_SYCL_CPY_HPP #include "common.hpp" #include typedef void (*cpy_kernel_t)(const char * cx, char * cdst); __dpct_inline__ int best_index_int8(int n, const int8_t * val, float x) { if (x <= val[0]) { return 0; } if (x >= val[n - 1]) { return n - 1; } int ml = 0, mu = n - 1; while (mu - ml > 1) { int mav = (ml + mu) / 2; if (x < val[mav]) { mu = mav; } else { ml = mav; } } return x - val[mu - 1] < val[mu] - x ? mu - 1 : mu; } inline void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; block_q8_0 * dsti = (block_q8_0 *) cdsti; float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { const float v = xi[j]; amax = sycl::fmax(amax, sycl::fabs((float) v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f / d : 0.0f; dsti->d = d; for (int j = 0; j < QK8_0; ++j) { const float x0 = xi[j] * id; dsti->qs[j] = sycl::round((float) x0); } } inline void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; block_q4_0 * dsti = (block_q4_0 *) cdsti; float amax = 0.0f; float vmax = 0.0f; for (int j = 0; j < QK4_0; ++j) { const float v = xi[j]; if (amax < sycl::fabs((float) v)) { amax = sycl::fabs((float) v); vmax = v; } } const float d = vmax / -8; const float id = d ? 1.0f / d : 0.0f; dsti->d = d; for (int j = 0; j < QK4_0 / 2; ++j) { const float x0 = xi[0 + j] * id; const float x1 = xi[QK4_0 / 2 + j] * id; const uint8_t xi0 = dpct::min(15, (int8_t) (x0 + 8.5f)); const uint8_t xi1 = dpct::min(15, (int8_t) (x1 + 8.5f)); dsti->qs[j] = xi0; dsti->qs[j] |= xi1 << 4; } } inline void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; block_q4_1 * dsti = (block_q4_1 *) cdsti; float vmin = FLT_MAX; float vmax = -FLT_MAX; for (int j = 0; j < QK4_1; ++j) { const float v = xi[j]; vmin = sycl::min(v, vmin); vmax = sycl::max(v, vmax); } const float d = (vmax - vmin) / ((1 << 4) - 1); const float id = d ? 
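// Scale selection in the cpy_blck_f32_q* helpers, worked q4_0 example: if the
// largest-magnitude input of the 32-value block is vmax = -6.4, then
// d = vmax / -8 = 0.8 and id = 1.25, so each value is mapped through
// (int8_t)(x * id + 8.5f), capped at 15; -6.4 encodes as 0 and decodes back to
// (0 - 8) * 0.8 = -6.4 exactly. q8_0 uses the absolute maximum instead,
// d = amax / 127, so the largest-magnitude value always round-trips to
// +/-127 * d.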
1.0f / d : 0.0f; dsti->dm.x() = d; dsti->dm.y() = vmin; for (int j = 0; j < QK4_1 / 2; ++j) { const float x0 = (xi[0 + j] - vmin) * id; const float x1 = (xi[QK4_1 / 2 + j] - vmin) * id; const uint8_t xi0 = dpct::min(15, (int8_t) (x0 + 0.5f)); const uint8_t xi1 = dpct::min(15, (int8_t) (x1 + 0.5f)); dsti->qs[j] = xi0; dsti->qs[j] |= xi1 << 4; } } inline void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; block_q5_0 * dsti = (block_q5_0 *) cdsti; float amax = 0.0f; float vmax = 0.0f; for (int j = 0; j < QK5_0; ++j) { const float v = xi[j]; if (amax < sycl::fabs((float) v)) { amax = sycl::fabs((float) v); vmax = v; } } const float d = vmax / -16; const float id = d ? 1.0f / d : 0.0f; dsti->d = d; uint32_t qh = 0; for (int j = 0; j < QK5_0 / 2; ++j) { const float x0 = xi[0 + j] * id; const float x1 = xi[QK5_0 / 2 + j] * id; const uint8_t xi0 = dpct::min(31, (int8_t) (x0 + 16.5f)); const uint8_t xi1 = dpct::min(31, (int8_t) (x1 + 16.5f)); dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0 / 2); } memcpy(dsti->qh, &qh, sizeof(qh)); } inline void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; block_q5_1 * dsti = (block_q5_1 *) cdsti; float min = xi[0]; float max = xi[0]; for (int j = 1; j < QK5_1; ++j) { const float v = xi[j]; min = v < min ? v : min; max = v > max ? v : max; } const float d = (max - min) / 31; const float id = d ? 1.0f / d : 0.0f; dsti->dm.x() = d; dsti->dm.y() = min; uint32_t qh = 0; for (int j = 0; j < QK5_1 / 2; ++j) { const float x0 = (xi[0 + j] - min) * id; const float x1 = (xi[QK5_1 / 2 + j] - min) * id; const uint8_t xi0 = (uint8_t) (x0 + 0.5f); const uint8_t xi1 = (uint8_t) (x1 + 0.5f); dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); qh |= ((xi0 & 0x10u) >> 4) << (j + 0); qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1 / 2); } memcpy(dsti->qh, &qh, sizeof(qh)); } inline void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; block_iq4_nl * dsti = (block_iq4_nl *) cdsti; float amax = 0.0f; float vmax = 0.0f; for (int j = 0; j < QK4_NL; ++j) { const float v = xi[j]; if (amax < sycl::fabs((float) v)) { amax = sycl::fabs((float) v); vmax = v; } } float d = vmax / kvalues_iq4nl[0]; const float id = d ? 1.0f / d : 0.0f; float sumqx = 0, sumq2 = 0; for (int j = 0; j < QK4_NL / 2; ++j) { const float x0 = xi[0 + j] * id; const float x1 = xi[QK4_NL / 2 + j] * id; const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0); const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1); dsti->qs[j] = xi0 | (xi1 << 4); const float v0 = kvalues_iq4nl[xi0]; const float v1 = kvalues_iq4nl[xi1]; const float w0 = xi[0 + j] * xi[0 + j]; const float w1 = xi[QK4_NL / 2 + j] * xi[QK4_NL / 2 + j]; sumqx += w0 * v0 * xi[j] + w1 * v1 * xi[QK4_NL / 2 + j]; sumq2 += w0 * v0 * v0 + w1 * v1 * v1; } dsti->d = sumq2 > 0 ? sumqx / sumq2 : d; } void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1); void ggml_sycl_dup(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_CPY_HPP ggml-org-ggml-3678254/src/ggml-sycl/dequantize.hpp000066400000000000000000000721751512524704700217470ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_DEQUANTIZE_HPP #define GGML_SYCL_DEQUANTIZE_HPP #include "common.hpp" typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v); typedef void (*dequantize_kernel_t_reorder)(const void *d, const int64_t ib, const void *qs, const int iqs, dfloat2 &v); static __dpct_inline__ void dequantize_q4_0(const void *vx, const int64_t ib, const int iqs, dfloat2 &v) { const block_q4_0 * x = (const block_q4_0 *) vx; const dfloat d = x[ib].d; const int vui = x[ib].qs[iqs]; v.x() = vui & 0xF; v.y() = vui >> 4; #ifdef GGML_SYCL_F16 // v = v - {8.0f, 8.0f}; // v = v * {d, d}; v.s0() = (v.s0() - 8.0f) * d; v.s1() = (v.s1() - 8.0f) * d; #else v.x() = (v.x() - 8.0f) * d; v.y() = (v.y() - 8.0f) * d; #endif // GGML_SYCL_F16 } static __dpct_inline__ void dequantize_q4_0_reorder(const void *d_ptr, const int64_t ib, const void *qs, const int iqs, dfloat2 &v) { // const block_q4_0 * x = (const block_q4_0 *) vx; const dfloat d = (const dfloat)*((const sycl::half*)d_ptr+ib); const int vui = *((const uint8_t *)qs+iqs); v.x() = vui & 0xF; v.y() = vui >> 4; #ifdef GGML_SYCL_F16 // v = v - {8.0f, 8.0f}; // v = v * {d, d}; v.s0() = (v.s0() - 8.0f) * d; v.s1() = (v.s1() - 8.0f) * d; #else v.x() = (v.x() - 8.0f) * d; v.y() = (v.y() - 8.0f) * d; #endif // GGML_SYCL_F16 } static __dpct_inline__ void dequantize_q4_1(const void *vx, const int64_t ib, const int iqs, dfloat2 &v) { const block_q4_1 * x = (const block_q4_1 *) vx; const dfloat d = x[ib].dm[0]; const dfloat m = x[ib].dm[1]; const int vui = x[ib].qs[iqs]; v.x() = vui & 0xF; v.y() = vui >> 4; #ifdef GGML_SYCL_F16 // v = v * {d, d}; // v = v + {m, m}; v.s0() = sycl::fma(v.s0(), d, m); v.s1() = sycl::fma(v.s1(), d, m); #else v.x() = sycl::fma(v.x(), d, m); v.y() = sycl::fma(v.y(), d, m); #endif // GGML_SYCL_F16 } static __dpct_inline__ void dequantize_q5_0(const void *vx, const int64_t ib, const int iqs, dfloat2 &v) { const block_q5_0 * x = (const block_q5_0 *) vx; const dfloat d = x[ib].d; uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x() = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y() = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_SYCL_F16 // v = v - {16.0f, 16.0f}; // v = v * {d, d}; v.s0() = (v.s0() - 16.0f) * d; v.s1() = (v.s1() - 16.0f) * d; #else v.x() = (v.x() - 16.0f) * d; v.y() = (v.y() - 16.0f) * d; #endif // GGML_SYCL_F16 } static __dpct_inline__ void dequantize_q5_1(const void *vx, const int64_t ib, const int iqs, dfloat2 &v) { const block_q5_1 * x = (const block_q5_1 *) vx; const dfloat d = x[ib].dm[0]; const dfloat m = x[ib].dm[1]; uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x() = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y() = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_SYCL_F16 // v = v * {d, d}; // v = v + {m, m}; v.s0() = sycl::fma(v.s0(), d, m); v.s1() = sycl::fma(v.s1(), d, m); #else v.x() = sycl::fma(v.x(), d, m); v.y() = sycl::fma(v.y(), d, m); #endif // GGML_SYCL_F16 } static __dpct_inline__ void dequantize_q8_0(const void *vx, const int64_t ib, const int iqs, dfloat2 &v) { const block_q8_0 * x = (const block_q8_0 *) vx; const dfloat d = x[ib].d; v.x() = x[ib].qs[iqs + 0]; v.y() = x[ib].qs[iqs + 1]; #ifdef GGML_SYCL_F16 // v = v * {d, d}; v.s0() *= d; v.s1() *= d; #else v.x() *= d; v.y() *= d; #endif // GGML_SYCL_F16 } template static void 
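// Nibble decoding used by the scalar dequantize_q4_0/q4_1 helpers above, concrete
// example for q4_0: a packed byte 0xA3 with d = 0.5 gives
//   low  nibble 0x3 -> ( 3 - 8) * 0.5 = -2.5   (v.x)
//   high nibble 0xA -> (10 - 8) * 0.5 =  1.0   (v.y)
// and the GGML_SYCL_F16 branch performs the same arithmetic on the sycl::half
// lanes of the dfloat2 instead of on floats.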
dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); // assume 32 threads const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid/8; const int64_t ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_0 * x = (const block_q4_0 *)vx + ib; const float d = sycl::vec(x->d) .convert()[0]; const float dm = -8*d; const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d * (q[l] & 0xF) + dm; y[l+16] = d * (q[l] >> 4) + dm; } } template static void dequantize_block_q4_0_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); auto k=nb32; // assume 32 threads const int64_t tid = item_ct1.get_local_id(2); const int lane_ib = i * WARP_SIZE + tid; if (lane_ib >= k / QK4_0) { return; } dst_t * y_ptr = yy + lane_ib * QK4_0; auto qs = (const uint8_t*)vx + lane_ib * QK4_0 / 2; auto s_ptr = (const sycl::half*)((const uint8_t*)vx + k / 2) + lane_ib; const float d = float(*s_ptr); #pragma unroll for (int l = 0; l < QK4_0 / 2; ++l) { int vq = qs[l]; y_ptr[l + 0] = d * ((vq & 0xF) - 8); y_ptr[l + 16] = d * ((vq >> 4) - 8); } } template static void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); // assume 32 threads const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid/8; const int64_t ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_1 * x = (const block_q4_1 *)vx + ib; const sycl::float2 d = x->dm.convert(); const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l + 0] = d.x() * (q[l] & 0xF) + d.y(); y[l + 16] = d.x() * (q[l] >> 4) + d.y(); } } //================================== k-quants template static void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); const block_q2_K * x = (const block_q2_K *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t n = tid/32; const int64_t l = tid - 32*n; const int64_t is = 8*n + l/16; const uint8_t q = x[i].qs[32*n + l]; dst_t * y = yy + i*QK_K + 128*n; float dall = x[i].dm[0]; float dmin = x[i].dm[1]; y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4); y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); #else const int64_t is = tid/16; // 0 or 1 const int64_t il = tid%16; // 0...15 const uint8_t q = x[i].qs[il] >> (2*is); dst_t * y = yy + i*QK_K + 16*is + il; float dall = x[i].dm[0]; float dmin = x[i].dm[1]; y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4); #endif } template static void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); const block_q3_K * x = (const block_q3_K *) vx; #if QK_K == 256 const int64_t 
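// Buffer layout assumed by dequantize_block_q4_0_reorder above: the reordered
// tensor stores all packed nibbles first, then all block scales,
//   [ qs: k/2 bytes ][ d: (k / QK4_0) half values ]
// so block ib reads its 16 quant bytes at vx + ib * QK4_0 / 2 and its scale at
// (const sycl::half *)((const uint8_t *)vx + k / 2) + ib, unlike the plain
// block_q4_0 array-of-structs layout where each scale is interleaved with its
// 16 quant bytes.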
r = item_ct1.get_local_id(2) / 4; const int64_t tid = r/2; const int64_t is0 = r%2; const int64_t l0 = 16 * is0 + 4 * (item_ct1.get_local_id(2) % 4); const int64_t n = tid / 4; const int64_t j = tid - 4*n; uint8_t m = 1 << (4*n + j); int64_t is = 8*n + 2*j + is0; int shift = 2*j; int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); float d_all = x[i].d; float dl = d_all * (us - 32); dst_t * y = yy + i*QK_K + 128*n + 32*j; const uint8_t * q = x[i].qs + 32*n; const uint8_t * hm = x[i].hmask; for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); #else const int64_t tid = item_ct1.get_local_id(2); const int64_t is = tid/16; // 0 or 1 const int64_t il = tid%16; // 0...15 const int64_t im = il/8; // 0...1 const int64_t in = il%8; // 0...7 dst_t * y = yy + i*QK_K + 16*is + il; const uint8_t q = x[i].qs[il] >> (2*is); const uint8_t h = x[i].hmask[in] >> (2*is + im); const float d = (float)x[i].d; if (is == 0) { y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } else { y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } #endif } #if QK_K == 256 static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { if (j < 4) { d = q[j] & 63; m = q[j + 4] & 63; } else { d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); } } #endif template inline void dequantize_q4_K_common(dst_t * __restrict__ y, const uint8_t * __restrict__ qs_ptr, const float dall, const float dmin, uint8_t * __restrict__ scales_local, int il, int ir) { const int is = 2 * il; constexpr int n = 4; uint8_t sc, m; get_scale_min_k4(is + 0, scales_local, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, scales_local, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; sycl::vec q_vec = vec_aligned_load(qs_ptr + 32 * il + n * ir); for (int l = 0; l < n; ++l) { y[l + 0] = d1 * (q_vec[l] & 0xF) - m1; y[l + 32] = d2 * (q_vec[l] >> 4) - m2; } } template static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy, uint8_t* scales_local, const sycl::nd_item<3> &item_ct1) { const block_q4_K * x = (const block_q4_K *) vx; const int64_t i = item_ct1.get_group(2); #if QK_K == 256 const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid / 8; const int64_t ir = tid % 8; dst_t * y = yy + i * QK_K + 64 * il + 4 * ir; const sycl::half2 dm = x[i].dm; const float dall = dm[0]; const float dmin = dm[1]; if (tid < 12) { scales_local[tid] = x[i].scales[tid]; } item_ct1.barrier(sycl::access::fence_space::local_space); dequantize_q4_K_common(y, x[i].qs, dall, dmin, scales_local, il, ir); #else const int64_t tid = item_ct1.get_local_id(2); const uint8_t * q = x[i].qs; dst_t * y = yy + i*QK_K; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4); y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4); #endif } template static 
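// get_scale_min_k4 above unpacks the 12-byte q4_K/q5_K scale block: entries
// j = 0..3 keep a 6-bit scale in q[j] and a 6-bit min in q[j+4] (low six bits of
// each byte); entries j = 4..7 are split, with the low 4 bits living in q[j+4]
// and the top 2 bits reclaimed from the upper bits of q[j-4] (scale) or q[j]
// (min). Example for j = 5: q[1] = 0b11000000 and q[9] = 0x4F give
// sc = (0x4F & 0xF) | ((q[1] >> 6) << 4) = 15 | 48 = 63.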
void dequantize_block_q4_K_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy, uint8_t * scales_local, const sycl::nd_item<1> & item_ct1, int64_t nb) { const int64_t i = item_ct1.get_group(0); // block index const int64_t tid = item_ct1.get_local_id(0); // thread index within block const int64_t il = tid / 8; const int64_t ir = tid % 8; dst_t * y = yy + i * QK_K + 64 * il + 4 * ir; const uint8_t * base = static_cast(vx); const size_t qs_offset = i * (QK_K / 2); const size_t scales_offset = nb * (QK_K / 2) + i * K_SCALE_SIZE; const size_t dm_offset = nb * (QK_K / 2) + nb * K_SCALE_SIZE + i * sizeof(ggml_half2); const uint8_t * qs_ptr = base + qs_offset; const uint8_t * scales_ptr = base + scales_offset; ggml_half2 dm_values = *reinterpret_cast(base + dm_offset); const float dall = dm_values.x(); const float dmin = dm_values.y(); if (tid < 12) { scales_local[tid] = scales_ptr[tid]; } item_ct1.barrier(sycl::access::fence_space::local_space); dequantize_q4_K_common(y, qs_ptr, dall, dmin, scales_local, il, ir); } template static void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1) { const block_q5_K * x = (const block_q5_K *) vx; const int64_t i = item_ct1.get_group(2); #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid/16; // il is in 0...3 const int64_t ir = tid%16; // ir is in 0...15 const int64_t is = 2*il; // is is in 0...6 dst_t * y = yy + i*QK_K + 64*il + 2*ir; const float dall = x[i].dm[0]; const float dmin = x[i].dm[1]; const uint8_t * ql = x[i].qs + 32*il + 2*ir; const uint8_t * qh = x[i].qh + 2*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; uint8_t hm = 1 << (2*il); y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1; hm <<= 1; y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2; #else const int64_t tid = item_ct1.get_local_id(2); const uint8_t q = x[i].qs[tid]; const int64_t im = tid/8; // 0...3 const int64_t in = tid%8; // 0...7 const int64_t is = tid/16; // 0 or 1 const uint8_t h = x[i].qh[in] >> im; const float d = x[i].d; dst_t * y = yy + i*QK_K + tid; y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16)); y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 
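// Reordered q4_K layout consumed by dequantize_block_q4_K_reorder above, for a
// tensor of nb super-blocks:
//   [ qs: nb * QK_K/2 bytes ][ scales: nb * K_SCALE_SIZE bytes ][ dm: nb * sizeof(ggml_half2) ]
// Block i therefore finds its quants at i * QK_K/2, its 12 packed scale/min bytes
// at nb * QK_K/2 + i * K_SCALE_SIZE, and its (d, dmin) pair after both arrays,
// while the per-element math is shared with the non-reordered path through
// dequantize_q4_K_common.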
0 : 16)); #endif } template static void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1) { const block_q6_K * x = (const block_q6_K *) vx; const int64_t i = item_ct1.get_group(2); #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int64_t tid = item_ct1.get_local_id(2); const int64_t ip = tid/32; // ip is 0 or 1 const int64_t il = tid - 32*ip; // 0...32 const int64_t is = 8*ip + il/16; dst_t * y = yy + i*QK_K + 128*ip + il; const float d = x[i].d; const uint8_t * ql = x[i].ql + 64*ip + il; const uint8_t qh = x[i].qh[32*ip + il]; const int8_t * sc = x[i].scales + is; y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); #else // assume 32 threads const int64_t tid = item_ct1.get_local_id(2); const int64_t ip = tid/16; // 0 or 1 const int64_t il = tid - 16*ip; // 0...15 dst_t * y = yy + i*QK_K + 16*ip + il; const float d = x[i].d; const uint8_t ql = x[i].ql[16*ip + il]; const uint8_t qh = x[i].qh[il] >> (2*ip); const int8_t * sc = x[i].scales; y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32); #endif } template static void dequantize_block_q6_K_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> & item_ct1, int64_t n_blocks) { const int64_t ib = item_ct1.get_group(2); const int64_t tid = item_ct1.get_local_id(2); const int64_t ip = tid / 32; // ip is 0 or 1 const int64_t il = tid - 32 * ip; // 0...32 const int64_t is = 8 * ip + il / 16; const uint8_t * base_ptr = static_cast(vx); const auto ql_offset = ib * (QK_K / 2); const auto qh_offset = (QK_K / 2) * n_blocks + (QK_K / 4) * ib; const auto base_scales_offset = (QK_K / 2) * n_blocks + (QK_K / 4) * n_blocks + (QK_K / 16) * ib; const auto base_d_offset = ((QK_K / 2) + (QK_K / 4) + (QK_K / 16)) * n_blocks; const uint8_t * ql_ptr = base_ptr + ql_offset; const uint8_t * qh_ptr = base_ptr + qh_offset; const uint8_t * scales_ptr = base_ptr + base_scales_offset; const ggml_half * d = (const ggml_half *) (base_ptr + base_d_offset) + ib; dst_t * y = yy + ib * QK_K + 128 * ip + il; const uint8_t * ql = ql_ptr + 64 * ip + il; const uint8_t qh = *(qh_ptr + 32 * ip + il); const int8_t * sc = reinterpret_cast(scales_ptr + is); y[0] = *d * sc[0] * ((int8_t) ((ql[0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = *d * sc[2] * ((int8_t) ((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); y[64] = *d * sc[4] * ((int8_t) ((ql[0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); y[96] = *d * sc[6] * ((int8_t) ((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); } template static void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1, const uint64_t *iq2xxs_grid_ptr, const uint8_t *ksigns_iq2xs_ptr, const uint8_t *kmask_iq2xs_ptr) { const int64_t i = item_ct1.get_group(2); const block_iq2_xxs * x = (const block_iq2_xxs *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint16_t * q2 = x[i].qs + 4*ib; const uint8_t * aux8 = (const uint8_t *)q2; const uint8_t * grid = (const uint8_t *)(iq2xxs_grid_ptr + 
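// NOTE: in iq2_xxs each group of four uint16 values q2[0..3] encodes 32 weights:
// the bytes of q2[0..1] (aux8[0..3]) index iq2xxs_grid, whose uint64 entries pack
// 8 magnitudes each, while aux32 = q2[2] | (q2[3] << 16) carries four 7-bit
// sign-pattern indices (one per il) in its low 28 bits and a 4-bit scale in its
// top 4 bits -- hence the d * (0.5f + (aux32 >> 28)) * 0.25f factor computed below.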
aux8[il]); const uint32_t aux32 = q2[2] | (q2[3] << 16); const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f; const uint8_t signs = ksigns_iq2xs_ptr[(aux32 >> 7*il) & 127]; for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs_ptr[j] ? -1.f : 1.f); #else assert(false); #endif } template static void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1, const uint64_t *iq2xs_grid, const uint8_t *ksigns_iq2xs, const uint8_t *kmask_iq2xs) { const int64_t i = item_ct1.get_group(2); const block_iq2_xs * x = (const block_iq2_xs *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint16_t * q2 = x[i].qs + 4*ib; const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511)); const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; const uint8_t signs = ksigns_iq2xs[q2[il] >> 9]; for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); #else assert(false); #endif } template __dpct_inline__ static void dequantize_block_iq2_s(const void *__restrict__ vx, dst_t *__restrict__ yy, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); const block_iq2_s * x = (const block_iq2_s *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300))); const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f; const uint8_t signs = x[i].qs[QK_K/8+4*ib+il]; #pragma unroll for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f); #else assert(false); #endif } template static void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1, const uint32_t *iq3xxs_grid, const uint8_t *ksigns_iq2xs, const uint8_t *kmask_iq2xs) { const int64_t i = item_ct1.get_group(2); const block_iq3_xxs * x = (const block_iq3_xxs *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint8_t * q3 = x[i].qs + 8*ib; const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib; const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]); const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]); const uint32_t aux32 = gas[0] | (gas[1] << 16); const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.5f; const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127]; for (int j = 0; j < 4; ++j) { y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? 
-1.f : 1.f); } #else assert(false); #endif } template __dpct_inline__ static void dequantize_block_iq3_s(const void *__restrict__ vx, dst_t *__restrict__ yy, const sycl::nd_item<3> &item_ct1, const uint8_t *kmask_iq2xs, const uint32_t *iq3s_grid) { const int64_t i = item_ct1.get_group(2); const block_iq3_s * x = (const block_iq3_s *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint8_t * qs = x[i].qs + 8*ib; const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256))); const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256))); const float d = (float)x[i].d * (1 + 2*((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf)); const uint8_t signs = x[i].signs[4*ib + il]; #pragma unroll for (int j = 0; j < 4; ++j) { y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f); y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f); } #else assert(false); #endif } template __dpct_inline__ static void dequantize_block_iq1_s(const void *__restrict__ vx, dst_t *__restrict__ yy, const sycl::nd_item<3> &item_ct1, const uint32_t *iq1s_grid_gpu) { const int64_t i = item_ct1.get_group(2); const block_iq1_s * x = (const block_iq1_s *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA; const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1); uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32; grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[ib] >> 3*il) & 7) << 8)]; grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f; grid32[0] &= 0x0f0f0f0f; #pragma unroll for (int j = 0; j < 8; ++j) { y[j] = d * (q[j] + delta); } #else assert(false); #endif } template __dpct_inline__ static void dequantize_block_iq1_m(const void *__restrict__ vx, dst_t *__restrict__ yy, const sycl::nd_item<3> &item_ct1, const uint32_t *iq1s_grid_gpu) { const int64_t i = item_ct1.get_group(2); const block_iq1_m * x = (const block_iq1_m *) vx; const int64_t tid = item_ct1.get_local_id(2); #if QK_K == 256 const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 8*il; const uint16_t * sc = (const uint16_t *)x[i].scales; iq1m_scale_t scale; scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); const int ib16 = 2*ib + il/2; // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4); const float d = (float)scale.f16 * (2*((sc[ib16/4] >> 3*(ib16%4)) & 0x7) + 1); const float delta = x[i].qh[2*ib+il/2] & (0x08 << 4*(il%2)) ? 
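// NOTE: the top bit (0x08) of each qh nibble chooses between the two per-group
// offsets -1 - IQ1M_DELTA and -1 + IQ1M_DELTA applied to all 8 weights of the group,
// while the low 3 bits of the same nibble extend the grid index (the "& 7" below):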
-1 - IQ1M_DELTA : -1 + IQ1M_DELTA; uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32; grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[2*ib+il/2] >> 4*(il%2)) & 7) << 8)]; grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f; grid32[0] &= 0x0f0f0f0f; #pragma unroll for (int j = 0; j < 8; ++j) { y[j] = d * (q[j] + delta); } #else assert(false); #endif } template __dpct_inline__ static void dequantize_block_iq4_nl(const void *__restrict__ vx, dst_t *__restrict__ yy, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL); const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 4*il; const uint8_t * q4 = x[ib].qs + 4*il; const float d = (float)x[ib].d; #pragma unroll for (int j = 0; j < 4; ++j) { y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf]; y[j+16] = d * kvalues_iq4nl[q4[j] >> 4]; } } template __dpct_inline__ static void dequantize_block_iq4_xs(const void *__restrict__ vx, dst_t *__restrict__ yy, const sycl::nd_item<3> &item_ct1) { const int64_t i = item_ct1.get_group(2); const block_iq4_xs * x = (const block_iq4_xs *)vx; const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 4*il; const uint8_t * q4 = x[i].qs + 16*ib + 4*il; const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32); #pragma unroll for (int j = 0; j < 4; ++j) { y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf]; y[j+16] = d * kvalues_iq4nl[q4[j] >> 4]; } } template static void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1) { // auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>(); const int64_t i = item_ct1.get_group(2); const block_mxfp4 * x = (const block_mxfp4 *) vx + i*(QK_K/QK_MXFP4); const int64_t tid = item_ct1.get_local_id(2); const int64_t il = tid/8; // 0...3 const int64_t ib = tid%8; // 0...7 dst_t * y = yy + i*QK_K + 32*ib + 4*il; const uint8_t * q4 = x[ib].qs + 4*il; const float d = ggml_sycl_e8m0_to_fp32(x[ib].e); for (int j = 0; j < 4; ++j) { y[j+ 0] = d * kvalues_mxfp4[q4[j] & 0xf]*0.5f; y[j+16] = d * kvalues_mxfp4[q4[j] >> 4]*0.5f; } } #endif // GGML_SYCL_DEQUANTIZE_HPP ggml-org-ggml-3678254/src/ggml-sycl/dmmv.cpp000066400000000000000000001351131512524704700205240ustar00rootroot00000000000000#include "convert.hpp" #include "dmmv.hpp" #include "dequantize.hpp" #include "presets.hpp" static void convert_f16(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ const sycl::half *x = (const sycl::half *)vx; // automatic half -> float type cast if dfloat == float v.x() = x[ib + iqs + 0]; v.y() = x[ib + iqs + 1]; } static void convert_f32(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ const float * x = (const float *) vx; // automatic half -> float type cast if dfloat == float v.x() = x[ib + iqs + 0]; v.y() = x[ib + iqs + 1]; } template static void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows, const sycl::nd_item<3> &item_ct1) { // qk = quantized weights per x block // qr = number of quantized weights per data value in x block const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row >= nrows) { return; } const int tid = 
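// NOTE: work decomposition: each group of WARP_SIZE lanes (one sub-group, enforced by
// reqd_sub_group_size at the launch sites) accumulates one output row, and a
// work-group covers GGML_SYCL_MMV_Y rows through its second local dimension. Per outer
// iteration a lane consumes vals_per_iter = 2*GGML_SYCL_DMMV_X / WARP_SIZE quantized
// values; e.g. with GGML_SYCL_DMMV_X == 32 and WARP_SIZE == 32 each lane dequantizes
// 2 values per iteration and the sub-group advances 64 columns at a time.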
item_ct1.get_local_id(2); const int iter_stride = 2*GGML_SYCL_DMMV_X; const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter const int y_offset = qr == 1 ? 1 : qk/2; // partial sum for each thread #ifdef GGML_SYCL_F16 sycl::half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics #else float tmp = 0.0f; #endif // GGML_SYCL_F16 for (int i = 0; i < ncols; i += iter_stride) { const int col = i + vals_per_iter*tid; const int ib = (row*ncols + col)/qk; // x block index const int iqs = (col%qk)/qr; // x quant index const int iybs = col - col%qk; // y block start index // processing >2 values per i iter is faster for fast GPUs #pragma unroll for (int j = 0; j < vals_per_iter; j += 2) { // process 2 vals per j iter // dequantize // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val dfloat2 v; dequantize_kernel(vx, ib, iqs + j/qr, v); // matrix multiplication // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 #ifdef GGML_SYCL_F16 dfloat2 t1{y[iybs + iqs + j / qr + 0], y[iybs + iqs + j / qr + y_offset]}; tmp += v * t1; #else tmp += v.x() * y[iybs + iqs + j / qr + 0]; tmp += v.y() * y[iybs + iqs + j / qr + y_offset]; #endif // GGML_SYCL_F16 } } // sum up partial sums and write back result const int mask_start = ncols > GGML_SYCL_DMMV_X ? WARP_SIZE >> 1 : WARP_SIZE >> 2; for (int mask = mask_start; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (tid == 0) { #ifdef GGML_SYCL_F16 dst[row] = tmp.x() + tmp.y(); #else dst[row] = tmp; #endif // GGML_SYCL_F16 } } template static void dequantize_mul_mat_vec_reorder(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows, const sycl::nd_item<3> &item_ct1) { // qk = quantized weights per x block // qr = number of quantized weights per data value in x block const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row >= nrows) { return; } const int tid = item_ct1.get_local_id(2); const int ncols_left = ncols % (QK4_0*WARP_SIZE); const int ncols_align = ncols - ncols_left; const int iter_stride = 8*2*GGML_SYCL_DMMV_X; const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter //64/16=4, 512/16/2= 16 const int y_offset = qr == 1 ? 
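// NOTE: for qr == 2 each quantized byte packs two weights whose matching y entries
// lie half a block apart, so the second product in the inner loop reads
// y[... + y_offset] with y_offset = qk/2; for qr == 1 the pair is simply adjacent: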
1 : qk/2; // partial sum for each thread #ifdef GGML_SYCL_F16 sycl::half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics #else float tmp = 0.0f; #endif // GGML_SYCL_F16 const char *d_ptr = (const char*)vx+ncols*nrows/2; int i=0; for (i = 0; i < ncols_align; i += iter_stride) { const int col = i + vals_per_iter*tid; const int ib = (row*ncols + col)/qk; // x block index const int iqs = (col%qk)/qr; // x quant index const int iybs = col - col%qk; // y block start index // processing >2 values per i iter is faster for fast GPUs #pragma unroll for (int j = 0; j < vals_per_iter; j += 2) { // process 2 vals per j iter // dequantize // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val dfloat2 v; dequantize_kernel_reorder((const void *)d_ptr, ib, (const void *)vx, ib * QK4_0 / 2 +iqs+j/qr, v); // matrix multiplication // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 #ifdef GGML_SYCL_F16 dfloat2 t1{y[iybs + iqs + j / qr + 0], y[iybs + iqs + j / qr + y_offset]}; tmp += v * t1; #else tmp += v.x() * y[iybs + iqs + j / qr + 0]; tmp += v.y() * y[iybs + iqs + j / qr + y_offset]; #endif // GGML_SYCL_F16 } } for (; i < ncols; i += iter_stride) { if (tid>=ncols_left/QK4_0) continue; const int col = i + vals_per_iter*tid; const int ib = (row*ncols + col)/qk; // x block index const int iqs = (col%qk)/qr; // x quant index const int iybs = col - col%qk; // y block start index // processing >2 values per i iter is faster for fast GPUs #pragma unroll for (int j = 0; j < vals_per_iter; j += 2) { // process 2 vals per j iter // dequantize // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val dfloat2 v; dequantize_kernel_reorder((const void *)d_ptr, ib, (const void *)vx, ib * QK4_0 / 2 +iqs+j/qr, v); // matrix multiplication // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 #ifdef GGML_SYCL_F16 dfloat2 t1{y[iybs + iqs + j / qr + 0], y[iybs + iqs + j / qr + y_offset]}; tmp += v * t1; #else tmp += v.x() * y[iybs + iqs + j / qr + 0]; tmp += v.y() * y[iybs + iqs + j / qr + y_offset]; #endif // GGML_SYCL_F16 } } // sum up partial sums and write back result const int mask_start = ncols > GGML_SYCL_DMMV_X ? WARP_SIZE >> 1 : WARP_SIZE >> 2; for (int mask = mask_start; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (tid == 0) { #ifdef GGML_SYCL_F16 dst[row] = tmp.x() + tmp.y(); #else dst[row] = tmp; #endif // GGML_SYCL_F16 } } static void convert_mul_mat_vec_f16_sycl(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols, nrows, item_ct1); }); } } /* DPCT1110:4: The total declared local variable size in device function dequantize_mul_mat_vec_q2_k exceeds 128 bytes and may cause high register pressure. 
Consult with your hardware vendor to find the total register size available and adjust the code, or use smaller sub-group size to avoid high register pressure. */ static void dequantize_mul_mat_vec_q2_k(const void *__restrict__ vx, const float *__restrict__ yy, float *__restrict__ dst, const int ncols, int nrows, const sycl::nd_item<3> &item_ct1) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q2_K * x = (const block_q2_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const int tid = item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15 const int ix = item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0...15 or 0...7 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int s_offset = 8*im; const int y_offset = 128*im + l0; uint32_t aux[4]; const uint8_t * d = (const uint8_t *)aux; const uint8_t * m = (const uint8_t *)(aux + 2); for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const float dall = x[i].dm[0]; const float dmin = x[i].dm[1]; const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset); aux[0] = a[0] & 0x0f0f0f0f; aux[1] = a[1] & 0x0f0f0f0f; aux[2] = (a[0] >> 4) & 0x0f0f0f0f; aux[3] = (a[1] >> 4) & 0x0f0f0f0f; float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3) + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3) + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3) + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3) + y[l+16] * d[1] * ((q[l+16] >> 0) & 3) + y[l+48] * d[3] * ((q[l+16] >> 2) & 3) + y[l+80] * d[5] * ((q[l+16] >> 4) & 3) +y[l+112] * d[7] * ((q[l+16] >> 6) & 3); sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6] + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7]; } tmp += dall * sum1 - dmin * sum2; } #else const int tid = item_ct1.get_local_id(2) / (2 * K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = item_ct1.get_local_id(2) % (2 * K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; uint32_t uaux[2]; const uint8_t * d = (const uint8_t *)uaux; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint32_t * s = (const uint32_t *)x[i].scales; uaux[0] = s[0] & 0x0f0f0f0f; uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; const sycl::float2 dall = x[i].dm.convert(); float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t ql = q[l]; sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3) + y[l+16] * d[1] * ((ql >> 2) & 3) + y[l+32] * d[2] * ((ql >> 4) & 3) + y[l+48] * d[3] * ((ql >> 6) & 3); sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7]; } tmp += dall.x() * sum1 - dall.y() * sum2; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, 
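// NOTE: classic XOR-shuffle butterfly reduction: at every step each lane adds the
// partial sum of the lane whose id differs in the current mask bit, so after
// log2(sub-group size) steps all lanes hold the full row sum and the first lane
// writes it out. For a 32-wide sub-group the mask sequence is 16, 8, 4, 2, 1.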
mask); } if (item_ct1.get_local_id(2) == 0) { dst[row] = tmp; } } /* DPCT1110:5: The total declared local variable size in device function dequantize_mul_mat_vec_q3_k exceeds 128 bytes and may cause high register pressure. Consult with your hardware vendor to find the total register size available and adjust the code, or use smaller sub-group size to avoid high register pressure. */ static void dequantize_mul_mat_vec_q3_k(const void *__restrict__ vx, const float *__restrict__ yy, float *__restrict__ dst, const int ncols, int nrows, const sycl::nd_item<3> &item_ct1) { const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q3_K * x = (const block_q3_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x0303; const uint16_t kmask2 = 0x0f0f; const int tid = item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1 const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0....15 or 0...7 const uint8_t m = 1 << (4*im); const int l0 = n*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int y_offset = 128*im + l0; uint16_t utmp[4]; const int8_t * s = (const int8_t *)utmp; const uint16_t s_shift = 4*im; for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const uint8_t * h = x[i].hmask + l0; const uint16_t * a = (const uint16_t *)x[i].scales; utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4); utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4); utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4); utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4); const float d = x[i].d; float sum = 0; for (int l = 0; l < n; ++l) { sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4)) + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4)) + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4)) + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4)); sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4)) + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4)) + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4)) + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 
0 : 4)); } tmp += d * sum; } #else const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14 const int in = offset/8; // 0 or 1 const int im = offset%8; // 0...7 for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint8_t * s = x[i].scales; const float dall = (float)x[i].d; float sum = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t hl = x[i].hmask[im+l] >> in; const uint8_t ql = q[l]; sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4)) + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4)) + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4)) + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (item_ct1.get_local_id(2) == 0) { dst[row] = tmp; } } /* DPCT1110:6: The total declared local variable size in device function dequantize_mul_mat_vec_q4_k exceeds 128 bytes and may cause high register pressure. Consult with your hardware vendor to find the total register size available and adjust the code, or use smaller sub-group size to avoid high register pressure. */ static void dequantize_mul_mat_vec_q4_k(const void *__restrict__ vx, const float *__restrict__ yy, float *__restrict__ dst, const int ncols, int nrows, const sycl::nd_item<3> &item_ct1) { const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q4_K * x = (const block_q4_K *)vx + ib0; #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 const int il = tid/step; // 0...3 const int ir = tid - step*il; // 0...7 or 0...3 const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 const int im = il/2; // 0 or 1. 
0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; #if K_QUANTS_PER_ITERATION == 2 uint32_t q32[4]; const uint8_t * q4 = (const uint8_t *)q32; #else uint16_t q16[4]; const uint8_t * q4 = (const uint8_t *)q16; #endif float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = x[i].dm[0]; const float dmin = x[i].dm[1]; const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); #if K_QUANTS_PER_ITERATION == 2 const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset); const uint32_t * q2 = q1 + 16; q32[0] = q1[0] & 0x0f0f0f0f; q32[1] = q1[0] & 0xf0f0f0f0; q32[2] = q2[0] & 0x0f0f0f0f; q32[3] = q2[0] & 0xf0f0f0f0; sycl::float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 4; ++l) { s.x() += y1[l] * q4[l + 0]; s.y() += y1[l + 32] * q4[l + 4]; s.z() += y2[l] * q4[l + 8]; s.w() += y2[l + 32] * q4[l + 12]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x() * sc[0] + s.y() * sc[1] * 1.f / 16.f + s.z() * sc[4] + s.w() * sc[5] * 1.f / 16.f) - dmin * smin; #else const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset); const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[0] & 0xf0f0; q16[2] = q2[0] & 0x0f0f; q16[3] = q2[0] & 0xf0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 2; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2]; s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #endif } #else const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; uint16_t aux16[2]; const uint8_t * s = (const uint8_t *)aux16; float tmp = 0; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const float * y = yy + i*QK_K + step; const uint16_t * a = (const uint16_t *)x[i].scales; aux16[0] = a[0] & 0x0f0f; aux16[1] = (a[0] >> 4) & 0x0f0f; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2]) + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2]) + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3]) + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (tid == 0) { dst[row] = tmp; } } /* DPCT1110:7: The total declared local variable size in device function dequantize_mul_mat_vec_q5_k exceeds 128 bytes and may cause high register pressure. 
Consult with your hardware vendor to find the total register size available and adjust the code, or use smaller sub-group size to avoid high register pressure. */ static void dequantize_mul_mat_vec_q5_k(const void *__restrict__ vx, const float *__restrict__ yy, float *__restrict__ dst, const int ncols, const sycl::nd_item<3> &item_ct1) { const int row = item_ct1.get_group(2); const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q5_K * x = (const block_q5_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = item_ct1.get_local_id(2) / 2; // 0...15 const int ix = item_ct1.get_local_id(2) % 2; const int il = tid/4; // 0...3 const int ir = tid - 4*il;// 0...3 const int n = 2; const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; const uint8_t hm1 = 1 << (2*im); const uint8_t hm2 = hm1 << 4; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; uint16_t q16[8]; const uint8_t * q4 = (const uint8_t *)q16; for (int i = ix; i < num_blocks_per_row; i += 2) { const uint8_t * ql1 = x[i].qs + q_offset; const uint8_t * qh = x[i].qh + l0; const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = x[i].dm[0]; const float dmin = x[i].dm[1]; const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); sycl::float4 sum = {0.f, 0.f, 0.f, 0.f}; float smin = 0; const uint16_t * q1 = (const uint16_t *)ql1; const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[8] & 0x0f0f; q16[2] = (q1[0] >> 4) & 0x0f0f; q16[3] = (q1[8] >> 4) & 0x0f0f; q16[4] = q2[0] & 0x0f0f; q16[5] = q2[8] & 0x0f0f; q16[6] = (q2[0] >> 4) & 0x0f0f; q16[7] = (q2[8] >> 4) & 0x0f0f; for (int l = 0; l < n; ++l) { sum.x() += y1[l + 0] * (q4[l + 0] + (qh[l + 0] & (hm1 << 0) ? 16 : 0)) + y1[l + 16] * (q4[l + 2] + (qh[l + 16] & (hm1 << 0) ? 16 : 0)); sum.y() += y1[l + 32] * (q4[l + 4] + (qh[l + 0] & (hm1 << 1) ? 16 : 0)) + y1[l + 48] * (q4[l + 6] + (qh[l + 16] & (hm1 << 1) ? 16 : 0)); sum.z() += y2[l + 0] * (q4[l + 8] + (qh[l + 0] & (hm2 << 0) ? 16 : 0)) + y2[l + 16] * (q4[l + 10] + (qh[l + 16] & (hm2 << 0) ? 16 : 0)); sum.w() += y2[l + 32] * (q4[l + 12] + (qh[l + 0] & (hm2 << 1) ? 16 : 0)) + y2[l + 48] * (q4[l + 14] + (qh[l + 16] & (hm2 << 1) ? 16 : 0)); smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3] + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7]; } tmp += dall * (sum.x() * sc[0] + sum.y() * sc[1] + sum.z() * sc[4] + sum.w() * sc[5]) - dmin * smin; } #else const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; const int im = step/8; const int in = step%8; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const int8_t * s = x[i].scales; const float * y = yy + i*QK_K + step; const float d = x[i].d; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { const uint8_t h = x[i].qh[in+j] >> im; sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 
0 : 16)) + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16)) + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16)) + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (item_ct1.get_local_id(2) == 0) { dst[row] = tmp; } } static void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows, const sycl::nd_item<3> &item_ct1) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1); if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q6_K * x = (const block_q6_K *)vx + ib0; #if QK_K == 256 const int tid = item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0, 1 const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0...15 or 0...7 #if K_QUANTS_PER_ITERATION == 1 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 const int is = 0; #else const int l0 = 4 * in; // 0, 4, 8, ..., 28 const int is = in / 4; #endif const int ql_offset = 64*im + l0; const int qh_offset = 32*im + l0; const int s_offset = 8*im + is; const int y_offset = 128*im + l0; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * ql = x[i].ql + ql_offset; const uint8_t * qh = x[i].qh + qh_offset; const int8_t * s = x[i].scales + s_offset; const float d = x[i].d; #if K_QUANTS_PER_ITERATION == 1 float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32) + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32) + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32) + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32) + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32) + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32) + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32) +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32); tmp += sum; #else float sum = 0; for (int l = 0; l < 4; ++l) { sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32) + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32) + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32) + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32); } tmp += sum; #endif } #else const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...7 const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION); // 0...3 const int step = tid * K_QUANTS_PER_ITERATION; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + step; const uint8_t * ql = x[i].ql + step; const uint8_t * qh = x[i].qh + 
step; const int8_t * s = x[i].scales; const float d = x[i+0].d; float sum = 0; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32) + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32) + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32) + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (tid == 0) { dst[row] = tmp; } } static void dequantize_mul_mat_vec_q4_0_sycl_reorder(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec_reorder( vx, y, dst, ncols, nrows, item_ct1); }); } } static void dequantize_mul_mat_vec_q4_0_sycl(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); } } static void dequantize_mul_mat_vec_q4_1_sycl(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); } } static void dequantize_mul_mat_vec_q5_0_sycl(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) 
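// NOTE: reqd_sub_group_size pins the compiled sub-group width to WARP_SIZE so that
// the permute_sub_group_by_xor reduction inside dequantize_mul_mat_vec spans exactly
// the set of lanes that accumulated the same output row: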
[[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); } } static void dequantize_mul_mat_vec_q5_1_sycl(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); } } static void dequantize_mul_mat_vec_q8_0_sycl(const void *vx, const dfloat *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0); const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { dequantize_mul_mat_vec( vx, y, dst, ncols, nrows, item_ct1); }); } } static void dequantize_mul_mat_vec_q2_K_sycl(const void *vx, const float *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % QK_K == 0); const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2 const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1); }); } static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % QK_K == 0); const int ny = 2 / K_QUANTS_PER_ITERATION; const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1); }); } static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % QK_K == 0); const int ny = 2 / K_QUANTS_PER_ITERATION; const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1); }); } static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % QK_K == 0); const sycl::range<3> block_dims(1, 1, 
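// NOTE: unlike the other k-quant launchers above, q5_K uses one work-group of
// QK_WARP_SIZE threads per output row (the nd_range below is (1, 1, nrows) *
// block_dims), which is why dequantize_mul_mat_vec_q5_k takes no nrows argument and
// uses get_group(2) directly as the row index: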
QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1); }); } static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y, float *dst, const int ncols, const int nrows, dpct::queue_ptr stream) { GGML_ASSERT(ncols % QK_K == 0); const int ny = 2 / K_QUANTS_PER_ITERATION; const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1); }); } void ggml_sycl_op_dequantize_mul_mat_vec( ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i, float *dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, const dpct::queue_ptr &stream) { const int64_t ne00 = src0->ne[0]; const int64_t row_diff = row_high - row_low; GGML_ASSERT(src1->type == GGML_TYPE_F32); // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics #ifdef GGML_SYCL_F16 ggml_sycl_pool_alloc src1_dfloat_a(ctx.pool()); sycl::half *src1_dfloat = nullptr; // dfloat == half bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 || src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 || src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16; if (src1_convert_f16) { scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_sycl", dst, /*num_src=*/2, " : converting src1 to fp16"); src1_dfloat = src1_dfloat_a.alloc(ne00); const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type, dst); GGML_ASSERT(to_fp16_sycl != nullptr); to_fp16_sycl(src1_ddf_i, src1_dfloat, ne00, stream); } #else const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion #endif // GGML_SYCL_F16 switch (src0->type) { case GGML_TYPE_Q4_0: if ((ggml_tensor_extra_gpu*)dst->src[0]->extra && ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) { dequantize_mul_mat_vec_q4_0_sycl_reorder(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); } else { dequantize_mul_mat_vec_q4_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); } break; case GGML_TYPE_Q4_1: dequantize_mul_mat_vec_q4_1_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q5_0: dequantize_mul_mat_vec_q5_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q5_1: dequantize_mul_mat_vec_q5_1_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q8_0: dequantize_mul_mat_vec_q8_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q2_K: dequantize_mul_mat_vec_q2_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q3_K: dequantize_mul_mat_vec_q3_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q4_K: if ((ggml_tensor_extra_gpu *) dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { // reorder is currently not supported for dmmv 
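// NOTE: the reordered Q4_K layout stores qs, scales and dm of all blocks as separate
// contiguous arrays (see dequantize_block_q4_K_reorder), so the block_q4_K struct
// indexing used by dequantize_mul_mat_vec_q4_k cannot address it; aborting here is
// preferable to silently reading misplaced data: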
GGML_ABORT("Unimplemented dequantize case case for q4_k reorder"); } else { dequantize_mul_mat_vec_q4_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); } break; case GGML_TYPE_Q5_K: dequantize_mul_mat_vec_q5_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_Q6_K: dequantize_mul_mat_vec_q6_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream); break; case GGML_TYPE_F16: convert_mul_mat_vec_f16_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream); break; default: printf("ggml_sycl_op_dequantize_mul_mat_vec unsupported GGML_TYPE %d\n", src0->type); GGML_ABORT("fatal error"); } GGML_UNUSED(src1); GGML_UNUSED(dst); GGML_UNUSED(src1_ddq_i); GGML_UNUSED(src1_ncols); GGML_UNUSED(src1_padded_row_size); GGML_UNUSED(ctx); } ggml-org-ggml-3678254/src/ggml-sycl/dmmv.hpp000066400000000000000000000014501512524704700205250ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_DMMV_HPP #define GGML_SYCL_DMMV_HPP #include "common.hpp" void ggml_sycl_op_dequantize_mul_mat_vec( ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i, float *dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, const dpct::queue_ptr &stream); #endif // GGML_SYCL_DMMV_HPP ggml-org-ggml-3678254/src/ggml-sycl/dpct/000077500000000000000000000000001512524704700200035ustar00rootroot00000000000000ggml-org-ggml-3678254/src/ggml-sycl/dpct/helper.hpp000066400000000000000000003622251512524704700220050ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_DPCT_HELPER_HPP #define GGML_SYCL_DPCT_HELPER_HPP #include #include #include #include #ifdef GGML_SYCL_USE_INTEL_ONEMKL #include // Allow to use the same namespace for Intel oneMKL and oneMath namespace oneapi { namespace math = mkl; } #else #include #endif #include "ggml.h" #if defined(__linux__) #include #elif defined(_WIN64) #ifndef NOMINMAX #define NOMINMAX #endif #include #else #error "Only support Windows and Linux." 
#endif #if defined(__linux__) #include #include #endif #if defined(_WIN64) #ifndef NOMINMAX #define NOMINMAX #endif #include #endif #define DPCT_COMPATIBILITY_TEMP (900) #if defined(_MSC_VER) #define __dpct_align__(n) __declspec(align(n)) #define __dpct_inline__ __forceinline #else #define __dpct_align__(n) __attribute__((aligned(n))) #define __dpct_inline__ __inline__ __attribute__((always_inline)) #endif #if defined(_MSC_VER) #define __dpct_noinline__ __declspec(noinline) #else #define __dpct_noinline__ __attribute__((noinline)) #endif inline std::string get_device_type_name(const sycl::device &Device) { auto DeviceType = Device.get_info(); switch (DeviceType) { case sycl::info::device_type::cpu: return "cpu"; case sycl::info::device_type::gpu: return "gpu"; case sycl::info::device_type::host: return "host"; case sycl::info::device_type::accelerator: return "acc"; default: return "unknown"; } } inline std::string get_device_backend_and_type(const sycl::device &device) { std::stringstream device_type; sycl::backend backend = device.get_backend(); device_type << backend << ":" << get_device_type_name(device); return device_type.str(); } template struct matrix_info_t { oneapi::math::transpose transpose_info[2]; Ts value_info[2]; std::int64_t size_info[3]; std::int64_t ld_info[3]; std::int64_t groupsize_info; }; inline auto get_onemath_backend(sycl::queue& queue) #if defined(GGML_SYCL_GENERIC) || defined(GGML_SYCL_USE_INTEL_ONEMKL) -> sycl::queue& #endif { // If the backend is known at compile-time, use oneMath backend_selector to use // compile-time dispatching and avoid the need to dlopen libraries. Otherwise // fallback to runtime dispatching. #if defined(GGML_SYCL_NVIDIA) return oneapi::math::backend_selector{ queue }; #elif defined(GGML_SYCL_AMD) return oneapi::math::backend_selector{ queue }; #elif defined(GGML_SYCL_GENERIC) || defined(GGML_SYCL_USE_INTEL_ONEMKL) return queue; #else static_assert(false, "Unsupported backend"); #endif } namespace dpct { typedef sycl::queue *queue_ptr; typedef sycl::event *event_ptr; typedef char *device_ptr; typedef uint8_t byte_t; typedef sycl::buffer buffer_t; /// SYCL default exception handler inline auto exception_handler = [](sycl::exception_list exceptions) { for (std::exception_ptr const &e : exceptions) { try { std::rethrow_exception(e); } catch (sycl::exception const &e) { std::cerr << "Caught asynchronous SYCL exception:" << std::endl << e.what() << std::endl << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; } } }; enum error_code { success = 0, default_error = 999 }; enum memcpy_direction { host_to_host, host_to_device, device_to_host, device_to_device, automatic }; enum memory_region { global = 0, // device global memory constant, // device constant memory local, // device local memory shared, // memory which can be accessed by host and device }; enum class library_data_t : unsigned char { real_float = 0, complex_float, real_double, complex_double, real_half, complex_half, real_bfloat16, complex_bfloat16, real_int4, complex_int4, real_uint4, complex_uint4, real_int8, complex_int8, real_uint8, complex_uint8, real_int16, complex_int16, real_uint16, complex_uint16, real_int32, complex_int32, real_uint32, complex_uint32, real_int64, complex_int64, real_uint64, complex_uint64, real_int8_4, real_int8_32, real_uint8_4, library_data_t_size }; template struct DataType { using T2 = T; }; template struct DataType> { using T2 = std::complex; }; static void destroy_event(event_ptr event) { delete event; } static inline 
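// NOTE: host-side helper, not a SYCL work-item id: it returns the OS thread id of the
// calling thread (SYS_gettid via syscall on Linux, GetCurrentThreadId() on Windows):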
unsigned int get_tid() { #if defined(__linux__) return syscall(SYS_gettid); #elif defined(_WIN64) return GetCurrentThreadId(); #else #error "Only support Windows and Linux." #endif } namespace detail { static void get_version(const sycl::device &dev, int &major, int &minor) { // Version string has the following format: // a. OpenCL // b. // c. e.g gfx1030 std::string ver; ver = dev.get_info(); std::string::size_type i = 0; while (i < ver.size()) { if (isdigit(ver[i])) break; i++; } major = std::stoi(&(ver[i])); while (i < ver.size()) { if (ver[i] == '.') break; i++; } if (i < ver.size()) { // a. and b. i++; minor = std::stoi(&(ver[i])); } else { // c. minor = 0; } } template class generic_error_type { public: generic_error_type() = default; generic_error_type(T value) : value{value} {} operator T() const { return value; } private: T value; }; } // namespace detail // COPY from DPCT head files /// dim3 is used to store 3 component dimensions. class dim3 { public: unsigned x, y, z; constexpr dim3(unsigned x = 1, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {} dim3(const sycl::id<3> &r) : dim3(r[2], r[1], r[0]) {} operator sycl::range<3>() const { return sycl::range<3>(z, y, x); } }; // namespace dim3 inline dim3 operator*(const dim3 &a, const dim3 &b) { return dim3{a.x * b.x, a.y * b.y, a.z * b.z}; } // COPY from DPCT head files /// Pitched 2D/3D memory data. class pitched_data { public: pitched_data() : pitched_data(nullptr, 0, 0, 0) {} pitched_data(void *data, size_t pitch, size_t x, size_t y) : _data(data), _pitch(pitch), _x(x), _y(y) {} void *get_data_ptr() { return _data; } void set_data_ptr(void *data) { _data = data; } size_t get_pitch() { return _pitch; } void set_pitch(size_t pitch) { _pitch = pitch; } size_t get_x() { return _x; } void set_x(size_t x) { _x = x; } size_t get_y() { return _y; } void set_y(size_t y) { _y = y; } private: void *_data; size_t _pitch, _x, _y; }; class device_info { public: // get interface const char *get_name() const { return _name; } char *get_name() { return _name; } template , std::enable_if_t> || std::is_same_v, int> = 0> auto get_max_work_item_sizes() const { if constexpr (std::is_same_v>) return sycl::range<3>(_max_work_item_sizes_i[0], _max_work_item_sizes_i[1], _max_work_item_sizes_i[2]); else { return _max_work_item_sizes_i; } } template , std::enable_if_t> || std::is_same_v, int> = 0> auto get_max_work_item_sizes() { if constexpr (std::is_same_v>) return sycl::range<3>(_max_work_item_sizes_i[0], _max_work_item_sizes_i[1], _max_work_item_sizes_i[2]); else { return _max_work_item_sizes_i; } } bool get_host_unified_memory() const { return _host_unified_memory; } int get_major_version() const { return _major; } int get_minor_version() const { return _minor; } int get_integrated() const { return _integrated; } int get_max_clock_frequency() const { return _frequency; } int get_max_compute_units() const { return _max_compute_units; } int get_max_work_group_size() const { return _max_work_group_size; } int get_max_sub_group_size() const { return _max_sub_group_size; } int get_max_work_items_per_compute_unit() const { return _max_work_items_per_compute_unit; } int get_max_register_size_per_work_group() const { return _max_register_size_per_work_group; } template || std::is_same_v, int> = 0> auto get_max_nd_range_size() const { if constexpr (std::is_same_v) return _max_nd_range_size; else return _max_nd_range_size_i; } template || std::is_same_v, int> = 0> auto get_max_nd_range_size() { if constexpr (std::is_same_v) return _max_nd_range_size; else 
return _max_nd_range_size_i; } size_t get_global_mem_size() const { return _global_mem_size; } size_t get_local_mem_size() const { return _local_mem_size; } size_t get_max_mem_alloc_size() const { return _max_mem_alloc_size; } /// Returns the maximum clock rate of device's global memory in kHz. If /// compiler does not support this API then returns default value 3200000 kHz. unsigned int get_memory_clock_rate() const { return _memory_clock_rate; } /// Returns the maximum bus width between device and memory in bits. If /// compiler does not support this API then returns default value 64 bits. unsigned int get_memory_bus_width() const { return _memory_bus_width; } uint32_t get_device_id() const { return _device_id; } std::array get_uuid() const { return _uuid; } /// Returns global memory cache size in bytes. unsigned int get_global_mem_cache_size() const { return _global_mem_cache_size; } // set interface void set_name(const char *name) { size_t length = strlen(name); if (length < 256) { std::memcpy(_name, name, length + 1); } else { std::memcpy(_name, name, 255); _name[255] = '\0'; } } void set_max_work_item_sizes(const sycl::range<3> max_work_item_sizes) { for (int i = 0; i < 3; ++i) _max_work_item_sizes_i[i] = max_work_item_sizes[i]; } [[deprecated]] void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) { for (int i = 0; i < 3; ++i) { _max_work_item_sizes_i[i] = max_work_item_sizes[i]; } } void set_host_unified_memory(bool host_unified_memory) { _host_unified_memory = host_unified_memory; } void set_major_version(int major) { _major = major; } void set_minor_version(int minor) { _minor = minor; } void set_integrated(int integrated) { _integrated = integrated; } void set_max_clock_frequency(int frequency) { _frequency = frequency; } void set_max_compute_units(int max_compute_units) { _max_compute_units = max_compute_units; } void set_global_mem_size(size_t global_mem_size) { _global_mem_size = global_mem_size; } void set_local_mem_size(size_t local_mem_size) { _local_mem_size = local_mem_size; } void set_max_mem_alloc_size(size_t max_mem_alloc_size) { _max_mem_alloc_size = max_mem_alloc_size; } void set_max_work_group_size(int max_work_group_size) { _max_work_group_size = max_work_group_size; } void set_max_sub_group_size(int max_sub_group_size) { _max_sub_group_size = max_sub_group_size; } void set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) { _max_work_items_per_compute_unit = max_work_items_per_compute_unit; } void set_max_nd_range_size(int max_nd_range_size[]) { for (int i = 0; i < 3; i++) { _max_nd_range_size[i] = max_nd_range_size[i]; _max_nd_range_size_i[i] = max_nd_range_size[i]; } } void set_memory_clock_rate(unsigned int memory_clock_rate) { _memory_clock_rate = memory_clock_rate; } void set_memory_bus_width(unsigned int memory_bus_width) { _memory_bus_width = memory_bus_width; } void set_max_register_size_per_work_group(int max_register_size_per_work_group) { _max_register_size_per_work_group = max_register_size_per_work_group; } void set_device_id(uint32_t device_id) { _device_id = device_id; } void set_uuid(std::array uuid) { _uuid = std::move(uuid); } void set_global_mem_cache_size(unsigned int global_mem_cache_size) { _global_mem_cache_size = global_mem_cache_size; } private: char _name[256]; int _max_work_item_sizes_i[3]; bool _host_unified_memory = false; int _major; int _minor; int _integrated = 0; int _frequency; // Set estimated value 3200000 kHz as default value. 
unsigned int _memory_clock_rate = 3200000; // Set estimated value 64 bits as default value. unsigned int _memory_bus_width = 64; unsigned int _global_mem_cache_size; int _max_compute_units; int _max_work_group_size; int _max_sub_group_size; int _max_work_items_per_compute_unit; int _max_register_size_per_work_group; size_t _global_mem_size; size_t _local_mem_size; size_t _max_mem_alloc_size; size_t _max_nd_range_size[3]; int _max_nd_range_size_i[3]; uint32_t _device_id; std::array _uuid; }; static int get_major_version(const sycl::device &dev) { int major, minor; detail::get_version(dev, major, minor); return major; } static int get_minor_version(const sycl::device &dev) { int major, minor; detail::get_version(dev, major, minor); return minor; } static void get_device_info(device_info &out, const sycl::device &dev) { device_info prop; prop.set_name(dev.get_info().c_str()); int major, minor; detail::get_version(dev, major, minor); prop.set_major_version(major); prop.set_minor_version(minor); prop.set_max_work_item_sizes( #if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION < 20220902) // oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes // is an enum class element dev.get_info()); #else // SYCL 2020-conformant code, max_work_item_sizes is a struct templated by // an int dev.get_info>()); #endif prop.set_host_unified_memory(dev.has(sycl::aspect::usm_host_allocations)); prop.set_max_clock_frequency( dev.get_info() * 1000); prop.set_max_compute_units( dev.get_info()); prop.set_max_work_group_size( dev.get_info()); prop.set_global_mem_size(dev.get_info()); prop.set_local_mem_size(dev.get_info()); prop.set_max_mem_alloc_size(dev.get_info()); #if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6) if (dev.has(sycl::aspect::ext_intel_memory_clock_rate)) { unsigned int tmp = dev.get_info(); if (tmp != 0) prop.set_memory_clock_rate(1000 * tmp); } if (dev.has(sycl::aspect::ext_intel_memory_bus_width)) { prop.set_memory_bus_width( dev.get_info()); } if (dev.has(sycl::aspect::ext_intel_device_id)) { prop.set_device_id( dev.get_info()); } if (dev.has(sycl::aspect::ext_intel_device_info_uuid)) { prop.set_uuid(dev.get_info()); } #elif defined(_MSC_VER) && !defined(__clang__) #pragma message("get_device_info: querying memory_clock_rate and \ memory_bus_width are not supported by the compiler used. \ Use 3200000 kHz as memory_clock_rate default value. \ Use 64 bits as memory_bus_width default value.") #else #warning "get_device_info: querying memory_clock_rate and \ memory_bus_width are not supported by the compiler used. \ Use 3200000 kHz as memory_clock_rate default value. \ Use 64 bits as memory_bus_width default value." #endif size_t max_sub_group_size = 1; std::vector sub_group_sizes = dev.get_info(); for (const auto &sub_group_size : sub_group_sizes) { if (max_sub_group_size < sub_group_size) max_sub_group_size = sub_group_size; } prop.set_max_sub_group_size(max_sub_group_size); prop.set_max_work_items_per_compute_unit( dev.get_info()); int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF}; prop.set_max_nd_range_size(max_nd_range_size); // Estimates max register size per work group, feel free to update the value // according to device properties. 
prop.set_max_register_size_per_work_group(65536); prop.set_global_mem_cache_size( dev.get_info()); out = prop; } /// dpct device extension class device_ext : public sycl::device { typedef std::mutex mutex_type; public: device_ext() : sycl::device() {} ~device_ext() { std::lock_guard lock(m_mutex); clear_queues(); } device_ext(const sycl::device &base) : sycl::device(base) { std::lock_guard lock(m_mutex); init_queues(); } int is_native_atomic_supported() { return 0; } int get_major_version() const { return dpct::get_major_version(*this); } int get_minor_version() const { return dpct::get_minor_version(*this); } int get_max_compute_units() const { return get_device_info().get_max_compute_units(); } /// Return the maximum clock frequency of this device in KHz. int get_max_clock_frequency() const { return get_device_info().get_max_clock_frequency(); } int get_integrated() const { return get_device_info().get_integrated(); } int get_max_sub_group_size() const { return get_device_info().get_max_sub_group_size(); } int get_max_register_size_per_work_group() const { return get_device_info().get_max_register_size_per_work_group(); } int get_max_work_group_size() const { return get_device_info().get_max_work_group_size(); } int get_mem_base_addr_align() const { return get_info(); } size_t get_global_mem_size() const { return get_device_info().get_global_mem_size(); } size_t get_max_mem_alloc_size() const { return get_device_info().get_max_mem_alloc_size(); } /// Get the number of bytes of free and total memory on the SYCL device. /// \param [out] free_memory The number of bytes of free memory on the /// SYCL device. \param [out] total_memory The number of bytes of total /// memory on the SYCL device. void get_memory_info(size_t &free_memory, size_t &total_memory) { total_memory = get_device_info().get_global_mem_size(); const char *warning_info = "get_memory_info: [warning] ext_intel_free_memory is not " "supported (export/set ZES_ENABLE_SYSMAN=1 to support), " "use total memory as free memory"; #if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105) if (!has(sycl::aspect::ext_intel_free_memory)) { std::cerr << warning_info << std::endl; free_memory = total_memory; } else { free_memory = get_info(); } #else std::cerr << warning_info << std::endl; free_memory = total_memory; #if defined(_MSC_VER) && !defined(__clang__) #pragma message("Querying the number of bytes of free memory is not supported") #else #warning "Querying the number of bytes of free memory is not supported" #endif #endif } void get_device_info(device_info &out) const { dpct::get_device_info(out, *this); } device_info get_device_info() const { device_info prop; dpct::get_device_info(prop, *this); return prop; } void reset() { std::lock_guard lock(m_mutex); clear_queues(); init_queues(); } sycl::queue &in_order_queue() { return _q_in_order; } sycl::queue &out_of_order_queue() { return _q_out_of_order; } sycl::queue &default_queue() { return in_order_queue(); } void queues_wait_and_throw() { std::unique_lock lock(m_mutex); lock.unlock(); for (auto &q : _queues) { q.wait_and_throw(); } // Guard the destruct of current_queues to make sure the ref count is // safe. 
lock.lock(); } sycl::queue create_queue(bool enable_exception_handler = false) { return create_in_order_queue(enable_exception_handler); } sycl::queue create_queue(sycl::device device, bool enable_exception_handler = false) { return create_in_order_queue(device, enable_exception_handler); } sycl::queue create_in_order_queue(bool enable_exception_handler = false) { std::lock_guard lock(m_mutex); return create_queue_impl(enable_exception_handler, sycl::property::queue::in_order()); } sycl::queue create_in_order_queue(sycl::device device, bool enable_exception_handler = false) { std::lock_guard lock(m_mutex); return create_queue_impl(device, enable_exception_handler, sycl::property::queue::in_order()); } sycl::queue create_out_of_order_queue( bool enable_exception_handler = false) { std::lock_guard lock(m_mutex); return create_queue_impl(enable_exception_handler); } void destroy_queue(sycl::queue queue) { std::lock_guard lock(m_mutex); _queues.erase(std::remove_if(_queues.begin(), _queues.end(), [=](const sycl::queue &q) -> bool { return q == queue; }), _queues.end()); } void set_saved_queue(sycl::queue q) { std::lock_guard lock(m_mutex); _saved_queue = q; } sycl::queue get_saved_queue() const { std::lock_guard lock(m_mutex); return _saved_queue; } private: void clear_queues() { _queues.clear(); } void init_queues() { _q_in_order = create_queue_impl(true, sycl::property::queue::in_order()); _q_out_of_order = create_queue_impl(true); _saved_queue = default_queue(); } /// Caller should acquire resource \p m_mutex before calling this /// function. template sycl::queue create_queue_impl(bool enable_exception_handler, Properties... properties) { sycl::async_handler eh = {}; if (enable_exception_handler) { eh = exception_handler; } _queues.push_back(sycl::queue( *this, eh, sycl::property_list( #ifdef DPCT_PROFILING_ENABLED sycl::property::queue::enable_profiling(), #endif properties...))); return _queues.back(); } template sycl::queue create_queue_impl(sycl::device device, bool enable_exception_handler, Properties... properties) { sycl::async_handler eh = {}; if (enable_exception_handler) { eh = exception_handler; } _queues.push_back(sycl::queue( device, eh, sycl::property_list( #ifdef DPCT_PROFILING_ENABLED sycl::property::queue::enable_profiling(), #endif properties...))); return _queues.back(); } void get_version(int &major, int &minor) const { detail::get_version(*this, major, minor); } sycl::queue _q_in_order, _q_out_of_order; sycl::queue _saved_queue; std::vector _queues; mutable mutex_type m_mutex; }; /// device manager class dev_mgr { public: device_ext ¤t_device() { unsigned int dev_id = current_device_id(); check_id(dev_id); return *_devs[dev_id]; } device_ext &cpu_device() const { std::lock_guard lock(m_mutex); if (_cpu_device == -1) { throw std::runtime_error("no valid cpu device"); } else { return *_devs[_cpu_device]; } } device_ext &get_device(unsigned int id) const { std::lock_guard lock(m_mutex); check_id(id); return *_devs[id]; } unsigned int current_device_id() const { std::lock_guard lock(m_mutex); auto it = _thread2dev_map.find(get_tid()); if (it != _thread2dev_map.end()) return it->second; return DEFAULT_DEVICE_ID; } /// Select device with a device ID. /// \param [in] id The id of the device which can /// be obtained through get_device_id(const sycl::device). 
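/// Example (minimal sketch, assuming this header is included from the ggml SYCL
/// backend and at least one SYCL device is visible; only helpers defined in this
/// file plus standard SYCL USM calls are used): bind the calling thread to
/// device 0 and run a trivial fill on its in-order queue.
/// \code
///   dpct::select_device(0);                      // per-thread device binding
///   sycl::queue &q = dpct::get_in_order_queue(); // queue of the bound device
///   float *x = sycl::malloc_device<float>(16, q);
///   q.fill(x, 1.0f, 16).wait();
///   sycl::free(x, q);
/// \endcode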
void select_device(unsigned int id) { std::lock_guard lock(m_mutex); check_id(id); _thread2dev_map[get_tid()] = id; } unsigned int device_count() { return _devs.size(); } unsigned int get_device_id(const sycl::device &dev) { unsigned int id = 0; for (auto &dev_item : _devs) { if (*dev_item == dev) { return id; } id++; } return -1; } inline std::string get_preferred_gpu_platform_name() { std::string result; std::string filter = ""; char* env = getenv("ONEAPI_DEVICE_SELECTOR"); if (env) { if (std::strstr(env, "level_zero")) { filter = "level-zero"; } else if (std::strstr(env, "opencl")) { filter = "opencl"; } else if (std::strstr(env, "cuda")) { filter = "cuda"; } else if (std::strstr(env, "hip")) { filter = "hip"; } else { throw std::runtime_error("invalid device filter: " + std::string(env)); } } else { auto default_device = sycl::device(sycl::default_selector_v); auto default_platform_name = default_device.get_platform().get_info(); if (std::strstr(default_platform_name.c_str(), "Level-Zero") || default_device.is_cpu()) { filter = "level-zero"; } else if (std::strstr(default_platform_name.c_str(), "CUDA")) { filter = "cuda"; } else if (std::strstr(default_platform_name.c_str(), "HIP")) { filter = "hip"; } } auto platform_list = sycl::platform::get_platforms(); for (const auto& platform : platform_list) { auto devices = platform.get_devices(); auto gpu_dev = std::find_if(devices.begin(), devices.end(), [](const sycl::device& d) { return d.is_gpu(); }); if (gpu_dev == devices.end()) { // cout << "platform [" << platform_name // << "] does not contain GPU devices, skipping\n"; continue; } auto platform_name = platform.get_info(); std::string platform_name_low_case; platform_name_low_case.resize(platform_name.size()); std::transform( platform_name.begin(), platform_name.end(), platform_name_low_case.begin(), ::tolower); if (platform_name_low_case.find(filter) == std::string::npos) { // cout << "platform [" << platform_name // << "] does not match with requested " // << filter << ", skipping\n"; continue; } result = platform_name; } if (result.empty()) throw std::runtime_error("can not find preferred GPU platform"); return result; } template std::enable_if_t< std::is_invocable_r_v> select_device(const DeviceSelector &selector = sycl::gpu_selector_v) { sycl::device selected_device = sycl::device(selector); unsigned int selected_device_id = get_device_id(selected_device); select_device(selected_device_id); } /// Returns the instance of device manager singleton. 
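/// Example (sketch): enumerate the devices discovered by the manager and print
/// the one currently bound to this thread. device_count(), get_device() and
/// current_device_id() are the accessors defined above; printing assumes
/// <cstdio> is available.
/// \code
///   dpct::dev_mgr &mgr = dpct::dev_mgr::instance();
///   for (unsigned int i = 0; i < mgr.device_count(); ++i) {
///       std::printf("device %u: %s\n", i,
///                   mgr.get_device(i).get_device_info().get_name());
///   }
///   std::printf("current device id: %u\n", mgr.current_device_id());
/// \endcode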
static dev_mgr &instance() { static dev_mgr d_m; return d_m; } dev_mgr(const dev_mgr &) = delete; dev_mgr &operator=(const dev_mgr &) = delete; dev_mgr(dev_mgr &&) = delete; dev_mgr &operator=(dev_mgr &&) = delete; private: mutable std::recursive_mutex m_mutex; static bool compare_dev(sycl::device &device1, sycl::device &device2) { sycl::backend backend1 = device1.get_backend(); sycl::backend backend2 = device2.get_backend(); // levelzero backends always come first if(backend1 == sycl::backend::ext_oneapi_level_zero && backend2 != sycl::backend::ext_oneapi_level_zero) return true; if(backend1 != sycl::backend::ext_oneapi_level_zero && backend2 == sycl::backend::ext_oneapi_level_zero) return false; dpct::device_info prop1; dpct::get_device_info(prop1, device1); dpct::device_info prop2; dpct::get_device_info(prop2, device2); return prop1.get_max_compute_units() > prop2.get_max_compute_units(); } static int convert_backend_index(std::string & backend) { if (backend == "ext_oneapi_level_zero:gpu") return 0; if (backend == "opencl:gpu") return 1; if (backend == "ext_oneapi_cuda:gpu") return 2; if (backend == "ext_oneapi_hip:gpu") return 3; if (backend == "opencl:cpu") return 4; if (backend == "opencl:acc") return 5; printf("convert_backend_index: can't handle backend=%s\n", backend.c_str()); GGML_ABORT("fatal error"); } static bool compare_backend(std::string &backend1, std::string &backend2) { return convert_backend_index(backend1) < convert_backend_index(backend2); } dev_mgr() { sycl::device default_device = sycl::device(sycl::default_selector_v); _devs.push_back(std::make_shared(default_device)); std::vector sycl_all_devs; // Collect other devices except for the default device. if (default_device.is_cpu()) _cpu_device = 0; auto Platforms = sycl::platform::get_platforms(); // Keep track of the number of devices per backend std::map DeviceNums; std::map> backend_devices; auto preferred_platform_name = get_preferred_gpu_platform_name(); while (!Platforms.empty()) { auto Platform = Platforms.back(); Platforms.pop_back(); auto platform_name = Platform.get_info(); if (platform_name.compare(preferred_platform_name) != 0) { continue; } auto devices = Platform.get_devices(); std::string backend_type = get_device_backend_and_type(devices[0]); for (const auto &device : devices) { backend_devices[backend_type].push_back(device); } } std::vector keys; for(auto it = backend_devices.begin(); it != backend_devices.end(); ++it) { keys.push_back(it->first); } std::sort(keys.begin(), keys.end(), compare_backend); for (auto &key : keys) { std::vector devs = backend_devices[key]; std::sort(devs.begin(), devs.end(), compare_dev); for (const auto &dev : devs) { sycl_all_devs.push_back(dev); } } for (auto &dev : sycl_all_devs) { if (dev == default_device) { continue; } _devs.push_back(std::make_shared(dev)); if (_cpu_device == -1 && dev.is_cpu()) { _cpu_device = _devs.size() - 1; } } } void check_id(unsigned int id) const { if (id >= _devs.size()) { throw std::runtime_error("invalid device id"); } } std::vector> _devs; /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current /// thread id in _thread2dev_map, which means default device should be used /// for the current thread. const unsigned int DEFAULT_DEVICE_ID = 0; /// thread-id to device-id map. 
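/// Behavioural note (sketch, assuming <thread> and <cassert>): a host thread
/// that never calls select_device() has no entry in this map, so
/// current_device_id() falls back to DEFAULT_DEVICE_ID (device 0).
/// \code
///   std::thread worker([] {
///       assert(dpct::dev_mgr::instance().current_device_id() == 0);
///   });
///   worker.join();
/// \endcode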
std::map _thread2dev_map; int _cpu_device = -1; }; static inline sycl::queue &get_default_queue() { return dev_mgr::instance().current_device().default_queue(); } namespace detail { enum class pointer_access_attribute { host_only = 0, device_only, host_device, end }; static pointer_access_attribute get_pointer_attribute(sycl::queue &q, const void *ptr) { switch (sycl::get_pointer_type(ptr, q.get_context())) { case sycl::usm::alloc::unknown: return pointer_access_attribute::host_only; case sycl::usm::alloc::device: return pointer_access_attribute::device_only; case sycl::usm::alloc::shared: case sycl::usm::alloc::host: return pointer_access_attribute::host_device; } } template inline constexpr std::uint64_t get_type_combination_id(ArgT Val) { static_assert((unsigned char)library_data_t::library_data_t_size <= std::numeric_limits::max() && "library_data_t size exceeds limit."); static_assert(std::is_same_v, "Unsupported ArgT"); return (std::uint64_t)Val; } template inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal, RestT... RestVal) { static_assert((std::uint8_t)library_data_t::library_data_t_size <= std::numeric_limits::max() && "library_data_t size exceeds limit."); static_assert(sizeof...(RestT) <= 8 && "Too many parameters"); static_assert(std::is_same_v, "Unsupported FirstT"); return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal); } class mem_mgr { mem_mgr() { // Reserved address space, no real memory allocation happens here. #if defined(__linux__) mapped_address_space = (byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #elif defined(_WIN64) mapped_address_space = (byte_t *)VirtualAlloc( NULL, // NULL specified as the base address parameter mapped_region_size, // Size of allocation MEM_RESERVE, // Allocate reserved pages PAGE_NOACCESS); // Protection = no access #else #error "Only support Windows and Linux." #endif next_free = mapped_address_space; } public: using buffer_id_t = int; struct allocation { buffer_t buffer; byte_t *alloc_ptr; size_t size; }; ~mem_mgr() { #if defined(__linux__) munmap(mapped_address_space, mapped_region_size); #elif defined(_WIN64) VirtualFree(mapped_address_space, 0, MEM_RELEASE); #else #error "Only support Windows and Linux." #endif } mem_mgr(const mem_mgr &) = delete; mem_mgr &operator=(const mem_mgr &) = delete; mem_mgr(mem_mgr &&) = delete; mem_mgr &operator=(mem_mgr &&) = delete; /// Allocate void *mem_alloc(size_t size) { if (!size) return nullptr; std::lock_guard lock(m_mutex); if (next_free + size > mapped_address_space + mapped_region_size) { throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool"); } // Allocation sycl::range<1> r(size); buffer_t buf(r); allocation A{buf, next_free, size}; // Map allocation to device pointer void *result = next_free; m_map.emplace(next_free + size, A); // Update pointer to the next free space. next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1); return result; } /// Deallocate void mem_free(const void *ptr) { if (!ptr) return; std::lock_guard lock(m_mutex); auto it = get_map_iterator(ptr); m_map.erase(it); } /// map: device pointer -> allocation(buffer, alloc_ptr, size) allocation translate_ptr(const void *ptr) { std::lock_guard lock(m_mutex); auto it = get_map_iterator(ptr); return it->second; } /// Check if the pointer represents device pointer or not. 
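/// Example (sketch; mem_mgr sits in dpct::detail and is an internal helper for
/// buffer-based pointer translation, shown here only to illustrate the
/// reserved virtual-pointer pool; asserts assume <cassert>): allocate from the
/// pool, verify the pointer lies inside the reserved range, then release it.
/// \code
///   auto &mm = dpct::detail::mem_mgr::instance();
///   void *p  = mm.mem_alloc(1024);       // virtual pointer into the pool
///   assert(mm.is_device_ptr(p));         // falls inside the reserved range
///   auto rec = mm.translate_ptr(p);      // {buffer, alloc_ptr, size}
///   assert(rec.size == 1024);
///   mm.mem_free(p);
/// \endcode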
bool is_device_ptr(const void *ptr) const { std::lock_guard lock(m_mutex); return (mapped_address_space <= ptr) && (ptr < mapped_address_space + mapped_region_size); } /// Returns the instance of memory manager singleton. static mem_mgr &instance() { static mem_mgr m; return m; } private: std::map m_map; mutable std::mutex m_mutex; byte_t *mapped_address_space; byte_t *next_free; const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024; const size_t alignment = 256; /// This padding may be defined to some positive value to debug /// out of bound accesses. const size_t extra_padding = 0; std::map::iterator get_map_iterator(const void *ptr) { auto it = m_map.upper_bound(const_cast(reinterpret_cast(ptr))); if (it == m_map.end()) { // Not a virtual pointer. throw std::runtime_error("can not get buffer from non-virtual pointer"); } const allocation &alloc = it->second; if (ptr < alloc.alloc_ptr) { // Out of bound. // This may happen if there's a gap between allocations due to alignment // or extra padding and pointer points to this gap. throw std::runtime_error("invalid virtual pointer"); } return it; } }; template class accessor; template class memory_traits { public: static constexpr sycl::access::target target = sycl::access::target::device; static constexpr sycl::access_mode mode = (Memory == constant) ? sycl::access_mode::read : sycl::access_mode::read_write; static constexpr size_t type_size = sizeof(T); using element_t = typename std::conditional::type; using value_t = typename std::remove_cv::type; template using accessor_t = typename std::conditional< Memory == local, sycl::local_accessor, sycl::accessor>::type; using pointer_t = T *; }; static inline void *dpct_malloc(size_t size, sycl::queue &q) { return sycl::malloc_device(size, q.get_device(), q.get_context()); } #define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F)) static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z, sycl::queue &q) { pitch = PITCH_DEFAULT_ALIGN(x); return dpct_malloc(pitch * y * z, q); } /** * @brief Sets \p value to the first \p size elements starting from \p dev_ptr in \p q. * @tparam valueT The type of the element to be set. * @param [in] q The queue in which the operation is done. * @param [in] dev_ptr Pointer to the virtual device memory address. * @param [in] value The value to be set. * @param [in] size Number of elements to be set to the value. * @return An event representing the memset operation. */ template static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr, valueT value, size_t size) { return q.fill(dev_ptr, value, size); } /** * @brief Sets \p value to the 3D memory region pointed by \p data in \p q. * @tparam valueT The type of the element to be set. * @param [in] q The queue in which the operation is done. * @param [in] data Pointer to the pitched device memory region. * @param [in] value The value to be set. * @param [in] size 3D memory region by number of elements. * @return An event list representing the memset operations. 
*/ template static inline std::vector dpct_memset(sycl::queue &q, pitched_data data, valueT value, sycl::range<3> size) { std::vector event_list; size_t slice = data.get_pitch() * data.get_y(); unsigned char *data_surface = (unsigned char *)data.get_data_ptr(); for (size_t z = 0; z < size.get(2); ++z) { unsigned char *data_ptr = data_surface; for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0))); data_ptr += data.get_pitch(); } data_surface += slice; } return event_list; } /** * @brief Sets \p val to the pitched 2D memory region pointed by \p ptr in \p q. * @tparam valueT The type of the element to be set. * @param [in] q The queue in which the operation is done. * @param [in] ptr Pointer to the virtual device memory. * @param [in] pitch The pitch size by number of elements, including padding. * @param [in] val The value to be set. * @param [in] x The width of memory region by number of elements. * @param [in] y The height of memory region by number of elements. * @return An event list representing the memset operations. */ template static inline std::vector dpct_memset(sycl::queue &q, void *ptr, size_t pitch, valueT val, size_t x, size_t y) { return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val, sycl::range<3>(x, y, 1)); } static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr, const void *from_ptr, memcpy_direction dir) { switch (dir) { case memcpy_direction::host_to_host: case memcpy_direction::host_to_device: case memcpy_direction::device_to_host: case memcpy_direction::device_to_device: return dir; case memcpy_direction::automatic: { // table[to_attribute][from_attribute] static const memcpy_direction direction_table[static_cast(pointer_access_attribute::end)] [static_cast(pointer_access_attribute::end)] = {{memcpy_direction::host_to_host, memcpy_direction::device_to_host, memcpy_direction::host_to_host}, {memcpy_direction::host_to_device, memcpy_direction::device_to_device, memcpy_direction::device_to_device}, {memcpy_direction::host_to_host, memcpy_direction::device_to_device, memcpy_direction::device_to_device}}; return direction_table[static_cast(get_pointer_attribute( q, to_ptr))][static_cast(get_pointer_attribute(q, from_ptr))]; } default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } } static sycl::event dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction, const std::vector &dep_events = {}) { if (!size) return sycl::event{}; return q.memcpy(to_ptr, from_ptr, size, dep_events); GGML_UNUSED(direction); } // Get actual copy range and make sure it will not exceed range. static inline size_t get_copy_range(sycl::range<3> size, size_t slice, size_t pitch) { return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0); } static inline size_t get_offset(sycl::id<3> id, size_t slice, size_t pitch) { return slice * id.get(2) + pitch * id.get(1) + id.get(0); } /// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr /// and \p from_range to another specified by \p to_ptr and \p to_range. 
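/// Worked example of the offset/extent helpers defined above and used by the
/// copy below (illustrative numbers only): for a destination with row pitch
/// to_range[0] = 512 bytes and to_range[1] = 64 rows per slice, the slice
/// stride is 512 * 64 = 32768 bytes, so the element at id = (x, y, z) =
/// (16, 3, 2) starts at byte offset
///   get_offset(id, slice, pitch) = slice * z + pitch * y + x
///                                = 32768 * 2 + 512 * 3 + 16 = 67088,
/// and copying a size = {256, 32, 4} box touches at most
///   get_copy_range(size, slice, pitch) = slice * (4 - 1) + pitch * (32 - 1) + 256
///                                      = 98304 + 15872 + 256 = 114432 bytes.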
static inline std::vector dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, sycl::range<3> to_range, sycl::range<3> from_range, sycl::id<3> to_id, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction, const std::vector &dep_events = {}) { // RAII for host pointer class host_buffer { void *_buf; size_t _size; sycl::queue &_q; const std::vector &_deps; // free operation depends public: host_buffer(size_t size, sycl::queue &q, const std::vector &deps) : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {} void *get_ptr() const { return _buf; } size_t get_size() const { return _size; } ~host_buffer() { if (_buf) { _q.submit([&](sycl::handler &cgh) { cgh.depends_on(_deps); cgh.host_task([buf = _buf] { std::free(buf); }); }); } } }; std::vector event_list; size_t to_slice = to_range.get(1) * to_range.get(0), from_slice = from_range.get(1) * from_range.get(0); unsigned char *to_surface = (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0)); const unsigned char *from_surface = (const unsigned char *)from_ptr + get_offset(from_id, from_slice, from_range.get(0)); if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) { return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2), direction, dep_events)}; } direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction); size_t size_slice = size.get(1) * size.get(0); switch (direction) { case host_to_host: for (size_t z = 0; z < size.get(2); ++z) { unsigned char *to_ptr = to_surface; const unsigned char *from_ptr = from_surface; if (to_range.get(0) == from_range.get(0) && to_range.get(0) == size.get(0)) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice, direction, dep_events)); } else { for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0), direction, dep_events)); to_ptr += to_range.get(0); from_ptr += from_range.get(0); } } to_surface += to_slice; from_surface += from_slice; } break; case host_to_device: { host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q, event_list); std::vector host_events; if (to_slice == size_slice) { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, dep_events); } else { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy( q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // If has padding data, not sure whether it is useless. So fill temp // buffer with it. std::vector{ dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(), device_to_host, dep_events)}); } // Copy from temp host buffer to device with only one submit. event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(), buf.get_size(), host_to_device, host_events)); break; } case device_to_host: { host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q, event_list); // Copy from host temp buffer to host target with reshaping. event_list = dpct_memcpy( q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // Copy from device to temp host buffer with only one submit. 
std::vector{dpct_memcpy(q, buf.get_ptr(), from_surface, buf.get_size(), device_to_host, dep_events)}); break; } case device_to_device: event_list.push_back(q.submit([&](sycl::handler &cgh){ cgh.depends_on(dep_events); cgh.parallel_for( size, [=](sycl::id<3> id) { to_surface[get_offset(id, to_slice, to_range.get(0))] = from_surface[get_offset(id, from_slice, from_range.get(0))]; }); })); break; default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } return event_list; } /// memcpy 2D/3D matrix specified by pitched_data. static inline std::vector dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id, pitched_data from, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction = automatic) { return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), sycl::range<3>(to.get_pitch(), to.get_y(), 1), sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id, size, direction); } /// memcpy 2D matrix with pitch. static inline std::vector dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t to_pitch, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic) { return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1), sycl::range<3>(from_pitch, y, 1), sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), sycl::range<3>(x, y, 1), direction); } namespace deprecated { template class usm_allocator { private: using Alloc = sycl::usm_allocator; Alloc _impl; public: using value_type = typename std::allocator_traits::value_type; using pointer = typename std::allocator_traits::pointer; using const_pointer = typename std::allocator_traits::const_pointer; using void_pointer = typename std::allocator_traits::void_pointer; using const_void_pointer = typename std::allocator_traits::const_void_pointer; using reference = typename std::allocator_traits::value_type &; using const_reference = const typename std::allocator_traits::value_type &; using difference_type = typename std::allocator_traits::difference_type; using size_type = typename std::allocator_traits::size_type; using propagate_on_container_copy_assignment = typename std::allocator_traits< Alloc>::propagate_on_container_copy_assignment; using propagate_on_container_move_assignment = typename std::allocator_traits< Alloc>::propagate_on_container_move_assignment; using propagate_on_container_swap = typename std::allocator_traits::propagate_on_container_swap; using is_always_equal = typename std::allocator_traits::is_always_equal; template struct rebind { typedef usm_allocator other; }; usm_allocator() : _impl(dpct::get_default_queue()) {} ~usm_allocator() {} usm_allocator(const usm_allocator &other) : _impl(other._impl) {} usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {} pointer address(reference r) { return &r; } const_pointer address(const_reference r) { return &r; } pointer allocate(size_type cnt, const_void_pointer hint = nullptr) { return std::allocator_traits::allocate(_impl, cnt, hint); } void deallocate(pointer p, size_type cnt) { std::allocator_traits::deallocate(_impl, p, cnt); } size_type max_size() const { return std::allocator_traits::max_size(_impl); } bool operator==(const usm_allocator &other) const { return _impl == other._impl; } bool operator!=(const usm_allocator &other) const { return _impl != other._impl; } }; } // namespace deprecated inline void dpct_free(void *ptr, const sycl::queue &q) { if (ptr) { sycl::free(ptr, q.get_context()); } } template inline auto get_memory(const void *x) { T *new_x = 
reinterpret_cast(const_cast(x)); return new_x; } template inline typename DataType::T2 get_value(const T *s, sycl::queue &q) { using Ty = typename DataType::T2; Ty s_h; if (get_pointer_attribute(q, s) == pointer_access_attribute::device_only) detail::dpct_memcpy(q, (void *)&s_h, (const void *)s, sizeof(T), device_to_host) .wait(); else s_h = *reinterpret_cast(s); return s_h; } } // namespace detail template inline auto get_value(const T *s, sycl::queue &q) { return detail::get_value(s, q); } namespace detail { template inline void gemm_impl(sycl::queue & q, oneapi::math::transpose a_trans, oneapi::math::transpose b_trans, int m, int n, int k, const void * alpha, const void * a, int lda, const void * b, int ldb, const void * beta, void * c, int ldc) { Ts alpha_value = dpct::get_value(reinterpret_cast(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast(beta), q); auto data_a = get_memory(a); auto data_b = get_memory(b); auto data_c = get_memory(c); oneapi::math::blas::column_major::gemm(get_onemath_backend(q), a_trans, b_trans, m, n, k, alpha_value, data_a, lda, data_b, ldb, beta_value, data_c, ldc); } template class vectorized_binary { public: inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) { VecT v4; for (size_t i = 0; i < v4.size(); ++i) { v4[i] = binary_op(a[i], b[i]); } return v4; } }; template class vectorized_binary< VecT, BinaryOperation, std::void_t>> { public: inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) { return binary_op(a, b).template as(); } }; template inline void gemm_batch_impl(sycl::queue & q, oneapi::math::transpose a_trans, oneapi::math::transpose b_trans, int m, int n, int k, const void * alpha, const void ** a, int lda, const void ** b, int ldb, const void * beta, void ** c, int ldc, int batch_size, matrix_info_t * matrix_info) { Ts alpha_value = dpct::get_value(reinterpret_cast(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast(beta), q); matrix_info->transpose_info[0] = a_trans; matrix_info->transpose_info[1] = b_trans; matrix_info->value_info[0] = alpha_value; matrix_info->value_info[1] = beta_value; matrix_info->size_info[0] = m; matrix_info->size_info[1] = n; matrix_info->size_info[2] = k; matrix_info->ld_info[0] = lda; matrix_info->ld_info[1] = ldb; matrix_info->ld_info[2] = ldc; matrix_info->groupsize_info = batch_size; sycl::event e = oneapi::math::blas::column_major::gemm_batch( get_onemath_backend(q), matrix_info->transpose_info, matrix_info->transpose_info + 1, matrix_info->size_info, matrix_info->size_info + 1, matrix_info->size_info + 2, reinterpret_cast(matrix_info->value_info), reinterpret_cast(a), matrix_info->ld_info, reinterpret_cast(b), matrix_info->ld_info + 1, reinterpret_cast(matrix_info->value_info + 1), reinterpret_cast(c), matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info)); } template inline void gemm_batch_impl(sycl::queue & q, oneapi::math::transpose a_trans, oneapi::math::transpose b_trans, int m, int n, int k, const void * alpha, const void * a, int lda, long long int stride_a, const void * b, int ldb, long long int stride_b, const void * beta, void * c, int ldc, long long int stride_c, int batch_size) { Ts alpha_value = dpct::get_value(reinterpret_cast(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast(beta), q); auto data_a = get_memory(a); auto data_b = get_memory(b); auto data_c = get_memory(c); oneapi::math::blas::column_major::gemm_batch(get_onemath_backend(q), a_trans, b_trans, m, n, k, alpha_value, data_a, lda, stride_a, data_b, ldb, stride_b, 
beta_value, data_c, ldc, stride_c, batch_size); } } // namespace detail template inline unsigned vectorized_binary(unsigned a, unsigned b, const BinaryOperation binary_op) { sycl::vec v0{a}, v1{b}; auto v2 = v0.as(); auto v3 = v1.as(); auto v4 = detail::vectorized_binary()(v2, v3, binary_op); v0 = v4.template as>(); return v0; } static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction); } static inline unsigned int select_device(unsigned int id) { dev_mgr::instance().select_device(id); return id; } template T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask, unsigned int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; unsigned int target_offset = (id % logical_sub_group_size) ^ mask; return sycl::select_from_group(g, x, target_offset < logical_sub_group_size ? start_index + target_offset : id); } template using dot_product_acc_t = std::conditional_t< std::is_unsigned_v && std::is_unsigned_v, uint32_t, int32_t>; template sycl::vec extract_and_sign_or_zero_extend4(T val) { return sycl::vec(val) .template as, int8_t, uint8_t>, 4>>() .template convert(); } template inline auto dp4a(T1 a, T2 b, T3 c) { dot_product_acc_t res = c; auto va = extract_and_sign_or_zero_extend4(a); auto vb = extract_and_sign_or_zero_extend4(b); res += va[0] * vb[0]; res += va[1] * vb[1]; res += va[2] * vb[2]; res += va[3] * vb[3]; return res; } struct sub_sat { template auto operator()(const T x, const T y) const { return sycl::sub_sat(x, y); } }; template inline T vectorized_min(T a, T b) { sycl::vec v0{a}, v1{b}; auto v2 = v0.template as(); auto v3 = v1.template as(); auto v4 = sycl::min(v2, v3); v0 = v4.template as>(); return v0; } inline float pow(const float a, const int b) { return sycl::pown(a, b); } inline double pow(const double a, const int b) { return sycl::pown(a, b); } inline float pow(const float a, const float b) { return sycl::pow(a, b); } inline double pow(const double a, const double b) { return sycl::pow(a, b); } template inline typename std::enable_if_t, T> pow(const T a, const U b) { return sycl::pow(a, static_cast(b)); } template inline typename std::enable_if_t, double> pow(const T a, const U b) { return sycl::pow(static_cast(a), static_cast(b)); } inline double min(const double a, const float b) { return sycl::fmin(a, static_cast(b)); } inline double min(const float a, const double b) { return sycl::fmin(static_cast(a), b); } inline float min(const float a, const float b) { return sycl::fmin(a, b); } inline double min(const double a, const double b) { return sycl::fmin(a, b); } inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) { return sycl::min(a, static_cast(b)); } inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) { return sycl::min(static_cast(a), b); } inline std::int32_t min(const std::int32_t a, const std::int32_t b) { return sycl::min(a, b); } inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) { return sycl::min(a, static_cast(b)); } inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) { return sycl::min(static_cast(a), b); } inline std::int64_t min(const std::int64_t a, const std::int64_t b) { return sycl::min(a, b); } inline 
std::uint64_t min(const std::uint64_t a, const std::uint64_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) { return sycl::min(a, static_cast(b)); } inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) { return sycl::min(static_cast(a), b); } inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) { return sycl::min(a, static_cast(b)); } inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) { return sycl::min(static_cast(a), b); } // max function overloads. // For floating-point types, `float` or `double` arguments are acceptable. // For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or // `std::int64_t` type arguments are acceptable. inline double max(const double a, const float b) { return sycl::fmax(a, static_cast(b)); } inline double max(const float a, const double b) { return sycl::fmax(static_cast(a), b); } inline float max(const float a, const float b) { return sycl::fmax(a, b); } inline double max(const double a, const double b) { return sycl::fmax(a, b); } inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) { return sycl::max(a, static_cast(b)); } inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) { return sycl::max(static_cast(a), b); } inline std::int32_t max(const std::int32_t a, const std::int32_t b) { return sycl::max(a, b); } inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) { return sycl::max(a, static_cast(b)); } inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) { return sycl::max(static_cast(a), b); } inline std::int64_t max(const std::int64_t a, const std::int64_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) { return sycl::max(a, static_cast(b)); } inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) { return sycl::max(static_cast(a), b); } inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) { return sycl::max(a, static_cast(b)); } inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) { return sycl::max(static_cast(a), b); } inline void has_capability_or_fail(const sycl::device &dev, const std::initializer_list &props) { for (const auto &it : props) { if (dev.has(it)) continue; switch (it) { case sycl::aspect::fp64: throw std::runtime_error("'double' is not supported in '" + dev.get_info() + "' device"); break; case sycl::aspect::fp16: throw std::runtime_error("'half' is not supported in '" + dev.get_info() + "' device"); break; default: #define __SYCL_ASPECT(ASPECT, ID) \ case sycl::aspect::ASPECT: \ return #ASPECT; #define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID) #define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE) auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string { switch (AspectNum) { #include #include default: return "unknown aspect"; } }; #undef __SYCL_ASPECT_DEPRECATED_ALIAS #undef __SYCL_ASPECT_DEPRECATED #undef __SYCL_ASPECT throw std::runtime_error( "'" + getAspectNameStr(it) + "' is not supported in '" + dev.get_info() + "' device"); } break; } } static inline unsigned int get_current_device_id() { return dev_mgr::instance().current_device_id(); } static inline device_ext &get_current_device() { return 
dev_mgr::instance().current_device(); } static inline device_ext &get_device(unsigned int id) { return dev_mgr::instance().get_device(id); } static inline sycl::queue &get_in_order_queue() { return dev_mgr::instance().current_device().in_order_queue(); } static sycl::event dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction, const std::vector &dep_events = {}) { if (!size) return sycl::event{}; return q.memcpy(to_ptr, from_ptr, size, dep_events); GGML_UNUSED(direction); } // Get actual copy range and make sure it will not exceed range. static inline size_t get_copy_range(sycl::range<3> size, size_t slice, size_t pitch) { return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0); } static inline size_t get_offset(sycl::id<3> id, size_t slice, size_t pitch) { return slice * id.get(2) + pitch * id.get(1) + id.get(0); } /// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr /// and \p from_range to another specified by \p to_ptr and \p to_range. static inline std::vector dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, sycl::range<3> to_range, sycl::range<3> from_range, sycl::id<3> to_id, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction, const std::vector &dep_events = {}) { // RAII for host pointer class host_buffer { void *_buf; size_t _size; sycl::queue &_q; const std::vector &_deps; // free operation depends public: host_buffer(size_t size, sycl::queue &q, const std::vector &deps) : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {} void *get_ptr() const { return _buf; } size_t get_size() const { return _size; } ~host_buffer() { if (_buf) { _q.submit([&](sycl::handler &cgh) { cgh.depends_on(_deps); cgh.host_task([buf = _buf] { std::free(buf); }); }); } } }; std::vector event_list; size_t to_slice = to_range.get(1) * to_range.get(0), from_slice = from_range.get(1) * from_range.get(0); unsigned char *to_surface = (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0)); const unsigned char *from_surface = (const unsigned char *)from_ptr + get_offset(from_id, from_slice, from_range.get(0)); if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) { return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2), direction, dep_events)}; } direction = detail::deduce_memcpy_direction(q, to_ptr, from_ptr, direction); size_t size_slice = size.get(1) * size.get(0); switch (direction) { case host_to_host: for (size_t z = 0; z < size.get(2); ++z) { unsigned char *to_ptr = to_surface; const unsigned char *from_ptr = from_surface; if (to_range.get(0) == from_range.get(0) && to_range.get(0) == size.get(0)) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice, direction, dep_events)); } else { for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0), direction, dep_events)); to_ptr += to_range.get(0); from_ptr += from_range.get(0); } } to_surface += to_slice; from_surface += from_slice; } break; case host_to_device: { host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q, event_list); std::vector host_events; if (to_slice == size_slice) { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, dep_events); } else { // Copy host data to a temp host buffer with the shape of target. 
host_events = dpct_memcpy( q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // If has padding data, not sure whether it is useless. So fill temp // buffer with it. std::vector{ dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(), device_to_host, dep_events)}); } // Copy from temp host buffer to device with only one submit. event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(), buf.get_size(), host_to_device, host_events)); break; } case device_to_host: { host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q, event_list); // Copy from host temp buffer to host target with reshaping. event_list = dpct_memcpy( q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // Copy from device to temp host buffer with only one submit. std::vector{dpct_memcpy(q, buf.get_ptr(), from_surface, buf.get_size(), device_to_host, dep_events)}); break; } case device_to_device: event_list.push_back(q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); cgh.parallel_for( size, [=](sycl::id<3> id) { to_surface[get_offset(id, to_slice, to_range.get(0))] = from_surface[get_offset(id, from_slice, from_range.get(0))]; }); })); break; default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } return event_list; } /// memcpy 2D/3D matrix specified by pitched_data. static inline std::vector dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id, pitched_data from, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction = automatic) { return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), sycl::range<3>(to.get_pitch(), to.get_y(), 1), sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id, size, direction); } /// memcpy 2D matrix with pitch. 
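/// Example (minimal sketch, assuming USM device allocations and the in-order
/// queue helper defined above; pitches are given in bytes and include any row
/// padding): copy a 100 x 16 byte region between two differently pitched
/// device buffers using the overload declared below.
/// \code
///   sycl::queue &q = dpct::get_in_order_queue();
///   size_t to_pitch = 128, from_pitch = 256;     // bytes per row
///   void *dst = sycl::malloc_device(to_pitch * 16, q);
///   void *src = sycl::malloc_device(from_pitch * 16, q);
///   std::vector<sycl::event> ev =
///       dpct::dpct_memcpy(q, dst, src, to_pitch, from_pitch,
///                         /*x=*/100, /*y=*/16, dpct::device_to_device);
///   sycl::event::wait(ev);
///   sycl::free(dst, q);
///   sycl::free(src, q);
/// \endcode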
static inline std::vector dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t to_pitch, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic) { return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1), sycl::range<3>(from_pitch, y, 1), sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), sycl::range<3>(x, y, 1), direction); } inline void gemm(sycl::queue & q, oneapi::math::transpose a_trans, oneapi::math::transpose b_trans, int m, int n, int k, const void * alpha, const void * a, library_data_t a_type, int lda, const void * b, library_data_t b_type, int ldb, const void * beta, void * c, library_data_t c_type, int ldc, library_data_t scaling_type) { if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_impl, std::complex, std::complex, std::complex>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_impl, std::complex, std::complex, std::complex>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_impl(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast(alpha), q); float beta_value = dpct::get_value(reinterpret_cast(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_impl(q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, 
library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { float alpha_float = dpct::get_value(reinterpret_cast(alpha), q); float beta_float = dpct::get_value(reinterpret_cast(beta), q); detail::gemm_impl( q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc); break; } #endif // __INTEL_MKL__ default: throw std::runtime_error("the combination of data type is unsupported"); } } // gemm() /// Computes a batch of matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] batch_size Specifies the number of matrix multiply operations to perform. /// \param [in] scaling_type Data type of the scaling factors. 
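/// Example (minimal sketch for the real_float path, assuming column-major USM
/// device matrices and a matrix_info_t object, defined earlier in this header,
/// that outlives the call; error handling is omitted):
/// \code
///   sycl::queue &q = dpct::get_in_order_queue();
///   constexpr int m = 4, n = 4, k = 4, batch = 2;
///   const void *a_ptrs[batch]; const void *b_ptrs[batch]; void *c_ptrs[batch];
///   // ... fill the arrays with device pointers to m x k, k x n and m x n
///   //     float matrices (one triple per batch entry) ...
///   float alpha = 1.0f, beta = 0.0f;
///   dpct::matrix_info_t info{};
///   dpct::gemm_batch(q, oneapi::math::transpose::nontrans,
///                    oneapi::math::transpose::nontrans, m, n, k, &alpha,
///                    a_ptrs, dpct::library_data_t::real_float, /*lda=*/m,
///                    b_ptrs, dpct::library_data_t::real_float, /*ldb=*/k,
///                    &beta, c_ptrs, dpct::library_data_t::real_float,
///                    /*ldc=*/m, batch, dpct::library_data_t::real_float, &info);
///   q.wait();
/// \endcode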
inline void gemm_batch(sycl::queue & q, oneapi::math::transpose a_trans, oneapi::math::transpose b_trans, int m, int n, int k, const void * alpha, const void * a[], library_data_t a_type, int lda, const void * b[], library_data_t b_type, int ldb, const void * beta, void * c[], library_data_t c_type, int ldc, int batch_size, library_data_t scaling_type, matrix_info_t * matrix_info) { std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_batch_impl(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } #endif case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { float alpha_float = dpct::get_value(reinterpret_cast(alpha), q); float beta_float = dpct::get_value(reinterpret_cast(beta), q); detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc, batch_size, matrix_info); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast(alpha), q); float beta_value = dpct::get_value(reinterpret_cast(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc, batch_size, matrix_info); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a batch of matrix-matrix product with general matrices. 
/// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] stride_a Stride between the different A matrices. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] stride_b Stride between the different B matrices. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] stride_c Stride between the different C matrices. /// \param [in] batch_size Specifies the number of matrix multiply operations to perform. /// \param [in] scaling_type Data type of the scaling factors. inline void gemm_batch(sycl::queue & q, oneapi::math::transpose a_trans, oneapi::math::transpose b_trans, int m, int n, int k, const void * alpha, const void * a, library_data_t a_type, int lda, long long int stride_a, const void * b, library_data_t b_type, int ldb, long long int stride_b, const void * beta, void * c, library_data_t c_type, int ldc, long long int stride_c, int batch_size, library_data_t scaling_type) { if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_batch_impl, std::complex, std::complex, std::complex>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_batch_impl, std::complex, std::complex, std::complex>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, 
library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_batch_impl(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } #endif case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { detail::gemm_batch_impl(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast(alpha), q); float beta_value = dpct::get_value(reinterpret_cast(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_batch_impl( q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b, &beta_half, c, ldc, stride_c, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } static inline void async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y, direction); } using err0 = detail::generic_error_type; using err1 = detail::generic_error_type; static inline void dpct_free(void *ptr, sycl::queue &q = get_default_queue()) { detail::dpct_free(ptr, q); } /// dpct accessor used as device function parameter. 
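// --- Editor's illustrative sketch (not part of the original dpct helper) ----------------
// The strided gemm_batch() overload above is the entry point used when consecutive
// matrices of a batch sit at a fixed element stride; the pointer-array overload further up
// instead takes per-matrix pointers plus a matrix_info_t descriptor. Assuming contiguous
// FP32 batches and a SYCL queue `q`, a call could look like the sketch below (hypothetical
// names, no error handling, column-major leading dimensions).
#if 0
static void example_gemm_batch_f32(sycl::queue & q,
                                   const float * A, const float * B, float * C,
                                   int m, int n, int k, int batch) {
    const float alpha = 1.0f;
    const float beta  = 0.0f;
    dpct::gemm_batch(q,
                     oneapi::math::transpose::nontrans, oneapi::math::transpose::nontrans,
                     m, n, k,
                     &alpha,
                     A, dpct::library_data_t::real_float, /*lda=*/m, /*stride_a=*/(long long) m * k,
                     B, dpct::library_data_t::real_float, /*ldb=*/k, /*stride_b=*/(long long) k * n,
                     &beta,
                     C, dpct::library_data_t::real_float, /*ldc=*/m, /*stride_c=*/(long long) m * n,
                     batch,
                     dpct::library_data_t::real_float);
}
#endif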
template class accessor; template class accessor { public: using memory_t = detail::memory_traits; using element_t = typename memory_t::element_t; using pointer_t = typename memory_t::pointer_t; using accessor_t = typename memory_t::template accessor_t<3>; accessor(pointer_t data, const sycl::range<3> &in_range) : _data(data), _range(in_range) {} template accessor(typename std::enable_if::type &acc) : accessor(acc, acc.get_range()) {} accessor(const accessor_t &acc, const sycl::range<3> &in_range) : accessor(acc.get_pointer(), in_range) {} accessor operator[](size_t index) const { sycl::range<2> sub(_range.get(1), _range.get(2)); return accessor(_data + index * sub.size(), sub); } pointer_t get_ptr() const { return _data; } private: pointer_t _data; sycl::range<3> _range; }; template class accessor { public: using memory_t = detail::memory_traits; using element_t = typename memory_t::element_t; using pointer_t = typename memory_t::pointer_t; using accessor_t = typename memory_t::template accessor_t<2>; accessor(pointer_t data, const sycl::range<2> &in_range) : _data(data), _range(in_range) {} template accessor(typename std::enable_if::type &acc) : accessor(acc, acc.get_range()) {} accessor(const accessor_t &acc, const sycl::range<2> &in_range) : accessor(acc.get_pointer(), in_range) {} pointer_t operator[](size_t index) const { return _data + _range.get(1) * index; } pointer_t get_ptr() const { return _data; } private: pointer_t _data; sycl::range<2> _range; }; namespace detail { /// Device variable with address space of shared, global or constant. template class device_memory { public: using accessor_t = typename detail::memory_traits::template accessor_t; using value_t = typename detail::memory_traits::value_t; using dpct_accessor_t = dpct::accessor; device_memory() : device_memory(sycl::range(1)) {} /// Constructor of 1-D array with initializer list device_memory(const sycl::range &in_range, std::initializer_list &&init_list) : device_memory(in_range) { assert(init_list.size() <= in_range.size()); _host_ptr = (value_t *)std::malloc(_size); std::memset(_host_ptr, 0, _size); std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T)); } /// Constructor of 2-D array with initializer list template device_memory( const typename std::enable_if>::type &in_range, std::initializer_list> &&init_list) : device_memory(in_range) { assert(init_list.size() <= in_range[0]); _host_ptr = (value_t *)std::malloc(_size); std::memset(_host_ptr, 0, _size); auto tmp_data = _host_ptr; for (auto sub_list : init_list) { assert(sub_list.size() <= in_range[1]); std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T)); tmp_data += in_range[1]; } } /// Constructor with range device_memory(const sycl::range &range_in) : _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false), _host_ptr(nullptr), _device_ptr(nullptr) { static_assert( (Memory == global) || (Memory == constant) || (Memory == shared), "device memory region should be global, constant or shared"); // Make sure that singleton class mem_mgr and dev_mgr will destruct // later than this. detail::mem_mgr::instance(); dev_mgr::instance(); } /// Constructor with range template device_memory(Args... Arguments) : device_memory(sycl::range(Arguments...)) {} ~device_memory() { if (_device_ptr && !_reference) dpct::dpct_free(_device_ptr); if (_host_ptr) std::free(_host_ptr); } /// Allocate memory with default queue, and init memory if has initial /// value. 
void init() { init(dpct::get_default_queue()); } /// Allocate memory with specified queue, and init memory if has initial /// value. void init(sycl::queue &q) { if (_device_ptr) return; if (!_size) return; allocate_device(q); if (_host_ptr) detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device); } /// The variable is assigned to a device pointer. void assign(value_t *src, size_t size) { this->~device_memory(); new (this) device_memory(src, size); } /// Get memory pointer of the memory object, which is virtual pointer when /// usm is not used, and device pointer when usm is used. value_t *get_ptr() { return get_ptr(get_default_queue()); } /// Get memory pointer of the memory object, which is virtual pointer when /// usm is not used, and device pointer when usm is used. value_t *get_ptr(sycl::queue &q) { init(q); return _device_ptr; } /// Get the device memory object size in bytes. size_t get_size() { return _size; } template typename std::enable_if::type &operator[](size_t index) { init(); return _device_ptr[index]; } /// Get dpct::accessor with dimension info for the device memory object /// when usm is used and dimension is greater than 1. template typename std::enable_if::type get_access([[maybe_unused]] sycl::handler &cgh) { return dpct_accessor_t((T *)_device_ptr, _range); } private: device_memory(value_t *memory_ptr, size_t size) : _size(size), _range(size / sizeof(T)), _reference(true), _device_ptr(memory_ptr) {} void allocate_device(sycl::queue &q) { #ifndef DPCT_USM_LEVEL_NONE if (Memory == shared) { _device_ptr = (value_t *)sycl::malloc_shared(_size, q.get_device(), q.get_context()); return; } #ifdef SYCL_EXT_ONEAPI_USM_DEVICE_READ_ONLY if (Memory == constant) { _device_ptr = (value_t *)sycl::malloc_device( _size, q.get_device(), q.get_context(), sycl::ext::oneapi::property::usm::device_read_only()); return; } #endif #endif _device_ptr = (value_t *)detail::dpct_malloc(_size, q); } size_t _size; sycl::range _range; bool _reference; value_t *_host_ptr; value_t *_device_ptr; }; template class device_memory : public device_memory { public: using base = device_memory; using value_t = typename base::value_t; using accessor_t = typename detail::memory_traits::template accessor_t<0>; /// Constructor with initial value. device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {} /// Default constructor device_memory() : base(1) {} }; } // namespace detail template using global_memory = detail::device_memory; template using constant_memory = detail::device_memory; template using shared_memory = detail::device_memory; template inline T atomic_fetch_add(T *addr, T operand) { auto atm = sycl::atomic_ref(addr[0]); return atm.fetch_add(operand); } template inline T1 atomic_fetch_add(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref(addr[0]); return atm.fetch_add(operand); } template inline T atomic_fetch_add(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_add(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_add(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_add(addr, operand); default: assert(false && "Invalid memory_order for atomics. 
Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template inline T1 atomic_fetch_add(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_add(addr, operand, memoryOrder); } inline unsigned int byte_level_permute( unsigned int a, unsigned int b, unsigned int s) { unsigned int ret; ret = ((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) | (((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) | (((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) | (((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24); return ret; } inline uint32_t byte_level_permute_custom( uint32_t low32, uint32_t high32, uint32_t sel, int mode = 0) { constexpr uint16_t lookup[6][4] = { {0x3210, 0x4321, 0x5432, 0x6543}, // Forward 4-byte extract {0x5670, 0x6701, 0x7012, 0x0123}, // Backward 4-byte extract {0x0000, 0x1111, 0x2222, 0x3333}, // Replicate 8-bit values {0x3210, 0x3211, 0x3222, 0x3333}, // Edge clamp left {0x0000, 0x1110, 0x2210, 0x3210}, // Edge clamp right {0x1010, 0x3232, 0x1010, 0x3232} // Replicate 16-bit values }; if (mode >= 1 && mode <= 6) { return byte_level_permute(low32, high32, lookup[mode - 1][sel & 0x3]); } else if (!mode) { return byte_level_permute(low32, high32, sel); } return 0; } } // COPY from DPCT head files #endif // GGML_SYCL_DPCT_HELPER_HPP ggml-org-ggml-3678254/src/ggml-sycl/element_wise.cpp000066400000000000000000001355771512524704700222570ustar00rootroot00000000000000#include "common.hpp" #include "ggml-sycl/presets.hpp" #include "ggml.h" #include "element_wise.hpp" #define SYCL_GLOBAL_ID_LOOP(K, ITEM) \ for (auto i = ITEM.get_global_id(0); i < (size_t)K; i += ITEM.get_global_range(0)) #define SYCL_LOCAL_ID_CALC(ITEM, IDX) \ (ITEM.get_local_range(IDX) * ITEM.get_group(IDX) + ITEM.get_local_id(IDX)) static void acc_f32(const float * x, const float * y, float * dst, const int ne, const int ne10, const int ne11, const int ne12, const int nb1, const int nb2, int offset, const sycl::nd_item<1> &item_ct1) { const int i = SYCL_LOCAL_ID_CALC(item_ct1, 0); if (i >= ne) { return; } int src1_idx = i - offset; int oz = src1_idx / nb2; int oy = (src1_idx - (oz * nb2)) / nb1; int ox = src1_idx % nb1; if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) { dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11]; } else { dst[i] = x[i]; } } /* Unary OP funcs */ template static __dpct_inline__ T op_sgn(T x) { return x > static_cast(0.f) ? static_cast(1.f) : ((x < static_cast(0.f) ? static_cast(-1.f) : static_cast(0.f))); } template static __dpct_inline__ T op_abs(T x) { return sycl::fabs(x); } template static __dpct_inline__ T op_elu(T x) { return (x > static_cast(0.f)) ? 
x : sycl::expm1(x); } template static __dpct_inline__ T op_gelu(T x) { const T GELU_COEF_A = static_cast(0.044715f); const T SQRT_2_OVER_PI = static_cast(0.79788456080286535587989211986876f); return static_cast(0.5f) * x * (static_cast(1.0f) + sycl::tanh(SQRT_2_OVER_PI * x * (static_cast(1.0f) + GELU_COEF_A * x * x))); } template static __dpct_inline__ T op_silu(T x) { return x / (static_cast(1.0f) + sycl::native::exp(-x)); } template static __dpct_inline__ T op_gelu_quick(T x) { const T GELU_QUICK_COEF_LOCAL = static_cast(-1.702f); return x * (static_cast(1.0f) / (static_cast(1.0f) + sycl::native::exp(GELU_QUICK_COEF_LOCAL * x))); } template static __dpct_inline__ T op_gelu_erf(T x) { const T SQRT_2_INV = static_cast(0.70710678118654752440084436210484f); return static_cast(0.5f) * x * (static_cast(1.0f) + sycl::erf(x * SQRT_2_INV)); } template static __dpct_inline__ T op_tanh(T x) { return sycl::tanh(x); } template static __dpct_inline__ T op_relu(T x) { return sycl::fmax(x, static_cast(0)); } template static __dpct_inline__ T op_sigmoid(T x) { return static_cast(1.0f) / (static_cast(1.0f) + sycl::native::exp(-x)); } template static __dpct_inline__ T op_sqrt(T x) { return sycl::sqrt(x); } template static __dpct_inline__ T op_sin(T x) { return sycl::sin(x); } template static __dpct_inline__ T op_cos(T x) { return sycl::cos(x); } template static __dpct_inline__ T op_hardsigmoid(T x) { return sycl::fmin(static_cast(1.0f), sycl::fmax(static_cast(0.0f), (x + static_cast(3.0f)) / static_cast(6.0f))); } template static __dpct_inline__ T op_hardswish(T x) { return x * sycl::fmin(static_cast(1.0f), sycl::fmax(static_cast(0.0f), (x + static_cast(3.0f)) / static_cast(6.0f))); } template static __dpct_inline__ T op_exp(T x) { return sycl::exp(x); } template static __dpct_inline__ T op_log(T x) { if (x <= static_cast(0)) { return neg_infinity(); } return sycl::log(x); } template static __dpct_inline__ T op_neg(T x) { return -x; } template static __dpct_inline__ T op_step(T x) { return (x > static_cast(0.0f)) ? static_cast(1.0f) : static_cast(0.0f); } template static __dpct_inline__ T op_leaky_relu(T x, float negative_slope) { T neg_slope_T = static_cast(negative_slope); return sycl::fmax(x, static_cast(0)) + sycl::fmin(x, static_cast(0.0f)) * neg_slope_T; } template static __dpct_inline__ T op_sqr(T x) { return x * x; } template static __dpct_inline__ T op_clamp(T x, float min_val, float max_val) { return x < static_cast(min_val) ? static_cast(min_val) : (x > static_cast(max_val) ? 
static_cast(max_val) : x); } template static __dpct_inline__ T op_floor(T x) { return sycl::floor(x); } template static __dpct_inline__ T op_ceil(T x) { return sycl::ceil(x); } template static __dpct_inline__ T op_round(T x) { return sycl::round(x); } template static __dpct_inline__ T op_trunc(T x) { return sycl::trunc(x); } template static void unary_op_generic_kernel( const T * x, T * dst, const int k, const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, const size_t nb0, const size_t nb1, const size_t nb2, const size_t nb3, const size_t nbd0, const size_t nbd1, const size_t nbd2, const size_t nbd3, const sycl::nd_item<1> & item_ct1, F func) { (void) ne3; SYCL_GLOBAL_ID_LOOP(k, item_ct1) { const int64_t i0 = i % ne0; const int64_t i1 = (i / ne0) % ne1; const int64_t i2 = (i / (ne0*ne1)) % ne2; const int64_t i3 = i / (ne0*ne1*ne2); const char * src_base = (const char *) x; char * dst_base = (char *) dst; const T * srcp = (const T *)(src_base + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3 ); T * dstp = (T *)(dst_base + i0*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3); *dstp = func(*srcp); } } template static void unary_op_sqrt_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_sqrt(x[i]); } } template static void unary_op_sin_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_sin(x[i]); } } template static void unary_op_cos_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_cos(x[i]); } } template static void unary_op_log_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_log(x[i]); } } template static void unary_op_leaky_relu_kernel(const T * x, T * dst, const int k, float negative_slope, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_leaky_relu(x[i], negative_slope); } } template static void unary_op_sqr_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_sqr(x[i]); } } template static void unary_op_clamp_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1, float min_val, float max_val) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_clamp(x[i], min_val, max_val); } } template static void unary_op_floor_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_floor(x[i]); } } template static void unary_op_ceil_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_ceil(x[i]); } } template static void unary_op_round_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_round(x[i]); } } template static void unary_op_trunc_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = op_trunc(x[i]); } } template static void upscale(const T *x, T *dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) { int index = item_ct1.get_local_id(0) + item_ct1.get_group(0) * item_ct1.get_local_range(0); if (index >= ne10 * ne11 * ne12 * 
ne13) { return; } // operation int i10 = index % ne10; int i11 = (index / ne10) % ne11; int i12 = (index / (ne10 * ne11)) % ne12; int i13 = (index / (ne10 * ne11 * ne12)) % ne13; int i00 = static_cast(i10 / sf0); int i01 = static_cast(i11 / sf1); int i02 = static_cast(i12 / sf2); int i03 = static_cast(i13 / sf3); dst[index] = *(const T *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00); } template static void clamp(const T * x, T * dst, const float min, const float max, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = x[i] < static_cast(min) ? static_cast(min) : (x[i] > static_cast(max) ? static_cast(max) : x[i]); } } template static void gated_op_fused_geglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); dst[i] = op_gelu(x[j0]) * g[j1]; } } template static void gated_op_fused_reglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); dst[i] = op_relu(x[j0]) * g[j1]; } } template static void gated_op_fused_swiglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); dst[i] = op_silu(x[j0]) * g[j1]; } } template static void gated_op_fused_geglu_erf(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); dst[i] = op_gelu_erf(x[j0]) * g[j1]; } } template static void gated_op_fused_geglu_quick(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? 
j0 : (i / n) * o1 + (i % n); dst[i] = op_gelu_quick(x[j0]) * g[j1]; } } namespace ggml_sycl_detail { static void acc_f32_sycl(const float *x, const float *y, float *dst, const int n_elements, const int ne10, const int ne11, const int ne12, const int nb1, const int nb2, const int offset, queue_ptr stream) { int num_blocks = ceil_div(n_elements, SYCL_ACC_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_ACC_BLOCK_SIZE), sycl::range<1>(SYCL_ACC_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset, item_ct1); }); } template static void arange_kernel(T * dst, const int k, T start, T step, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { dst[i] = start + static_cast(i) * step; } } template static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3, queue_ptr stream) { int dst_size = ne10 * ne11 * ne12 * ne13; int num_blocks = ceil_div(dst_size, SYCL_UPSCALE_BLOCK_SIZE); sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { upscale(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1); }); } template static inline void dispatch_ggml_sycl_op_unary(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); #else GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); #endif GGML_ASSERT(dst->src[0]->type == dst->type); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); switch (dst->type) { #if defined (GGML_SYCL_F16) case GGML_TYPE_F16: { auto data_pts = cast_data(dst); kernel_invoker(data_pts.src, data_pts.dst, (int)ggml_nelements(dst->src[0]), main_stream, std::forward(args)...); break; } #endif case GGML_TYPE_F32: { auto data_pts = cast_data(dst); kernel_invoker(data_pts.src, data_pts.dst, (int)ggml_nelements(dst->src[0]), main_stream, std::forward(args)...); break; } default: GGML_ABORT("GGML tensor type not supported!\n"); } } template static inline void dispatch_ggml_sycl_op_fused_glu(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); #else GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); #endif GGML_ASSERT(dst->src[0]->type == dst->type); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2;; GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_is_contiguous_1(dst->src[0])); GGML_ASSERT(ggml_is_contiguous(dst)); const int32_t swapped = ((const int32_t *) dst->op_params)[1]; void * src0_d = src0->data; void * src1_d = src1 ? 
src1->data : src0->data; const int64_t src0_o = src0->nb[1]; const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; void * dst_d = dst->data; if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); GGML_ASSERT(src1->ne[0] == nc); GGML_ASSERT(src0->type == src1->type); } switch (dst->type) { #if defined (GGML_SYCL_F16) case GGML_TYPE_F16: { sycl::half * src0_p = (sycl::half *) src0_d; sycl::half * src1_p = (sycl::half *) src1_d; if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } kernel_invoker(src0_p, src1_p, (sycl::half *) dst_d, ggml_nelements(dst), nc, src0_o / sizeof(sycl::half), src1_o / sizeof(sycl::half), main_stream, std::forward(args)...); break; } #endif case GGML_TYPE_F32: { float * src0_p = (float *) src0_d; float * src1_p = (float *) src1_d; if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 0 : nc; } kernel_invoker(src0_p, src1_p, (float *) dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), main_stream, std::forward(args)...); break; } default: GGML_ABORT("GGML tensor type not supported!\n"); } } template static inline void dispatch_ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); #else GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); #endif GGML_ASSERT(dst->src[0]->type == dst->type); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const float sf0 = (float) dst->ne[0] / dst->src[0]->ne[0]; const float sf1 = (float) dst->ne[1] / dst->src[0]->ne[1]; const float sf2 = (float) dst->ne[2] / dst->src[0]->ne[2]; const float sf3 = (float) dst->ne[3] / dst->src[0]->ne[3]; switch (dst->type) { #if defined (GGML_SYCL_F16) case GGML_TYPE_F16: { auto data_pts = cast_data(dst); kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->nb[0], (int)dst->src[0]->nb[1], (int)dst->src[0]->nb[2], (int)dst->src[0]->nb[3], (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], sf0, sf1, sf2, sf3, main_stream, std::forward(args)...); break; } #endif case GGML_TYPE_F32: { auto data_pts = cast_data(dst); kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->nb[0], (int)dst->src[0]->nb[1], (int)dst->src[0]->nb[2], (int)dst->src[0]->nb[3], (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], sf0, sf1, sf2, sf3, main_stream, std::forward(args)...); break; } default: GGML_ABORT("GGML tensor type not supported!\n"); } } template static inline void ggml_sycl_op_unary( ggml_backend_sycl_context & ctx, ggml_tensor * dst, F func) { ggml_tensor * src0 = dst->src[0]; const int64_t ne0 = dst->ne[0]; const int64_t ne1 = dst->ne[1]; const int64_t ne2 = dst->ne[2]; const int64_t ne3 = dst->ne[3]; const size_t nb0 = src0->nb[0]; const size_t nb1 = src0->nb[1]; const size_t nb2 = src0->nb[2]; const size_t nb3 = src0->nb[3]; const size_t nbd0 = dst->nb[0]; const size_t nbd1 = dst->nb[1]; const size_t nbd2 = dst->nb[2]; const size_t nbd3 = dst->nb[3]; ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [=](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, 256); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), sycl::range<1>(256)), 
[=](sycl::nd_item<1> item_ct1) { unary_op_generic_kernel( src, dst_ptr, k_elements, ne0, ne1, ne2, ne3, nb0, nb1, nb2, nb3, nbd0, nbd1, nbd2, nbd3, item_ct1, func ); }); }); } static inline void ggml_sycl_op_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->type == GGML_TYPE_F32); float start, stop, step; memcpy(&start, dst->op_params, sizeof(float)); memcpy(&stop, (float *) dst->op_params + 1, sizeof(float)); memcpy(&step, (float *) dst->op_params + 2, sizeof(float)); dpct::queue_ptr stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); float * dst_ptr = (float *)dst->data; const int k = (int)ggml_nelements(dst); const int num_blocks = ceil_div(k, SYCL_ARANGE_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_ARANGE_BLOCK_SIZE), sycl::range<1>(SYCL_ARANGE_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { arange_kernel(dst_ptr, k, start, step, item_ct1); }); } } // namespace ggml_sycl_detail static inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_sgn(x); }); } static inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_abs(x); }); } static inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_elu(x); }); } static inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_silu(x); }); } static inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_gelu(x); }); } static inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_gelu_quick(x); }); } static inline void ggml_sycl_op_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_gelu_erf(x); }); } static inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_tanh(x); }); } static inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_relu(x); }); } static inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_hardsigmoid(x); }); } static inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_hardswish(x); }); } static inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_exp(x); }); } static inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, SYCL_EXP_BLOCK_SIZE); // Using EXP block size stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * 
sycl::range<1>(SYCL_EXP_BLOCK_SIZE), sycl::range<1>(SYCL_EXP_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { unary_op_log_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_neg(x); }); } static inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_step(x); }); } static inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { return op_sigmoid(x); }); } static inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, SYCL_SQRT_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SQRT_BLOCK_SIZE), sycl::range<1>(SYCL_SQRT_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { unary_op_sqrt_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, SYCL_SIN_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIN_BLOCK_SIZE), sycl::range<1>(SYCL_SIN_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { unary_op_sin_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, SYCL_SIN_BLOCK_SIZE); // Using SIN block size stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIN_BLOCK_SIZE), sycl::range<1>(SYCL_SIN_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { unary_op_cos_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { float negative_slope; memcpy(&negative_slope, dst->op_params, sizeof(float)); ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream, float slope) { const int num_blocks = ceil_div(k_elements, SYCL_RELU_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_RELU_BLOCK_SIZE), sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { unary_op_leaky_relu_kernel(src, dst_ptr, k_elements, slope, item_ct1); }); }, negative_slope); } static inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, SYCL_SQR_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SQR_BLOCK_SIZE), sycl::range<1>(SYCL_SQR_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { unary_op_sqr_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void 
ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_upscale(ctx, dst, [](const auto* src, auto* dst_ptr, int nb00, int nb01, int nb02, int nb03, int ne10, int ne11, int ne12, int ne13, float sf0, float sf1, float sf2, float sf3, queue_ptr stream) { ggml_sycl_detail::upscale_sycl(src, dst_ptr, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, stream); }); } static inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { float min_val; float max_val; memcpy(&min_val, dst->op_params, sizeof(float)); memcpy(&max_val, (float *) dst->op_params + 1, sizeof(float)); ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream, float min_arg, float max_arg) { const int num_blocks = ceil_div(k_elements, SYCL_CLAMP_BLOCK_SIZE); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_CLAMP_BLOCK_SIZE), sycl::range<1>(SYCL_CLAMP_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { clamp(src, dst_ptr, min_arg, max_arg, k_elements, item_ct1); }); }, min_val, max_val); } static inline void ggml_sycl_op_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, 256); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), sycl::range<1>(256)), [=](sycl::nd_item<1> item_ct1) { unary_op_floor_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, 256); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), sycl::range<1>(256)), [=](sycl::nd_item<1> item_ct1) { unary_op_ceil_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, 256); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), sycl::range<1>(256)), [=](sycl::nd_item<1> item_ct1) { unary_op_round_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { const int num_blocks = ceil_div(k_elements, 256); stream->parallel_for( sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), sycl::range<1>(256)), [=](sycl::nd_item<1> item_ct1) { unary_op_trunc_kernel(src, dst_ptr, k_elements, item_ct1); }); }); } static inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->src[1]->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const float * src0_dd = static_cast(dst->src[0]->data); const float * src1_dd 
= static_cast(dst->src[1]->data); float * dst_dd = static_cast(dst->data); int nb1 = dst->op_params[0] / 4; // 4 bytes of float32 int nb2 = dst->op_params[1] / 4; // 4 bytes of float32 // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused int offset = dst->op_params[3] / 4; // offset in bytes ggml_sycl_detail::acc_f32_sycl(src0_dd, src1_dd, dst_dd, (int)ggml_nelements(dst), (int)dst->src[1]->ne[0], (int)dst->src[1]->ne[1], (int)dst->src[1]->ne[2], nb1, nb2, offset, main_stream); } static inline void ggml_sycl_op_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE); main_stream->parallel_for( sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { gated_op_fused_geglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); }); }); } static inline void ggml_sycl_op_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { const uint32_t num_blocks = ceil_div((uint32_t)k, SYCL_RELU_BLOCK_SIZE); // Using RELU block size for reglu main_stream->parallel_for( sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { gated_op_fused_reglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); }); }); } static inline void ggml_sycl_op_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { const uint32_t num_blocks = ceil_div((uint32_t)k, SYCL_SILU_BLOCK_SIZE); // Using SILU block size for swiglu main_stream->parallel_for( sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { gated_op_fused_swiglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); }); }); } __dpct_inline__ float ggml_sycl_op_swiglu_oai_single(float x, float g, float alpha = 1.702f, float limit = 7.0f) { x = sycl::fmin(x, limit); g = sycl::fmax(sycl::fmin(g, limit), -limit); float out_glu = x / (1.0f + sycl::native::exp(-x * alpha)); out_glu = out_glu * (1.0f + g); return out_glu; } template static void swiglu_oai_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, float alpha, float limit, sycl::nd_item<3> item_ct1) { const int64_t i = int64_t(item_ct1.get_local_range(2)) * item_ct1.get_group(2) + item_ct1.get_local_id(2); if (i >= k) { return; } const int64_t j0 = (i / n) * o0 + (i % n); const int64_t j1 = o0 == o1 ? 
j0 : (i / n) * o1 + (i % n); float xi = x[j0]; float gi = g[j1]; dst[i] = ggml_sycl_op_swiglu_oai_single(xi, gi, alpha, limit); } template static void swiglu_oai_sycl(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, const float alpha, const float limit, dpct::queue_ptr stream) { const int64_t num_blocks = (k + SYCL_GLU_BLOCK_SIZE - 1) / SYCL_GLU_BLOCK_SIZE; stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GLU_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_GLU_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { swiglu_oai_kernel(x, g, dst, k, n, o0, o1, alpha, limit, item_ct1); }); } void ggml_sycl_op_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; void * src0_d = src0->data; void * src1_d = src1 ? src1->data : src0->data; const int64_t src0_o = src0->nb[1]; const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; void * dst_d = dst->data; const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2; dpct::queue_ptr stream = ctx.stream(); GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->type == dst->type); GGML_ASSERT(dst->ne[0] == nc); GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); GGML_ASSERT(src1->ne[0] == nc); GGML_ASSERT(src0->type == src1->type); } //const int32_t swapped = ((const int32_t *) dst->op_params)[1]; const int32_t swapped = ggml_get_op_params_i32(dst, 1); const float alpha = ggml_get_op_params_f32(dst, 2); const float limit = ggml_get_op_params_f32(dst, 3); float * src0_p = (float *) src0_d; float * src1_p = (float *) src1_d; if (!src1) { src0_p += swapped ? nc : 0; src1_p += swapped ? 
0 : nc; } swiglu_oai_sycl(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), alpha, limit, stream); } static inline void ggml_sycl_op_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE); main_stream->parallel_for( sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { gated_op_fused_geglu_erf(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); }); }); } static inline void ggml_sycl_op_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE); main_stream->parallel_for( sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { gated_op_fused_geglu_quick(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); }); }); } void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_sqrt(ctx, dst); } void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_sin(ctx, dst); } void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_cos(ctx, dst); } void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2); ggml_sycl_op_acc(ctx, dst); } void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_gelu(ctx, dst); } void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_silu(ctx, dst); } void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_gelu_quick(ctx, dst); } void ggml_sycl_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_gelu_erf(ctx, dst); } void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_tanh(ctx, dst); } void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_relu(ctx, dst); } void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_sigmoid(ctx, dst); } void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_hardsigmoid(ctx, dst); } void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { 
scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_hardswish(ctx, dst); } void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_exp(ctx, dst); } void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_log(ctx, dst); } void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_neg(ctx, dst); } void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_step(ctx, dst); } void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_leaky_relu(ctx, dst); } void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_sqr(ctx, dst); } void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_upscale(ctx, dst); } void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_clamp(ctx, dst); } void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_sgn(ctx, dst); } void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_abs(ctx, dst); } void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_elu(ctx, dst); } void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_geglu(ctx, dst); } void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_reglu(ctx, dst); } void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_swiglu(ctx, dst); } void ggml_sycl_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_swiglu_oai(ctx, dst); } void ggml_sycl_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_geglu_erf(ctx, dst); } void ggml_sycl_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_geglu_quick(ctx, dst); } void ggml_sycl_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/0); ggml_sycl_detail::ggml_sycl_op_arange(ctx, dst); } void ggml_sycl_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_floor(ctx, dst); } void ggml_sycl_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { 
scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_ceil(ctx, dst); } void ggml_sycl_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_round(ctx, dst); } void ggml_sycl_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_trunc(ctx, dst); } ggml-org-ggml-3678254/src/ggml-sycl/element_wise.hpp000066400000000000000000000064311512524704700222460ustar00rootroot00000000000000#ifndef GGML_SYCL_ELEMENTWISE_HPP #define GGML_SYCL_ELEMENTWISE_HPP #include "common.hpp" #include "ggml.h" #include // For std::numeric_limits #define SYCL_GLU_BLOCK_SIZE 256 template T neg_infinity() { return -std::numeric_limits::infinity(); } template struct typed_data { const T_Src * src; T_Dst * dst; }; template typed_data cast_data(ggml_tensor * dst) { return { /* .src = */ static_cast(dst->src[0]->data), /* .dst = */ static_cast(dst->data) }; } const float GELU_QUICK_COEF = -1.702f; void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void 
ggml_sycl_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_ELEMENTWISE_HPP ggml-org-ggml-3678254/src/ggml-sycl/gemm.hpp000066400000000000000000000061361512524704700205150ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_GEMM_HPP #define GGML_SYCL_GEMM_HPP #include "ggml-sycl.h" #if GGML_SYCL_DNNL #include "dnnl.hpp" #include "dnnl_sycl.hpp" class DnnlGemmWrapper { public: using dt = dnnl::memory::data_type; using tag = dnnl::memory::format_tag; template static constexpr dt to_dt() { if constexpr (std::is_same_v) return dt::f32; else if constexpr (std::is_same_v) return dt::f16; else static_assert(0); } static void gemm(ggml_backend_sycl_context & ctx, int m, int n, int k, const void * a, dt at, dnnl_dim_t stra0, dnnl_dim_t stra1, dnnl_dim_t stra2, const void * b, dt bt, dnnl_dim_t strb0, dnnl_dim_t strb1, dnnl_dim_t strb2, void * c, dt ct, const queue_ptr & q, dnnl_dim_t batches_a, dnnl_dim_t batches_b) { auto stream = ctx.stream_dnnl(q); auto eng = ctx.engine_dnnl(q); dnnl::memory::dims a_dims = {batches_a, m, k }; dnnl::memory::dims a_strides = {stra2, stra1, stra0}; const auto a_in_md = dnnl::memory::desc(a_dims, at, a_strides); dnnl::memory::dims b_dims = {batches_b, k, n }; dnnl::memory::dims b_strides = {strb2, strb0, strb1}; const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_strides); dnnl::memory::dims c_dims = { std::max(batches_a, batches_b), m, n}; dnnl::memory::dims c_strides = {m*n, 1, m }; const auto c_md = dnnl::memory::desc(c_dims, ct, c_strides); dnnl::primitive_attr primitive_attr; primitive_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user); #ifdef GGML_SYCL_F16 primitive_attr.set_fpmath_mode(dnnl::fpmath_mode::f16); #endif auto a_mem = dnnl::memory(a_in_md, eng, const_cast(a)); auto b_mem = dnnl::memory(b_in_md, eng, const_cast(b)); auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md, primitive_attr); auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c); auto scratchpad_md = matmul_pd.scratchpad_desc(); auto scratchpad_mem = ctx.get_scratchpad_mem(scratchpad_md, eng, q); auto matmul_prim = dnnl::matmul(matmul_pd); std::unordered_map matmul_args; matmul_args.insert({ DNNL_ARG_SRC, a_mem }); matmul_args.insert({ DNNL_ARG_WEIGHTS, b_mem }); matmul_args.insert({ DNNL_ARG_DST, c_mem }); matmul_args.insert({ DNNL_ARG_SCRATCHPAD, scratchpad_mem }); matmul_prim.execute(stream, matmul_args); } static void row_gemm(ggml_backend_sycl_context & ctx, int m, int n, int k, const void * a, dt at, const void * b, dt bt, void * c, dt ct, const queue_ptr & q) { gemm(ctx, m, n, k, a, at, 1, k, k * m, b, bt, 1, k, n * k, c, ct, q, 1, 1); } }; #endif #endif // GGML_SYCL_GEMM_HPP ggml-org-ggml-3678254/src/ggml-sycl/getrows.cpp000066400000000000000000000213221512524704700212470ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
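// Illustrative sketch only (not from the upstream sources): DnnlGemmWrapper in
// gemm.hpp above maps a single matrix product onto the batched oneDNN matmul
// primitive. row_gemm(ctx, m, n, k, ...) forwards to
// gemm(..., /*stra*/ 1, k, k*m, /*strb*/ 1, k, n*k, ..., /*batches*/ 1, 1),
// so C (m x n) = A (m x k) * B (k x n) with the length-k dimension contiguous in
// both inputs and a batch count of 1. A hedged usage sketch, only meaningful when
// GGML_SYCL_DNNL is enabled (ctx is assumed to be a live backend context; the
// template argument of to_dt<>() is elided in this archive dump):
#if 0
static void example_row_gemm(ggml_backend_sycl_context & ctx,
                             const float * A, const float * B, float * C,
                             int m, int n, int k) {
    const queue_ptr q = ctx.stream();                           // backend in-order queue
    DnnlGemmWrapper::row_gemm(ctx, m, n, k,
                              A, DnnlGemmWrapper::to_dt<float>(),  // m x k input
                              B, DnnlGemmWrapper::to_dt<float>(),  // k x n input
                              C, DnnlGemmWrapper::to_dt<float>(),  // m x n output
                              q);
}
#endif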
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #include "ggml-impl.h" #include "common.hpp" #include "dequantize.hpp" #include "getrows.hpp" template static void k_get_rows( const void * src0, const int32_t * src1, dst_t * dst, int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ /*size_t s0,*/ size_t s1, size_t s2, size_t s3, /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, size_t s10, size_t s11, size_t s12, const sycl::nd_item<3> &item_ct1/*, size_t s13*/) { const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2)) * 2; const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1); const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) + item_ct1.get_local_id(0)) / ne12; const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) + item_ct1.get_local_id(0)) % ne12; if (i00 >= ne00) { return; } const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03; const int ib = i00/qk; // block index const int iqs = (i00%qk)/qr; // quant index const int iybs = i00 - i00%qk; // dst block start index const int y_offset = qr == 1 ? 1 : qk/2; // dequantize dfloat2 v; dequantize_kernel(src0_row, ib, iqs, v); dst_row[iybs + iqs + 0] = v.x(); dst_row[iybs + iqs + y_offset] = v.y(); } template static void k_get_rows_float( const src0_t * src0, const int32_t * src1, dst_t * dst, int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ /*size_t s0,*/ size_t s1, size_t s2, size_t s3, /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, size_t s10, size_t s11, size_t s12, const sycl::nd_item<3> &item_ct1/*, size_t s13*/) { const int i00 = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2); const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1); const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) + item_ct1.get_local_id(0)) / ne12; const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) + item_ct1.get_local_id(0)) % ne12; if (i00 >= ne00) { return; } const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03); dst_row[i00] = src0_row[i00]; } template static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const void *src0_dd, const int32_t *src1_dd, float *dst_dd, queue_ptr stream) { GGML_TENSOR_BINARY_OP_LOCALS const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE); const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE); const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x); // strides in elements //const size_t s0 = nb0 / ggml_element_size(dst); const size_t s1 = nb1 / ggml_element_size(dst); const size_t s2 = nb2 / ggml_element_size(dst); const size_t s3 = nb3 / ggml_element_size(dst); const size_t s10 = nb10 / ggml_element_size(src1); const size_t s11 = nb11 / ggml_element_size(src1); const size_t s12 = nb12 / ggml_element_size(src1); //const size_t s13 = nb13 / ggml_element_size(src1); GGML_ASSERT(ne00 % 2 == 0); stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, 
block_dims), [=](sycl::nd_item<3> item_ct1) { k_get_rows( src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2, s3, nb01, nb02, nb03, s10, s11, s12, item_ct1); }); GGML_UNUSED(dst); GGML_UNUSED(ctx); } template static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const src0_t *src0_dd, const int32_t *src1_dd, float *dst_dd, queue_ptr stream) { GGML_TENSOR_BINARY_OP_LOCALS const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE); const int block_num_x = (ne00 + SYCL_GET_ROWS_BLOCK_SIZE - 1) / SYCL_GET_ROWS_BLOCK_SIZE; const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x); // strides in elements //const size_t s0 = nb0 / ggml_element_size(dst); const size_t s1 = nb1 / ggml_element_size(dst); const size_t s2 = nb2 / ggml_element_size(dst); const size_t s3 = nb3 / ggml_element_size(dst); const size_t s10 = nb10 / ggml_element_size(src1); const size_t s11 = nb11 / ggml_element_size(src1); const size_t s12 = nb12 / ggml_element_size(src1); //const size_t s13 = nb13 / ggml_element_size(src1); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2, s3, nb01, nb02, nb03, s10, s11, s12, item_ct1); }); } GGML_UNUSED(dst); GGML_UNUSED(ctx); } void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->src[1]->type == GGML_TYPE_I32); GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(dst->src[0]->nb[0] == ggml_type_size(dst->src[0]->type)); GGML_ASSERT(dst->src[1]->nb[0] == ggml_type_size(dst->src[1]->type)); GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type)); const int32_t * src1_i32 = (const int32_t *) dst->src[1]->data; /* TODO: Refactor and remove duplicates */ switch (dst->src[0]->type) { case GGML_TYPE_F16: get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const sycl::half *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_F32: get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q4_0: get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q4_1: get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q5_0: get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q5_1: get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q8_0: get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, src1_i32, (float *)dst->data, ctx.stream()); break; default: // TODO: k-quants GGML_LOG_ERROR("%s: unsupported type: %s\n", __func__, ggml_type_name(dst->src[0]->type)); GGML_ABORT("fatal error"); } } ggml-org-ggml-3678254/src/ggml-sycl/getrows.hpp000066400000000000000000000007621512524704700212610ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
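// Illustrative sketch only (not from the upstream sources): getrows.cpp above
// implements GGML_OP_GET_ROWS, i.e. for every index r in the int32 tensor src1
// one row of src0 is copied (and dequantized when src0 is a quantized type) into
// the float32 destination. A scalar reference of the float path, ignoring the
// extra batch dimensions handled by i11/i12 in the kernels:
#if 0
#include <cstdint>

// dst[r][c] = src0[src1[r]][c]   for r in [0, n_rows_out), c in [0, ne00)
static void get_rows_reference(const float * src0, const int32_t * src1, float * dst,
                               int64_t ne00 /* row length */, int64_t n_rows_out) {
    for (int64_t r = 0; r < n_rows_out; ++r) {
        const float * src_row = src0 + src1[r] * ne00; // row picked by the index tensor
        float       * dst_row = dst  + r       * ne00;
        for (int64_t c = 0; c < ne00; ++c) {
            dst_row[c] = src_row[c];
        }
    }
}
#endif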
// See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #ifndef GGML_SYCL_GETROWS_HPP #define GGML_SYCL_GETROWS_HPP #include "common.hpp" void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor *dst); #endif // GGML_SYCL_GETROWS_HPP ggml-org-ggml-3678254/src/ggml-sycl/ggml-sycl.cpp000066400000000000000000005771251512524704700214740ustar00rootroot00000000000000// // MIT license // Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: MIT // // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC # include #endif #include #include "ggml-sycl.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-sycl/add-id.hpp" #include "ggml-sycl/backend.hpp" #include "ggml-sycl/common.hpp" #include "ggml-sycl/element_wise.hpp" #include "ggml-sycl/norm.hpp" #include "ggml-sycl/presets.hpp" #include "ggml-sycl/gemm.hpp" #include "ggml-sycl/set_rows.hpp" #include "ggml-sycl/set.hpp" #include "ggml-sycl/sycl_hw.hpp" #include "ggml-sycl/getrows.hpp" #include "ggml-sycl/repeat_back.hpp" #include "ggml-sycl/quantize.hpp" #include "ggml-sycl/ssm_conv.hpp" #include "ggml.h" static bool g_sycl_loaded = false; int g_ggml_sycl_debug = 0; int g_ggml_sycl_disable_optimize = 0; int g_ggml_sycl_disable_graph = 0; int g_ggml_sycl_disable_dnn = 0; int g_ggml_sycl_prioritize_dmmv = 0; int g_ggml_sycl_use_async_mem_op = 0; static ggml_sycl_device_info ggml_sycl_init() { ggml_sycl_device_info info = {}; info.device_count = dpct::dev_mgr::instance().device_count(); if (info.device_count == 0) { GGML_LOG_ERROR("%s: failed to initialize: %s\n", GGML_SYCL_NAME, __func__); return info; } GGML_ASSERT(info.device_count <= GGML_SYCL_MAX_DEVICES); int64_t total_vram = 0; /* This is a bit misleading; reserved for later */ // #if defined(SYCL_USE_XMX) // GGML_LOG_INFO("%s: SYCL_USE_XMX: yes\n", __func__); // #else // GGML_LOG_INFO("%s: SYCL_USE_XMX: no\n", __func__); // #endif for (int i = 0; i < info.device_count; ++i) { info.devices[i].vmm = 0; dpct::device_info prop; sycl::device device = dpct::dev_mgr::instance().get_device(i); SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info( prop, device))); info.default_tensor_split[i] = total_vram; total_vram += prop.get_global_mem_size(); info.devices[i].cc = 100 * prop.get_major_version() + 10 * prop.get_minor_version(); info.devices[i].nsm = prop.get_max_compute_units(); info.devices[i].opt_feature.reorder = device.ext_oneapi_architecture_is(syclex::arch_category::intel_gpu); info.devices[i].smpbo = prop.get_local_mem_size(); info.max_work_group_sizes[i] = prop.get_max_work_group_size(); } for (int id = 0; id < info.device_count; ++id) { info.default_tensor_split[id] /= total_vram; } return info; } const ggml_sycl_device_info & ggml_sycl_info() { static ggml_sycl_device_info info = ggml_sycl_init(); return info; } static void print_device_detail(int id, sycl::device &device, std::string device_type) { dpct::device_info prop; SYCL_CHECK(CHECK_TRY_ERROR( dpct::get_device_info(prop, device))); std::string version; version += std::to_string(prop.get_major_version()); version += "."; version += 
std::to_string(prop.get_minor_version()); device_type = std::regex_replace(device_type, std::regex("ext_oneapi_"), ""); std::string name = std::string(prop.get_name()); name = std::regex_replace(name, std::regex("\\(R\\)"), ""); name = std::regex_replace(name, std::regex("\\(TM\\)"), ""); auto global_mem_size = prop.get_global_mem_size()/1000000; GGML_LOG_INFO("|%2d|%19s|%39s|%7s|%7d|%8d|%5d|%6luM|%21s|\n", id, device_type.c_str(), name.c_str(), version.c_str(), prop.get_max_compute_units(), prop.get_max_work_group_size(), prop.get_max_sub_group_size(), global_mem_size, device.get_info().c_str()); } static void print_device_opt_feature(int device_count) { GGML_LOG_INFO("SYCL Optimization Feature:\n"); GGML_LOG_INFO( "|ID| Device Type|Reorder|\n"); GGML_LOG_INFO( "|--|-------------------|-------|\n"); std::map DeviceNums; for (int id = 0; id < device_count; ++id) { sycl::device device = dpct::dev_mgr::instance().get_device(id); std::string backend_type = get_device_backend_and_type(device); int type_id = DeviceNums[backend_type]++; std::stringstream device_type; device_type << "[" << backend_type << ":" << std::to_string(type_id) << "]"; std::string device_type_s = device_type.str(); device_type_s = std::regex_replace(device_type_s, std::regex("ext_oneapi_"), ""); GGML_LOG_INFO("|%2d|%19s|%7s|\n", id, device_type_s.c_str(), ggml_sycl_info().devices[id].opt_feature.reorder ? "Y": "N"); } } void ggml_backend_sycl_print_sycl_devices() { GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_print_sycl_devices\n"); int device_count = dpct::dev_mgr::instance().device_count(); std::map DeviceNums; GGML_LOG_INFO("Found %d SYCL devices:\n", device_count); GGML_LOG_INFO( "| | | | " " |Max | |Max |Global | |\n"); GGML_LOG_INFO( "| | | | " " |compute|Max work|sub |mem | |\n"); GGML_LOG_INFO( "|ID| Device Type| " "Name|Version|units |group |group|size | Driver version|\n"); GGML_LOG_INFO( "|--|-------------------|---------------------------------------|------" "-|-------|--------|-----|-------|---------------------|\n"); for (int id = 0; id < device_count; ++id) { sycl::device device = dpct::dev_mgr::instance().get_device(id); std::string backend_type = get_device_backend_and_type(device); int type_id = DeviceNums[backend_type]++; std::stringstream device_type; device_type << "[" << backend_type << ":" << std::to_string(type_id) << "]"; print_device_detail(id, device, device_type.str()); } print_device_opt_feature(device_count); } static inline int get_sycl_env(const char *env_name, int default_val) { char *user_device_string = getenv(env_name); int user_number = default_val; unsigned n; if (user_device_string != NULL && sscanf(user_device_string, " %u", &n) == 1) { user_number = (int)n; } else { user_number = default_val; } return user_number; } static void ggml_check_sycl() try { static bool initialized = false; if (!initialized) { g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0); g_ggml_sycl_disable_optimize = get_sycl_env("GGML_SYCL_DISABLE_OPT", 0); g_ggml_sycl_disable_graph = get_sycl_env("GGML_SYCL_DISABLE_GRAPH", 1); g_ggml_sycl_disable_dnn = get_sycl_env("GGML_SYCL_DISABLE_DNN", 0); g_ggml_sycl_prioritize_dmmv = get_sycl_env("GGML_SYCL_PRIORITIZE_DMMV", 0); GGML_SYCL_DEBUG("[SYCL] call ggml_check_sycl\n"); GGML_LOG_INFO("Running with Environment Variables:\n"); GGML_LOG_INFO(" GGML_SYCL_DEBUG: %d\n", g_ggml_sycl_debug); GGML_LOG_INFO(" GGML_SYCL_DISABLE_OPT: %d\n", g_ggml_sycl_disable_optimize); #ifdef GGML_SYCL_GRAPH GGML_LOG_INFO(" GGML_SYCL_DISABLE_GRAPH: %d\n", g_ggml_sycl_disable_graph); 
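// Illustrative sketch only (not from the upstream sources): the toggles logged
// above are plain integer environment variables read through get_sycl_env()
// (getenv + sscanf, falling back to the supplied default; note that
// GGML_SYCL_DISABLE_GRAPH defaults to 1 in this file). A minimal usage sketch,
// with setenv() being POSIX-only and used here just for illustration:
#if 0
#include <cstdlib>

static void example_env_toggle() {
    // equivalent to launching with `GGML_SYCL_DEBUG=1 ./my_app`
    setenv("GGML_SYCL_DEBUG", "1", /*overwrite=*/1);
    const int dbg = get_sycl_env("GGML_SYCL_DEBUG", /*default_val=*/0); // parses "1" -> 1
    (void) dbg;
}
#endif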
#else GGML_LOG_INFO(" GGML_SYCL_DISABLE_GRAPH: graph disabled by compile flag\n"); #endif #if GGML_SYCL_DNNL GGML_LOG_INFO(" GGML_SYCL_DISABLE_DNN: %d\n", g_ggml_sycl_disable_dnn); #else GGML_LOG_INFO(" GGML_SYCL_DISABLE_DNN: DNN disabled by compile flag\n"); #endif GGML_LOG_INFO(" GGML_SYCL_PRIORITIZE_DMMV: %d\n", g_ggml_sycl_prioritize_dmmv); GGML_LOG_INFO("Build with Macros:\n"); #if defined(GGML_SYCL_FORCE_MMQ) GGML_LOG_INFO(" GGML_SYCL_FORCE_MMQ: yes\n"); #else GGML_LOG_INFO(" GGML_SYCL_FORCE_MMQ: no\n"); #endif #if defined(GGML_SYCL_F16) GGML_LOG_INFO(" GGML_SYCL_F16: yes\n"); #else GGML_LOG_INFO(" GGML_SYCL_F16: no\n"); #endif /* NOT REMOVE, keep it for next optimize for XMX. #if defined(SYCL_USE_XMX) fprintf(stderr, "%s: SYCL_USE_XMX: yes\n", __func__); #else fprintf(stderr, "%s: SYCL_USE_XMX: no\n", __func__); #endif */ // Currently, we only use async malloc / free when graphs are enabled as it is required for the calls to be // properly recorded. As this SYCL extension matures it may be beneficial to enable as the default path and in // other places. #if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC g_ggml_sycl_use_async_mem_op = !g_ggml_sycl_disable_graph; if (g_ggml_sycl_use_async_mem_op) { for (unsigned int i = 0; i < dpct::dev_mgr::instance().device_count(); ++i) { if (!dpct::dev_mgr::instance().get_device(i).has(sycl::aspect::ext_oneapi_async_memory_alloc)) { g_ggml_sycl_use_async_mem_op = 0; break; } } } #endif if (CHECK_TRY_ERROR(g_all_sycl_device_count = dpct::dev_mgr::instance().device_count()) != 0) { initialized = true; g_sycl_loaded = false; return; } GGML_ASSERT(g_all_sycl_device_count <= GGML_SYCL_MAX_DEVICES); initialized = true; g_sycl_loaded = true; ggml_backend_sycl_print_sycl_devices(); } } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } /* device_index: device index from 0 to n (continue numbers). It is used for device select/set in SYCL backend internal data structure. 
*/ inline void check_allow_gpu_index(const int device_index) { if (device_index >= ggml_sycl_info().device_count) { char error_buf[256]; snprintf( error_buf, sizeof(error_buf), "%s error: device_index:%d is out of range: [0-%d]", __func__, device_index, ggml_sycl_info().device_count - 1); GGML_LOG_ERROR("%s\n", error_buf); assert(false); } } GGML_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len) try { GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_gpu_list\n"); for(int i=0;i=max_len) break; id_list[i] = i; } return; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } // sycl buffer struct ggml_backend_sycl_buffer_context { int device; void * dev_ptr = nullptr; queue_ptr stream; std::string name; optimize_feature opt_feature; std::vector tensor_extras; ggml_backend_sycl_buffer_context(int device, void * dev_ptr, queue_ptr stream) : device(device), dev_ptr(dev_ptr), stream(stream) { check_allow_gpu_index(device); name = (GGML_SYCL_NAME + std::to_string(device)); opt_feature = ggml_sycl_info().devices[device].opt_feature; } ~ggml_backend_sycl_buffer_context() { if (dev_ptr != nullptr) { ggml_sycl_set_device(device); SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(dev_ptr, *stream))); } //release extra used by tensors for (ggml_tensor_extra_gpu * extra : tensor_extras) { release_extra_gpu(extra); } } }; static const char * ggml_backend_sycl_buffer_type_get_name(ggml_backend_buffer_type_t buft); static bool ggml_backend_buffer_is_sycl(ggml_backend_buffer_t buffer) { return buffer->buft->iface.get_name == ggml_backend_sycl_buffer_type_get_name; } static void ggml_backend_sycl_buffer_free_buffer(ggml_backend_buffer_t buffer) try { ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context; ggml_sycl_set_device(ctx->device); delete ctx; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void * ggml_backend_sycl_buffer_get_base(ggml_backend_buffer_t buffer) { ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context; return ctx->dev_ptr; } static enum ggml_status ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor, "\n").c_str()); ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context; if (tensor->view_src != NULL) { assert(tensor->view_src->buffer->buft == buffer->buft); return GGML_STATUS_SUCCESS; } if ((tensor->type == GGML_TYPE_Q4_0 || tensor->type == GGML_TYPE_Q4_K || tensor->type == GGML_TYPE_Q6_K) && !g_ggml_sycl_disable_optimize) { ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; tensor->extra = extra; ctx->tensor_extras.push_back(extra); //used to release it when destroy ctx. 
} if (ggml_is_quantized(tensor->type)) { // initialize padding to 0 to avoid possible NaN values size_t original_size = ggml_nbytes(tensor); size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor); if (padded_size > original_size && tensor->view_src == nullptr) { SYCL_CHECK(CHECK_TRY_ERROR(ctx->stream->memset( (char *)tensor->data + original_size, 0, padded_size - original_size).wait())); } } return GGML_STATUS_SUCCESS; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context; ggml_sycl_set_device(ctx->device); auto stream = &(dpct::dev_mgr::instance().get_device(ctx->device).default_queue()); SYCL_CHECK(CHECK_TRY_ERROR(dpct::dev_mgr::instance().get_device(ctx->device).queues_wait_and_throw())); #ifndef _WIN32 // Note: Use host buffer to save the data from mmap(), then copy to device. It's workaround for mmap() issue on PVC GPU. // This function will be called during load model from disk. Use memory buffer replace dynamic won't save more time and brings potential memory leak risk here. char * host_buf = (char *) malloc(size); memcpy(host_buf, data, size); SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy((char *) tensor->data + offset, host_buf, size).wait())); free(host_buf); #else SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy((char *) tensor->data + offset, data, size).wait())); #endif } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor *tensor, void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context; ggml_sycl_set_device(ctx->device); auto stream = dpct::dev_mgr::instance().get_device(ctx->device).default_queue(); SYCL_CHECK(CHECK_TRY_ERROR( stream.memcpy(data, (const char *)tensor->data + offset, size) .wait())); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void dev2dev_memcpy(sycl::queue &q_dst, sycl::queue &q_src, void *ptr_dst, const void *ptr_src, size_t size) { char *host_buf = (char *)malloc(size); q_src.memcpy(host_buf, (const char *)ptr_src, size).wait(); q_dst.memcpy((char *)ptr_dst, host_buf, size).wait(); free(host_buf); } static bool ggml_backend_sycl_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor *src, ggml_tensor *dst) try { bool is_cpy_supported = ggml_backend_buffer_is_sycl(src->buffer); GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": dst", dst).c_str()); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" src", src).c_str()); GGML_SYCL_DEBUG(" is_cpy_supported=%d\n", is_cpy_supported); if (is_cpy_supported) { 
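// Illustrative sketch only (not from the upstream sources): dev2dev_memcpy() just
// above stages the payload through a pageable host buffer with two blocking
// memcpy calls, which is the workaround used below for cross-device copies. A
// hedged usage sketch:
#if 0
static void copy_tensor_across_devices(sycl::queue & q_dst, sycl::queue & q_src,
                                       ggml_tensor * dst, const ggml_tensor * src) {
    // device(src) -> host -> device(dst); both hops wait() before returning
    dev2dev_memcpy(q_dst, q_src, dst->data, src->data, ggml_nbytes(src));
}
#endif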
ggml_backend_sycl_buffer_context * src_ctx = (ggml_backend_sycl_buffer_context *)src->buffer->context; ggml_backend_sycl_buffer_context * dst_ctx = (ggml_backend_sycl_buffer_context *)dst->buffer->context; ggml_sycl_set_device(src_ctx->device); /* DPCT1009:198: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ SYCL_CHECK(CHECK_TRY_ERROR( dpct::dev_mgr::instance().get_device(src_ctx->device).queues_wait_and_throw())); ggml_sycl_set_device(dst_ctx->device); /* DPCT1009:199: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ SYCL_CHECK(CHECK_TRY_ERROR( dpct::dev_mgr::instance().get_device(dst_ctx->device).queues_wait_and_throw())); /* DPCT1009:200: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ queue_ptr stream_dst = dst_ctx->stream; queue_ptr stream_src = src_ctx->stream; size_t size = ggml_nbytes(src); //todo. it's dirty solutino to walkaroud known issue:device2device cross GPUs. dev2dev_memcpy(*stream_dst, *stream_src, dst->data, src->data, size); //todo, it's known issue:error in device2device cross GPUs. reused when the issue is fixed. DON"T remove #if 0 SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy( (char *)dst->data, (const char *)src->data, size).wait())); /* DPCT1009:201: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ SYCL_CHECK(CHECK_TRY_ERROR( dpct::dev_mgr::instance().get_device(dst_ctx->device).queues_wait_and_throw())); #endif return true; } return false; GGML_UNUSED(buffer); } catch (const sycl::exception & exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) try { GGML_SYCL_DEBUG("[SYCL] call %s: size=%zu\n", __func__, buffer->size); ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context; ggml_sycl_set_device(ctx->device); queue_ptr stream = ctx->stream; SYCL_CHECK( CHECK_TRY_ERROR(dpct::get_current_device().queues_wait_and_throw())); SYCL_CHECK(CHECK_TRY_ERROR((*stream) .memset(ctx->dev_ptr, value, buffer->size) .wait())); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu value=%u\n", size, offset, value); ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context; SYCL_CHECK(ggml_sycl_set_device(ctx->device)); auto stream = &(dpct::dev_mgr::instance().get_device(ctx->device).default_queue()); if (size == 0) { return; // Nothing to do } if (tensor->data == nullptr) { GGML_ABORT("Error: Tensor data pointer is null.\n"); } void * target_ptr = static_cast(tensor->data) + offset; SYCL_CHECK(CHECK_TRY_ERROR((*stream).memset(target_ptr, 
value, size))); SYCL_CHECK(CHECK_TRY_ERROR((*stream).wait())); } static void ggml_backend_sycl_buffer_reset(ggml_backend_buffer_t buffer) { GGML_SYCL_DEBUG("[SYCL] call %s\n", __func__); if (buffer == nullptr) { return; } ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context; if (ctx != nullptr) { for (ggml_tensor_extra_gpu * extra : ctx->tensor_extras) { release_extra_gpu(extra); } ctx->tensor_extras.clear(); // reset the tensor_extras vector } } static const ggml_backend_buffer_i ggml_backend_sycl_buffer_interface = { /* .free_buffer = */ ggml_backend_sycl_buffer_free_buffer, /* .get_base = */ ggml_backend_sycl_buffer_get_base, /* .init_tensor = */ ggml_backend_sycl_buffer_init_tensor, /* .memset_tensor = */ ggml_backend_sycl_buffer_memset_tensor, /* .set_tensor = */ ggml_backend_sycl_buffer_set_tensor, /* .get_tensor = */ ggml_backend_sycl_buffer_get_tensor, /* .cpy_tensor = */ ggml_backend_sycl_buffer_cpy_tensor, /* .clear = */ ggml_backend_sycl_buffer_clear, /* .reset = */ ggml_backend_sycl_buffer_reset, }; // sycl buffer type struct ggml_backend_sycl_buffer_type_context { int device; std::string name; // each buffer type has its own stream queue_ptr stream = nullptr; }; static const char * ggml_backend_sycl_buffer_type_get_name(ggml_backend_buffer_type_t buft) { ggml_backend_sycl_buffer_type_context * ctx = (ggml_backend_sycl_buffer_type_context *)buft->context; return ctx->name.c_str(); } static ggml_backend_buffer_t ggml_backend_sycl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) try { ggml_backend_sycl_buffer_type_context * buft_ctx = (ggml_backend_sycl_buffer_type_context *)buft->context; ggml_sycl_set_device(buft_ctx->device); const queue_ptr stream = buft_ctx->stream; size = std::max(size, (size_t)1); // syclMalloc returns null for size 0 void * dev_ptr; SYCL_CHECK(CHECK_TRY_ERROR(dev_ptr = (void *)sycl::malloc_device( size, *stream))); if (!dev_ptr) { GGML_LOG_ERROR("%s: can't allocate %lu Bytes of memory on device\n", __func__, size); return nullptr; } ggml_backend_sycl_buffer_context * ctx = new ggml_backend_sycl_buffer_context(buft_ctx->device, dev_ptr, buft_ctx->stream); return ggml_backend_buffer_init(buft, ggml_backend_sycl_buffer_interface, ctx, size); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static size_t ggml_backend_sycl_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 128; GGML_UNUSED(buft); } static size_t ggml_backend_sycl_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { return dpct::get_current_device().get_max_mem_alloc_size(); GGML_UNUSED(buft); } static size_t ggml_backend_sycl_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { size_t size = ggml_nbytes(tensor); int64_t ne0 = tensor->ne[0]; if (ggml_is_quantized(tensor->type)) { if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } } return size; GGML_UNUSED(buft); } static const ggml_backend_buffer_type_i ggml_backend_sycl_buffer_type_interface = { /* .get_name = */ ggml_backend_sycl_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_sycl_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_sycl_buffer_type_get_alignment, /* .get_max_size = */ ggml_backend_sycl_buffer_type_get_max_size, /* .get_alloc_size = */ ggml_backend_sycl_buffer_type_get_alloc_size, /* .is_host = */ NULL, 
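// Illustrative sketch only (not from the upstream sources): get_alloc_size above
// pads quantized rows so that ne[0] is treated as a multiple of MATRIX_ROW_PADDING
// (512 elements, per the "pad last row to a multiple of 512 elements" comments in
// this file). A worked example, assuming Q4_0's 32-element / 18-byte blocks:
//   ne0 = 4000 -> 4000 % 512 = 416 -> pad = 96 elements
//   extra bytes = ggml_row_size(GGML_TYPE_Q4_0, 96) = 96 / 32 * 18 = 54
#if 0
static size_t q4_0_padded_alloc_size(size_t nbytes, int64_t ne0) {
    const int64_t pad = ne0 % 512 ? 512 - ne0 % 512 : 0; // elements of padding
    return nbytes + (size_t)(pad / 32) * 18;             // Q4_0: 32 elements per 18-byte block
}
#endif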
}; ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device) { static std::mutex mutex; std::lock_guard lock(mutex); auto dev_count = ggml_backend_sycl_get_device_count(); if (device>=dev_count or device<0) { GGML_LOG_ERROR("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d], miss to call ggml_backend_sycl_set_single_device()\n", device, dev_count-1); GGML_ASSERT(devicedevice; if (device>=ggml_sycl_info().device_count or device<0) { GGML_LOG_ERROR("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d], miss to call ggml_backend_sycl_set_single_device()\n", device, ggml_sycl_info().device_count-1); GGML_ASSERT(devicestream(i, 0)}, }; } ggml_backend_sycl_buffer_type_initialized = true; } return &ggml_backend_sycl_buffer_types[device]; } // sycl split buffer static int64_t get_row_rounding(ggml_type type, const std::array & tensor_split) { int64_t min_compute_capability = INT_MAX; int64_t max_compute_capability = INT_MIN; for (int i = 0; i < ggml_sycl_info().device_count; ++i) { if (tensor_split[i] < (i + 1 < ggml_sycl_info().device_count ? tensor_split[i + 1] : 1.0f)) { if (min_compute_capability > ggml_sycl_info().devices[i].cc) { min_compute_capability = ggml_sycl_info().devices[i].cc; } if (max_compute_capability < ggml_sycl_info().devices[i].cc) { max_compute_capability = ggml_sycl_info().devices[i].cc; } } } switch(type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: return max_compute_capability >= VER_GEN9 ? 128 : 64; case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: return 64; case GGML_TYPE_F16: case GGML_TYPE_F32: return 1; case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ1_S: case GGML_TYPE_IQ1_M: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: return max_compute_capability >= VER_GEN9 ? 128 : 64; case GGML_TYPE_IQ3_S: return max_compute_capability >= VER_GEN9 ? 128 : 64; case GGML_TYPE_Q6_K: return 64; default: GGML_ABORT("fatal error"); } } static void get_row_split(int64_t * row_low, int64_t * row_high, const ggml_tensor * tensor, const std::array & tensor_split, int id) { const int64_t nrows = ggml_nrows(tensor); const int64_t rounding = get_row_rounding(tensor->type, tensor_split); *row_low = id == 0 ? 
0 : nrows*tensor_split[id]; *row_low -= *row_low % rounding; if (id == ggml_sycl_info().device_count - 1) { *row_high = nrows; } else { *row_high = nrows*tensor_split[id + 1]; *row_high -= *row_high % rounding; } } static size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]); } struct ggml_backend_sycl_split_buffer_type_context { std::array tensor_split; }; struct ggml_backend_sycl_split_buffer_context { ~ggml_backend_sycl_split_buffer_context() try { for (ggml_tensor_extra_gpu * extra : tensor_extras) { release_extra_gpu(extra, streams); } } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } std::vector tensor_extras; std::vector streams; }; static void ggml_backend_sycl_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context; delete ctx; } static void * ggml_backend_sycl_split_buffer_get_base(ggml_backend_buffer_t buffer) { // the pointers are stored in the tensor extras, this is just a dummy address and never dereferenced return (void *)0x1000; GGML_UNUSED(buffer); } static enum ggml_status ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor, "\n").c_str()); GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context; ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context; const int64_t ne0 = tensor->ne[0]; ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; ctx->tensor_extras.push_back(extra); ctx->streams.push_back(&(dpct::get_current_device().default_queue())); for (int i = 0; i < ggml_sycl_info().device_count; ++i) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } size_t size = ggml_nbytes_split(tensor, nrows_split); const size_t original_size = size; // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } // FIXME: do not crash if SYCL Buffer alloc fails // currently, init_tensor cannot fail, it needs to be fixed in ggml-backend first ggml_sycl_set_device(i); const queue_ptr stream = ctx->streams[i]; char * buf; /* DPCT1009:208: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ SYCL_CHECK(CHECK_TRY_ERROR(buf = (char *)sycl::malloc_device( size, *stream))); if (!buf) { char err_buf[1024]; snprintf(err_buf, 1023, "%s: can't allocate %lu Bytes of memory on device\n", __func__, size); throw std::runtime_error(err_buf); } // set padding to 0 to avoid possible NaN values if (size > original_size) { /* DPCT1009:209: SYCL uses exceptions to report errors and does not use the error codes. 
The original code was commented out and a warning string was inserted. You need to rewrite this code. */ SYCL_CHECK(CHECK_TRY_ERROR( (*stream) .memset(buf + original_size, 0, size - original_size) .wait())); } extra->data_device[i] = buf; for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) { /* DPCT1009:210: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ SYCL_CHECK( CHECK_TRY_ERROR(extra->events[i][is] = new sycl::event())); } } tensor->extra = extra; return GGML_STATUS_SUCCESS; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_split_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); // split tensors must always be set in their entirety at once GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context; ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context; const int64_t ne0 = tensor->ne[0]; const size_t nb1 = tensor->nb[1]; ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra; for (int i = 0; i < ggml_sycl_info().device_count; ++i) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } const size_t offset_split = row_low*nb1; size_t size = ggml_nbytes_split(tensor, nrows_split); const size_t original_size = size; // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } const char * buf_host = (const char *)data + offset_split; /* DPCT1009:211: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. 
*/ ggml_sycl_set_device(i); const queue_ptr stream = ctx->streams[i]; SYCL_CHECK(CHECK_TRY_ERROR( (*stream) .memcpy(extra->data_device[i], buf_host, original_size) .wait())); } } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor *tensor, void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); // split tensors must always be set in their entirety at once GGML_ASSERT(offset == 0); GGML_ASSERT(size == ggml_nbytes(tensor)); ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context; ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context; const int64_t ne0 = tensor->ne[0]; const size_t nb1 = tensor->nb[1]; ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra; for (int i = 0; i < ggml_sycl_info().device_count; ++i) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } const size_t offset_split = row_low*nb1; size_t size = ggml_nbytes_split(tensor, nrows_split); const size_t original_size = size; // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } char * buf_host = (char *)data + offset_split; /* DPCT1009:212: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. 
*/ ggml_sycl_set_device(i); const queue_ptr stream = ctx->streams[i]; SYCL_CHECK(CHECK_TRY_ERROR( (*stream) .memcpy(buf_host, extra->data_device[i], original_size) .wait())); } } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_backend_sycl_split_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { GGML_UNUSED(buffer); GGML_UNUSED(value); } static struct ggml_backend_buffer_i ggml_backend_sycl_split_buffer_interface = { /* .free_buffer = */ ggml_backend_sycl_split_buffer_free_buffer, /* .get_base = */ ggml_backend_sycl_split_buffer_get_base, /* .init_tensor = */ ggml_backend_sycl_split_buffer_init_tensor, /* .memset_tensor = */ NULL, /* .set_tensor = */ ggml_backend_sycl_split_buffer_set_tensor, /* .get_tensor = */ ggml_backend_sycl_split_buffer_get_tensor, /* .cpy_tensor = */ NULL, /* .clear = */ ggml_backend_sycl_split_buffer_clear, /* .reset = */ NULL, }; // sycl split buffer type static const char * ggml_backend_sycl_split_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return GGML_SYCL_NAME "_Split"; GGML_UNUSED(buft); } static bool ggml_backend_buffer_is_sycl_split(ggml_backend_buffer_t buffer) { return buffer->buft->iface.get_name == ggml_backend_sycl_split_buffer_type_get_name; } static ggml_backend_buffer_t ggml_backend_sycl_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { // since we don't know the exact split after rounding, we cannot allocate the device buffers at this point // instead, we allocate them for each tensor separately in init_tensor // however, the size still represents the maximum cumulative size of all the device buffers after the tensors are allocated, // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct. 
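// Illustrative sketch only (not from the upstream sources): as the comment above
// explains, allocation is deferred to init_tensor because the per-device row
// ranges are only known after rounding. get_row_split() derives them from the
// cumulative split fractions scaled by the row count and rounded down to the
// type-dependent granularity returned by get_row_rounding(). A standalone
// restatement plus a worked example:
#if 0
#include <array>
#include <cstdint>

static void split_rows(int id, int device_count, int64_t nrows, int64_t rounding,
                       const std::array<float, 2> & tensor_split,
                       int64_t & row_low, int64_t & row_high) {
    row_low  = id == 0 ? 0 : (int64_t)(nrows * tensor_split[id]);
    row_low -= row_low % rounding;
    if (id == device_count - 1) {
        row_high = nrows;
    } else {
        row_high  = (int64_t)(nrows * tensor_split[id + 1]);
        row_high -= row_high % rounding;
    }
}
// e.g. nrows = 1000, rounding = 64, tensor_split = {0.0f, 0.6f}:
//   device 0 -> rows [0, 576)    (600 rounded down to a multiple of 64)
//   device 1 -> rows [576, 1000)
#endif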
ggml_backend_sycl_split_buffer_context * ctx = new ggml_backend_sycl_split_buffer_context(); return ggml_backend_buffer_init(buft, ggml_backend_sycl_split_buffer_interface, ctx, size); } static size_t ggml_backend_sycl_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 128; GGML_UNUSED(buft); } static size_t ggml_backend_sycl_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { ggml_backend_sycl_split_buffer_type_context * ctx = (ggml_backend_sycl_split_buffer_type_context *)buft->context; size_t total_size = 0; const int64_t ne0 = tensor->ne[0]; for (int i = 0; i < ggml_sycl_info().device_count; ++i) { int64_t row_low, row_high; get_row_split(&row_low, &row_high, tensor, ctx->tensor_split, i); int64_t nrows_split = row_high - row_low; if (nrows_split == 0) { continue; } total_size += ggml_nbytes_split(tensor, nrows_split); // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses if (ne0 % MATRIX_ROW_PADDING != 0) { total_size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } } return total_size; } static bool ggml_backend_sycl_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return false; GGML_UNUSED(buft); } static ggml_backend_buffer_type_i ggml_backend_sycl_split_buffer_type_interface = { /* .get_name = */ ggml_backend_sycl_split_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_sycl_split_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_sycl_split_buffer_type_get_alignment, /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_sycl_split_buffer_type_get_alloc_size, /* .is_host = */ ggml_backend_sycl_split_buffer_type_is_host, }; ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split) { static std::mutex mutex; std::lock_guard lock(mutex); GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_split_buffer_type\n"); ggml_check_sycl(); // FIXME: this is not thread safe static std::map, struct ggml_backend_buffer_type> buft_map; std::array tensor_split_arr = {}; bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_SYCL_MAX_DEVICES, [](float x) { return x == 0.0f; }); if (all_zero) { tensor_split_arr = ggml_sycl_info().default_tensor_split; } else { float split_sum = 0.0f; for (int i = 0; i < ggml_sycl_info().device_count; ++i) { tensor_split_arr[i] = split_sum; split_sum += tensor_split[i]; } for (int i = 0; i < ggml_sycl_info().device_count; ++i) { tensor_split_arr[i] /= split_sum; } } auto it = buft_map.find(tensor_split_arr); if (it != buft_map.end()) { return &it->second; } struct ggml_backend_buffer_type buft { /* .iface = */ ggml_backend_sycl_split_buffer_type_interface, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_sycl_reg(), 0), /* .context = */ new ggml_backend_sycl_split_buffer_type_context{tensor_split_arr}, }; auto result = buft_map.emplace(tensor_split_arr, buft); return &result.first->second; } // host buffer type static const char * ggml_backend_sycl_host_buffer_type_name(ggml_backend_buffer_type_t buft) { return GGML_SYCL_NAME "_Host"; GGML_UNUSED(buft); } static void ggml_backend_sycl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_sycl_host_free(buffer->context); } static ggml_backend_buffer_t ggml_backend_sycl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { void * ptr = ggml_sycl_host_malloc(size); if (ptr == nullptr) { // fallback to cpu buffer return 
ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); } // FIXME: this is a hack to avoid having to implement a new buffer type ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; buffer->iface.free_buffer = ggml_backend_sycl_host_buffer_free_buffer; return buffer; } ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type() { GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_host_buffer_type\n"); static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_type_host = { /* .iface = */ { /* .get_name = */ ggml_backend_sycl_host_buffer_type_name, /* .alloc_buffer = */ ggml_backend_sycl_host_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, /* .get_max_size = */ NULL, // TODO: return device.maxBufferLength /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, }, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_sycl_reg(), 0), /* .context = */ nullptr, }; return &ggml_backend_sycl_buffer_type_host; } // buffer pool for sycl (legacy) struct ggml_sycl_pool_leg : public ggml_sycl_pool { static const int MAX_SYCL_BUFFERS = 256; int device; queue_ptr qptr; struct ggml_sycl_buffer { void * ptr = nullptr; size_t size = 0; }; ggml_sycl_buffer buffer_pool[MAX_SYCL_BUFFERS] = {}; size_t pool_size = 0; explicit ggml_sycl_pool_leg(queue_ptr qptr_, int device_) : device(device_), qptr(qptr_) {} ~ggml_sycl_pool_leg() { for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) { ggml_sycl_buffer & b = buffer_pool[i]; if (b.ptr != nullptr) { SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(b.ptr, *qptr))); pool_size -= b.size; } } GGML_ASSERT(pool_size == 0); } void * alloc(size_t size, size_t * actual_size) override { #ifdef DEBUG_sycl_MALLOC int nnz = 0; size_t max_size = 0; #endif size_t best_diff = 1ull << 36; int ibest = -1; for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) { ggml_sycl_buffer& b = buffer_pool[i]; if (b.ptr != nullptr) { #ifdef DEBUG_sycl_MALLOC ++nnz; if (b.size > max_size) max_size = b.size; #endif if (b.size >= size) { size_t diff = b.size - size; if (diff < best_diff) { best_diff = diff; ibest = i; if (!best_diff) { void * ptr = b.ptr; *actual_size = b.size; b.ptr = nullptr; b.size = 0; return ptr; } } } } } if (ibest >= 0) { ggml_sycl_buffer& b = buffer_pool[ibest]; void * ptr = b.ptr; *actual_size = b.size; b.ptr = nullptr; b.size = 0; return ptr; } void * ptr; size_t look_ahead_size = (size_t) (1.05 * size); SYCL_CHECK( CHECK_TRY_ERROR(ptr = (void *)sycl::malloc_device( look_ahead_size, *qptr))); if (!ptr) { GGML_LOG_ERROR("%s: can't allocate %lu Bytes of memory on device/GPU\n", __func__, look_ahead_size); return nullptr; } *actual_size = look_ahead_size; pool_size += look_ahead_size; #ifdef DEBUG_SYCL_MALLOC GGML_LOG_DEBUG("%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, id, nnz, (uint32_t)(max_size/1024/1024), (uint32_t)(g_sycl_pool_size[id]/1024/1024), (uint32_t)(size/1024/1024)); #endif // GGML_SYCL_DEBUG("ggml_sycl_pool_malloc_leg look_ahead_size=%lu, return %p\n", look_ahead_size, ptr); return ptr; } void free(void * ptr, size_t size) override { for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) { ggml_sycl_buffer& b = buffer_pool[i]; if (b.ptr == nullptr) { b.ptr = ptr; b.size = size; return; } } GGML_LOG_WARN("WARNING: sycl buffer pool full, increase MAX_sycl_BUFFERS\n"); SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(ptr, *qptr))); pool_size -= size; } }; struct 
ggml_sycl_pool_host : public ggml_sycl_pool { queue_ptr qptr; int device; inline static int counter{ 0 }; struct ggml_sycl_buffer { void * ptr = nullptr; size_t size = 0; }; // Set arbitrarly to 64 static constexpr int MAX_POOL_SIZE{ 64 }; std::vector buffer_pool = std::vector(MAX_POOL_SIZE); size_t pool_size = 0; explicit ggml_sycl_pool_host(queue_ptr qptr_, int device_) : qptr(qptr_), device(device_) {} ~ggml_sycl_pool_host() { for (int i = 0; i < MAX_POOL_SIZE; ++i) { ggml_sycl_buffer & b = buffer_pool[i]; if (b.ptr != nullptr) { SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(b.ptr, *qptr))); b.ptr = nullptr; pool_size -= b.size; b.size = 0; } } counter = 0; } void * alloc(size_t size, size_t * actual_size) override { if (counter == MAX_POOL_SIZE) { ggml_sycl_buffer b = buffer_pool[0]; void * ptr = b.ptr; *actual_size = b.size; counter = 1; return ptr; } ggml_sycl_buffer & b = buffer_pool[counter]; if (b.ptr == nullptr) { void * ptr; SYCL_CHECK(CHECK_TRY_ERROR(ptr = (void *) sycl::malloc_host(size, *qptr))); if (!ptr) { GGML_LOG_ERROR("%s: can't allocate %lu Bytes of memory on host\n", __func__, size); return nullptr; } pool_size += size; *actual_size = size; counter = counter + 1; return ptr; } else { ++counter; b.size = size; return b.ptr; } } void free(void * ptr, size_t size) override { // if the pool is not completed add the pointer to it in place of the first nullptr found. // Otherwise do nothing, pointers will be freed once the pool is deallocated. for (int i = 0; i < MAX_POOL_SIZE; ++i) { ggml_sycl_buffer & b = buffer_pool[i]; if (b.ptr == nullptr) { b.ptr = ptr; b.size = size; return; } } } }; std::unique_ptr ggml_backend_sycl_context::new_pool_for_host(queue_ptr qptr, int device) { // return pool for the host to speed up memory management return std::unique_ptr(new ggml_sycl_pool_host(qptr, device)); } std::unique_ptr ggml_backend_sycl_context::new_pool_for_device(queue_ptr qptr, int device) { // TBD: NO VMM support // if (ggml_sycl_info().devices[device].vmm) { // return std::unique_ptr(new ggml_sycl_pool_vmm(device)); // } return std::unique_ptr(new ggml_sycl_pool_leg(qptr, device)); } // TBD pool with virtual memory management // struct ggml_sycl_pool_vmm : public ggml_sycl_pool /// kernels typedef void (*ggml_sycl_op_mul_mat_t)( ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i, float *dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, const queue_ptr &stream); static void mul_mat_p021_f16_f32( const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y, const sycl::nd_item<3> &item_ct1) { const sycl::half *x = (const sycl::half *)vx; const int row_x = item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1); const int channel = item_ct1.get_local_range(0) * item_ct1.get_group(0) + item_ct1.get_local_id(0); const int channel_x = channel / (nchannels_y / nchannels_x); const int nrows_y = ncols_x; const int nrows_dst = nrows_x; const int row_dst = row_x; float tmp = 0.0f; for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += item_ct1.get_local_range(2)) { const int col_x = col_x0 + item_ct1.get_local_id(2); if (col_x >= ncols_x) { break; } // x is transposed and permuted const int ix = row_x*nchannels_x*ncols_x + channel_x*ncols_x + col_x; const float xi = sycl::vec(x[ix]) 
.convert()[0]; const int row_y = col_x; // y is not transposed but permuted const int iy = channel*nrows_y + row_y; tmp += xi * y[iy]; } // dst is not transposed and not permuted const int idst = channel*nrows_dst + row_dst; // sum up partial sums and write back result #pragma unroll for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (item_ct1.get_local_id(2) == 0) { dst[idst] = tmp; } } static void mul_mat_vec_nc_f16_f32( // nc == non-contiguous const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int row_stride_x, const int channel_stride_x,const int channel_stride_y, const int channel_x_divisor, const sycl::nd_item<3> &item_ct1) { const sycl::half *x = (const sycl::half *)vx; const int row_x = item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1); const int channel = item_ct1.get_local_range(0) * item_ct1.get_group(0) + item_ct1.get_local_id(0); const int channel_x = channel / channel_x_divisor; const int nrows_dst = nrows_x; const int row_dst = row_x; const int idst = channel*nrows_dst + row_dst; float tmp = 0.0f; for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += item_ct1.get_local_range(2)) { const int col_x = col_x0 + item_ct1.get_local_id(2); if (col_x >= ncols_x) { break; } const int row_y = col_x; const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x; const int iy = channel * channel_stride_y + row_y; const float xi = sycl::vec(x[ix]) .convert()[0]; tmp += xi * y[iy]; } // sum up partial sums and write back result #pragma unroll for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) { tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask); } if (item_ct1.get_local_id(2) == 0) { dst[idst] = tmp; } } static void k_sum_rows_f32(const float * x, float * dst, const int ncols, const sycl::nd_item<3> &item_ct1) { const int row = item_ct1.get_group(1); const int col = item_ct1.get_local_id(2); float sum = 0.0f; for (int i = col; i < ncols; i += item_ct1.get_local_range(2)) { sum += x[row * ncols + i]; } sum = warp_reduce_sum(sum, item_ct1); if (col == 0) { dst[row] = sum; } } template static inline void ggml_sycl_swap(T & a, T & b) { T tmp = a; a = b; b = tmp; } template __dpct_inline__ static void k_argsort_f32_i32(const float *x, int *dst, const int ncols, int ncols_pad, const int tasks_per_thread, const sycl::nd_item<3> &item_ct1, uint8_t *dpct_local) { // bitonic sort int col_index = item_ct1.get_local_id(2); int row = item_ct1.get_group(1); for (int i = 0; i < tasks_per_thread; i++) { int col = col_index * tasks_per_thread + i; if (col >= ncols_pad) { return; } } const float * x_row = x + row * ncols; auto dst_row = (int *)dpct_local; // initialize indices for (int i=0;i 0; j /= 2) { for (int i = 0; i < tasks_per_thread; i++) { int col = col_index * tasks_per_thread + i; int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ncols || (dst_row[ixj] < ncols && (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]]))) { ggml_sycl_swap(dst_row[col], dst_row[ixj]); } } else { if (dst_row[ixj] >= ncols || (dst_row[col] < ncols && (order == GGML_SORT_ORDER_ASC ? 
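// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): the XOR
// butterfly used in the kernels above to combine per-lane partial sums. At
// each step every lane adds the value held by the lane whose index differs in
// exactly one bit; after log2(WARP_SIZE) steps all lanes hold the total. A
// std::vector stands in for the sub-group, and butterfly_sum is an invented
// name, not a ggml or SYCL API.
// ---------------------------------------------------------------------------
#include <vector>

static std::vector<float> butterfly_sum(std::vector<float> lanes) {
    const int n = (int) lanes.size();             // assumed to be a power of two, e.g. 32
    for (int mask = n / 2; mask > 0; mask >>= 1) {
        std::vector<float> next = lanes;
        for (int lane = 0; lane < n; ++lane) {
            // analogue of dpct::permute_sub_group_by_xor(sub_group, tmp, mask)
            next[lane] = lanes[lane] + lanes[lane ^ mask];
        }
        lanes = std::move(next);
    }
    return lanes;                                 // every entry now equals the full sum
}

// e.g. butterfly_sum({1, 2, 3, 4}) yields {10, 10, 10, 10}
// --- end of sketch ---------------------------------------------------------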
x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]]))) { ggml_sycl_swap(dst_row[col], dst_row[ixj]); } } } item_ct1.barrier(sycl::access::fence_space::local_space); } } } // copy the result to dst without the padding for (int i = 0; i < tasks_per_thread; i++) { int col = col_index * tasks_per_thread + i; if (col < ncols) { dst[row * ncols + col] = dst_row[col]; } } } static void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past, const sycl::nd_item<3> &item_ct1) { const int col = item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1); const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); if (col >= ncols) { return; } const int i = row*ncols + col; //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i]; //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX; } static void scale_f32(const float * x, float * dst, const float scale, const float bias, const int k, const sycl::nd_item<3> &item_ct1) { const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2); if (i >= k) { return; } dst[i] = scale * x[i] + bias; } template static void pool2d_nchw_kernel( const int ih, const int iw, const int oh, const int ow, const int kh, const int kw, const int sh, const int sw, const int ph, const int pw, const int parallel_elements, const Ti* src, To* dst, const enum ggml_op_pool op, const sycl::nd_item<3> &item_ct1) { int idx = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2); if (idx >= parallel_elements) { return; } const int I_HW = ih * iw; const int O_HW = oh * ow; const int nc = idx / O_HW; const int cur_oh = idx % O_HW / ow; const int cur_ow = idx % O_HW % ow; const Ti* i_ptr = src + nc * I_HW; To* o_ptr = dst + nc * O_HW; const int start_h = cur_oh * sh - ph; const int bh = sycl::max(0, start_h); const int eh = sycl::min(ih, start_h + kh); const int start_w = cur_ow * sw - pw; const int bw = sycl::max(0, start_w); const int ew = sycl::min(iw, start_w + kw); To res = 0; switch (op) { case GGML_OP_POOL_AVG: res = 0; break; case GGML_OP_POOL_MAX: res = -FLT_MAX; break; default: res = (To) sycl::nan(uint32_t(0)); break; } for (int i = bh; i < eh; i += 1) { for (int j = bw; j < ew; j += 1) { #if DPCT_COMPATIBILITY_TEMP >= 350 /* DPCT1098:106: The '*' expression is used instead of the __ldg call. These two expressions do not provide the exact same functionality. Check the generated code for potential precision and/or performance issues. 
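// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): a serial
// reference for the bitonic argsort implemented by k_argsort_f32_i32 above.
// The index range is padded to a power of two, padded indices compare as
// "larger than any real element" so they sink to the end, and the standard
// bitonic compare/exchange network runs over the padded range. The GPU kernel
// does the same, with each work-item owning tasks_per_thread columns and a
// barrier after every exchange step. bitonic_argsort_asc is an invented name.
// ---------------------------------------------------------------------------
#include <utility>
#include <vector>

static std::vector<int> bitonic_argsort_asc(const std::vector<float> & row) {
    const int ncols = (int) row.size();
    int ncols_pad = 1;
    while (ncols_pad < ncols) ncols_pad *= 2;     // next power of two

    std::vector<int> idx(ncols_pad);
    for (int i = 0; i < ncols_pad; ++i) idx[i] = i;   // indices >= ncols are sentinels

    auto comes_after = [&](int a, int b) {        // "a should come after b" (ascending)
        if (a >= ncols) return true;              // sentinel sorts last
        if (b >= ncols) return false;
        return row[a] > row[b];
    };

    for (int k = 2; k <= ncols_pad; k *= 2) {
        for (int j = k / 2; j > 0; j /= 2) {
            for (int col = 0; col < ncols_pad; ++col) {
                const int ixj = col ^ j;
                if (ixj <= col) continue;
                const bool ascending = (col & k) == 0;
                if (ascending ? comes_after(idx[col], idx[ixj])
                              : comes_after(idx[ixj], idx[col])) {
                    std::swap(idx[col], idx[ixj]);
                }
            }
        }
    }
    idx.resize(ncols);                            // drop the padding, as the kernel does
    return idx;                                   // descending order just flips the comparator
}
// --- end of sketch ---------------------------------------------------------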
*/ Ti cur = *(i_ptr + i * iw + j); #else Ti cur = i_ptr[i * iw + j]; #endif switch (op) { case GGML_OP_POOL_AVG: res += (cur / (kh * kw)); break; case GGML_OP_POOL_MAX: res = sycl::max(res, (To)cur); break; default: res = (To) sycl::nan(uint32_t(0)); break; } } } o_ptr[cur_oh * ow + cur_ow] = res; } static void ggml_mul_mat_p021_f16_f32_sycl(const void *vx, const float *y, float *dst, const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y, queue_ptr stream) { const sycl::range<3> block_nums(nchannels_y, nrows_x, 1); const sycl::range<3> block_dims(1, 1, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_p021_f16_f32(vx, y, dst, ncols_x, nrows_x, nchannels_x, nchannels_y, item_ct1); }); } } static void ggml_mul_mat_vec_nc_f16_f32_sycl( const void *vx, const float *y, float *dst, const int ncols_x, const int nrows_x, const int row_stride_x, const int nchannels_x, const int nchannels_y, const int channel_stride_x, const int channel_stride_y, queue_ptr stream) { const sycl::range<3> block_nums(nchannels_y, nrows_x, 1); const sycl::range<3> block_dims(1, 1, WARP_SIZE); { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); stream->parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { mul_mat_vec_nc_f16_f32(vx, y, dst, ncols_x, nrows_x, row_stride_x, channel_stride_x, channel_stride_y, nchannels_y / nchannels_x, item_ct1); }); } } static void scale_f32_sycl(const float *x, float *dst, const float scale, const float bias, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SCALE_BLOCK_SIZE - 1) / SYCL_SCALE_BLOCK_SIZE; stream->parallel_for( sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { scale_f32(x, dst, scale, bias, k, item_ct1); }); } static void sum_rows_f32_sycl(const float *x, float *dst, const int ncols, const int nrows, queue_ptr stream) { const sycl::range<3> block_dims(1, 1, WARP_SIZE); const sycl::range<3> block_nums(1, nrows, 1); stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { k_sum_rows_f32(x, dst, ncols, item_ct1); }); } static int next_power_of_2(int x) { int n = 1; while (n < x) { n *= 2; } return n; } static void argsort_f32_i32_sycl(const float *x, int *dst, const int ncols, const int nrows, ggml_sort_order order, queue_ptr stream, int device) { // bitonic sort requires ncols to be power of 2 const int ncols_pad = next_power_of_2(ncols); int nth = 1; int max_block_size = ggml_sycl_info().max_work_group_sizes[device]; while (nth < ncols_pad && nth < max_block_size) nth *= 2; if (nth > max_block_size) nth = max_block_size; const int tasks_per_thread = ncols_pad / nth; const sycl::range<3> block_dims(1, 1, nth); const sycl::range<3> block_nums(1, nrows, 1); const size_t shared_mem = ncols_pad * sizeof(int); GGML_ASSERT(shared_mem<=ggml_sycl_info().devices[device].smpbo); if (order == GGML_SORT_ORDER_ASC) { stream->submit([&](sycl::handler &cgh) { sycl::local_accessor dpct_local_acc_ct1( sycl::range<1>(shared_mem), cgh); cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), 
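// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): the launch
// geometry computed by argsort_f32_i32_sycl above. The row is padded to a
// power of two, the work-group size is grown in powers of two up to the device
// limit, each work-item then owns ncols_pad / nth columns, and local memory
// holds one int per padded column. plan_argsort / argsort_launch are invented
// names; the sketch assumes max_wg_size is itself a power of two.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstddef>

struct argsort_launch {
    int    ncols_pad;         // power-of-two padded row length
    int    nth;               // work-group size
    int    tasks_per_thread;  // columns handled by each work-item
    size_t shared_mem;        // bytes of local memory for the index buffer
};

static argsort_launch plan_argsort(int ncols, int max_wg_size, size_t local_mem_limit) {
    int ncols_pad = 1;
    while (ncols_pad < ncols) ncols_pad *= 2;

    int nth = 1;
    while (nth < ncols_pad && nth < max_wg_size) nth *= 2;
    if (nth > max_wg_size) nth = max_wg_size;

    argsort_launch p;
    p.ncols_pad        = ncols_pad;
    p.nth              = nth;
    p.tasks_per_thread = ncols_pad / nth;
    p.shared_mem       = (size_t) ncols_pad * sizeof(int);
    assert(p.shared_mem <= local_mem_limit);      // mirrors the smpbo check above
    return p;
}
// --- end of sketch ---------------------------------------------------------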
[=](sycl::nd_item<3> item_ct1) { k_argsort_f32_i32( x, dst, ncols, ncols_pad, tasks_per_thread, item_ct1, dpct_local_acc_ct1 .get_multi_ptr() .get()); }); }); } else if (order == GGML_SORT_ORDER_DESC) { stream->submit([&](sycl::handler &cgh) { sycl::local_accessor dpct_local_acc_ct1( sycl::range<1>(shared_mem), cgh); cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_argsort_f32_i32( x, dst, ncols, ncols_pad, tasks_per_thread, item_ct1, dpct_local_acc_ct1 .get_multi_ptr() .get()); }); }); } else { GGML_ABORT("fatal error"); } } static void argmax_f32_i32_sycl(const float *x, int *dst, const int ncols, const int nrows, queue_ptr stream) { const sycl::range<3> block_dims(1, 1, SYCL_ARGMAX_BLOCK_SIZE); const sycl::range<3> block_nums(1, nrows, 1); const size_t shared_mem = 256 * sizeof(float); stream->submit([&](sycl::handler &cgh) { sycl::local_accessor shared_data( sycl::range<1>(shared_mem/sizeof(float)), cgh); sycl::local_accessor shared_indices( sycl::range<1>(shared_mem/sizeof(float)), cgh); cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { const int tid = item_ct1.get_local_id(2); const int row = item_ct1.get_global_id(1); float max_val = -INFINITY; int max_idx = -1; for (int col = tid; col < ncols; col += 256) { float val = x[row * ncols + col]; if (val > max_val) { max_val = val; max_idx = col; } } shared_data[tid] = max_val; shared_indices[tid] = max_idx; item_ct1.barrier(sycl::access::fence_space::local_space); for (int stride = 256/2; stride > 0; stride >>= 1) { if (tid < stride) { float val1 = shared_data[tid]; float val2 = shared_data[tid + stride]; if (val2 > val1) { shared_data[tid] = val2; shared_indices[tid] = shared_indices[tid + stride]; } } item_ct1.barrier(sycl::access::fence_space::local_space); } if (tid == 0) { dst[row] = shared_indices[0]; } }); }); } static void diag_mask_inf_f32_sycl(const float *x, float *dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, queue_ptr stream) { const sycl::range<3> block_dims(1, SYCL_DIAG_MASK_INF_BLOCK_SIZE, 1); const int block_num_x = (ncols_x + SYCL_DIAG_MASK_INF_BLOCK_SIZE - 1) / SYCL_DIAG_MASK_INF_BLOCK_SIZE; const sycl::range<3> block_nums(1, block_num_x, nrows_x); stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { diag_mask_inf_f32(x, dst, ncols_x, rows_per_channel, n_past, item_ct1); }); } static dpct::err0 ggml_sycl_cpy_tensor_2d(void *dst, const struct ggml_tensor *src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, queue_ptr stream) try { dpct::memcpy_direction kind; char * src_ptr; if (ggml_backend_buffer_is_host(src->buffer)) { kind = dpct::host_to_device; //GGML_SYCL_DEBUG("%s: Host buffer type src tensor\n", __func__); src_ptr = (char *) src->data; // GGML_SYCL_DEBUG("ggml_sycl_cpy_tensor_2d GGML_BACKEND_TYPE_CPU src_ptr %p\n", src_ptr); } else if (ggml_backend_buffer_is_sycl(src->buffer)) { // If buffer is a SYCL buffer //GGML_SYCL_DEBUG("%s: SYCL buffer type src tensor\n", __func__); kind = dpct::device_to_device; src_ptr = (char *) src->data; } else if (ggml_backend_buffer_is_sycl_split(src->buffer)) { /* If buffer is a SYCL split buffer */ //GGML_SYCL_DEBUG("%s: Split buffer type src tensor\n", __func__); GGML_ASSERT(i1_low == 0 && i1_high == src->ne[1]); kind = dpct::device_to_device; ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra; int id; SYCL_CHECK(CHECK_TRY_ERROR( id = 
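// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): the
// two-phase argmax used by argmax_f32_i32_sycl above. Phase 1 has each of the
// BLOCK work-items scan a strided slice of the row and keep its local best
// (value, index); phase 2 folds the BLOCK candidates pairwise, halving the
// active range each step, exactly like the shared-memory loop. Ties may
// resolve differently than a left-to-right scan, matching the kernel.
// argmax_blocked is an invented name.
// ---------------------------------------------------------------------------
#include <cmath>
#include <vector>

static int argmax_blocked(const std::vector<float> & row, int block = 256) {
    std::vector<float> best_val(block, -INFINITY);
    std::vector<int>   best_idx(block, -1);

    // phase 1: strided per-"thread" scan
    for (int tid = 0; tid < block; ++tid) {
        for (int col = tid; col < (int) row.size(); col += block) {
            if (row[col] > best_val[tid]) { best_val[tid] = row[col]; best_idx[tid] = col; }
        }
    }
    // phase 2: pairwise tree reduction over the block
    for (int stride = block / 2; stride > 0; stride >>= 1) {
        for (int tid = 0; tid < stride; ++tid) {
            if (best_val[tid + stride] > best_val[tid]) {
                best_val[tid] = best_val[tid + stride];
                best_idx[tid] = best_idx[tid + stride];
            }
        }
    }
    return best_idx[0];
}
// --- end of sketch ---------------------------------------------------------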
get_current_device_id())); // GGML_SYCL_DEBUG("current device index %d\n", id); src_ptr = (char *) extra->data_device[id]; } else { // GGML_SYCL_DEBUG("GGML_ABORT("fatal error")\n"); GGML_ABORT("fatal error"); } char * dst_ptr = (char *) dst; GGML_TENSOR_LOCALS_1(int64_t, ne, src, ne); GGML_TENSOR_LOCALS(int64_t, nb, src, nb); const enum ggml_type type = src->type; const int64_t ts = ggml_type_size(type); const int64_t bs = ggml_blck_size(type); int64_t i1_diff = i1_high - i1_low; const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3; if (nb0 == ts && nb1 == ts*ne0/bs) { // GGML_SYCL_DEBUG("stream->memcpy: dst_ptr=%p, x=%p, size=%lu\n", dst_ptr, x, i1_diff * nb1); // return CHECK_TRY_ERROR(stream->memcpy(dst_ptr, x, i1_diff * nb1)); return CHECK_TRY_ERROR(dpct::async_dpct_memcpy(dst_ptr, x, i1_diff * nb1, kind, *stream)); } else if (nb0 == ts) { return CHECK_TRY_ERROR( dpct::async_dpct_memcpy(dst_ptr, ts * ne0 / bs, x, nb1, ts * ne0 / bs, i1_diff, kind, *stream)); } else { for (int64_t i1 = 0; i1 < i1_diff; i1++) { const void * rx = (const void *) ((const char *) x + i1*nb1); void * rd = (void *) (dst_ptr + i1*ts*ne0/bs); // pretend the row is a matrix with cols=1 dpct::err0 r = CHECK_TRY_ERROR(dpct::async_dpct_memcpy( rd, ts / bs, rx, nb0, ts / bs, ne0, kind, *stream)); /* DPCT1001:85: The statement could not be removed. */ /* DPCT1000:86: Error handling if-stmt was detected but could not be rewritten. */ if (r != 0) return r; } return 0; } } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } inline void ggml_sycl_op_mul_mat_sycl( ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i, float *dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, const queue_ptr &stream) try { GGML_ASSERT(src0_dd_i != nullptr); GGML_ASSERT(src1_ddf_i != nullptr); GGML_ASSERT(dst_dd_i != nullptr); const int64_t ne00 = src0->ne[0]; const int64_t ne10 = src1->ne[0]; GGML_ASSERT(ne00 == ne10); const int64_t row_diff = row_high - row_low; int id; SYCL_CHECK( CHECK_TRY_ERROR(id = get_current_device_id())); const int64_t ne0 = dst->ne[0]; // used by MKL only // the main device has a larger memory buffer to hold the results from all GPUs // ldc == nrows of the matrix that cuBLAS writes into int ldc = id == ctx.device ? ne0 : row_diff; // used by MKL only #ifdef GGML_SYCL_F16 bool use_fp16 = true; // TODO(Yu) SYCL capability check #else bool use_fp16 = false; #endif if ((src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && use_fp16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { ggml_sycl_pool_alloc src0_as_f16(ctx.pool()); if (src0->type != GGML_TYPE_F16) { scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_sycl", dst, /*num_src=*/2, " : converting src0 to fp16"); const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src0->type, dst); GGML_ASSERT(to_fp16_sycl != nullptr); size_t ne = row_diff*ne00; src0_as_f16.alloc(ne); to_fp16_sycl(src0_dd_i, src0_as_f16.get(), ne, stream); } const sycl::half *src0_ptr = src0->type == GGML_TYPE_F16 ? 
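// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): the three
// copy paths chosen by ggml_sycl_cpy_tensor_2d above, written with plain
// memcpy. (1) the whole slab is contiguous, (2) each row is contiguous but
// rows are strided, (3) even the elements within a row are strided. 'ts' is
// the byte size of one block of the type and 'bs' its block size, so a row of
// ne0 elements occupies ts*ne0/bs bytes. copy_2d_rows is an invented name.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstring>

static void copy_2d_rows(char * dst, const char * src,
                         int64_t nrows, int64_t ne0,
                         int64_t nb0, int64_t nb1,       // source strides in bytes
                         int64_t ts, int64_t bs) {
    const int64_t row_bytes = ts * ne0 / bs;
    if (nb0 == ts && nb1 == row_bytes) {
        std::memcpy(dst, src, (size_t)(nrows * row_bytes));          // path 1: one bulk copy
    } else if (nb0 == ts) {
        for (int64_t i1 = 0; i1 < nrows; ++i1) {                     // path 2: pitched rows
            std::memcpy(dst + i1 * row_bytes, src + i1 * nb1, (size_t) row_bytes);
        }
    } else {
        for (int64_t i1 = 0; i1 < nrows; ++i1) {                     // path 3: strided elements
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                std::memcpy(dst + i1 * row_bytes + i0 * (ts / bs),
                            src + i1 * nb1       + i0 * nb0,
                            (size_t)(ts / bs));
            }
        }
    }
}
// --- end of sketch ---------------------------------------------------------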
(const sycl::half *)src0_dd_i : src0_as_f16.get(); ggml_sycl_pool_alloc src1_as_f16(ctx.pool()); if (src1->type != GGML_TYPE_F16) { scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_sycl", dst, /*num_src=*/2, " : converting src1 to fp16"); const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type, dst); GGML_ASSERT(to_fp16_sycl != nullptr); size_t ne = src1_ncols*ne10; src1_as_f16.alloc(ne); to_fp16_sycl(src1_ddf_i, src1_as_f16.get(), ne, stream); } const sycl::half *src1_ptr = src1->type == GGML_TYPE_F16 ? (const sycl::half *)src1->data + src1_padded_row_size : src1_as_f16.get(); #if GGML_SYCL_DNNL if (!g_ggml_sycl_disable_dnn) { DnnlGemmWrapper::row_gemm(ctx,row_diff, src1_ncols , ne10, src0_ptr, DnnlGemmWrapper::to_dt(), src1_ptr, DnnlGemmWrapper::to_dt(), dst_dd_i, DnnlGemmWrapper::to_dt(), stream); } else #endif { ggml_sycl_pool_alloc dst_f16(ctx.pool(), row_diff * src1_ncols); const sycl::half alpha_f16 = 1.0f; const sycl::half beta_f16 = 0.0f; SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm( *stream, oneapi::math::transpose::trans, oneapi::math::transpose::nontrans, row_diff, src1_ncols, ne10, &alpha_f16, src0_ptr, dpct::library_data_t::real_half, ne00, src1_ptr, dpct::library_data_t::real_half, ne10, &beta_f16, dst_f16.get(), dpct::library_data_t::real_half, ldc, dpct::library_data_t::real_half))); scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2, " : converting dst to fp32"); const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16, dst); to_fp32_sycl(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); } } else { ggml_sycl_pool_alloc src0_ddq_as_f32(ctx.pool()); ggml_sycl_pool_alloc src1_ddq_as_f32(ctx.pool()); if (src0->type != GGML_TYPE_F32) { scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2, " : converting src0 to fp32"); const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(src0->type, dst); GGML_ASSERT(to_fp32_sycl != nullptr); src0_ddq_as_f32.alloc(row_diff*ne00); to_fp32_sycl(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream); } if (src1->type != GGML_TYPE_F32) { scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2, " : converting src1 to fp32"); const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(src1->type, dst); GGML_ASSERT(to_fp32_sycl != nullptr); src1_ddq_as_f32.alloc(src1_ncols*ne10); to_fp32_sycl(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream); } const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get(); const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? 
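// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): what the
// trans/nontrans GEMM calls in ggml_sycl_op_mul_mat_sycl compute, in plain C.
// Both operands are row-contiguous (src0: row_diff rows of ne00 values, src1:
// src1_ncols columns of ne10 values) and the column-major result has leading
// dimension ldc, so dst[j*ldc + i] = dot(src0 row i, src1 column j) with
// alpha = 1 and beta = 0. The fp16 path converts both operands to half before
// the GEMM and converts the result back to fp32. gemm_reference is an
// invented name.
// ---------------------------------------------------------------------------
#include <cstdint>

static void gemm_reference(const float * src0, const float * src1, float * dst,
                           int64_t row_diff, int64_t src1_ncols, int64_t ne10,
                           int64_t ne00, int64_t ldc) {
    for (int64_t j = 0; j < src1_ncols; ++j) {
        for (int64_t i = 0; i < row_diff; ++i) {
            float acc = 0.0f;
            for (int64_t k = 0; k < ne10; ++k) {
                acc += src0[i * ne00 + k] * src1[j * ne10 + k];
            }
            dst[j * ldc + i] = acc;
        }
    }
}
// --- end of sketch ---------------------------------------------------------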
(const float *) src1_ddf_i : src1_ddq_as_f32.get(); #if GGML_SYCL_DNNL if (!g_ggml_sycl_disable_dnn) { DnnlGemmWrapper::row_gemm(ctx, row_diff, src1_ncols, ne10, src0_ddf_i, DnnlGemmWrapper::to_dt(), src1_ddf1_i, DnnlGemmWrapper::to_dt(), dst_dd_i, DnnlGemmWrapper::to_dt(), stream); } else #endif { const float alpha = 1.0f; const float beta = 0.0f; SYCL_CHECK(CHECK_TRY_ERROR(oneapi::math::blas::column_major::gemm( get_onemath_backend(*stream), oneapi::math::transpose::trans, oneapi::math::transpose::nontrans, row_diff, src1_ncols, ne10, dpct::get_value(&alpha, *stream), src0_ddf_i, ne00, src1_ddf1_i, ne10, dpct::get_value(&beta, *stream), dst_dd_i, ldc))); } } GGML_UNUSED(dst); GGML_UNUSED(src1_ddq_i); GGML_UNUSED(src1_padded_row_size); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } static void ggml_sycl_op_pool2d(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const float * src0_dd = static_cast(dst->src[0]->data); float * dst_dd = static_cast(dst->data); const int32_t * opts = (const int32_t *)dst->op_params; enum ggml_op_pool op = static_cast(opts[0]); const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; const int64_t IH = dst->src[0]->ne[1]; const int64_t IW = dst->src[0]->ne[0]; const int64_t N = dst->ne[3]; const int64_t OC = dst->ne[2]; const int64_t OH = dst->ne[1]; const int64_t OW = dst->ne[0]; const int parallel_elements = N * OC * OH * OW; const int num_blocks = (parallel_elements + SYCL_POOL2D_BLOCK_SIZE - 1) / SYCL_POOL2D_BLOCK_SIZE; sycl::range<3> block_nums(1, 1, num_blocks); main_stream->parallel_for( sycl::nd_range<3>(block_nums * sycl::range<3>(1, 1, SYCL_IM2COL_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_IM2COL_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { pool2d_nchw_kernel(IH, IW, OH, OW, k1, k0, s1, s0, p1, p0, parallel_elements, src0_dd, dst_dd, op, item_ct1); }); } inline void ggml_sycl_op_sum(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const float * src0_dd = static_cast(dst->src[0]->data); float * dst_dd = static_cast(dst->data); const int64_t ne = ggml_nelements(dst->src[0]); sum_rows_f32_sycl(src0_dd, dst_dd, ne, 1, main_stream); } inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const float * src0_dd = static_cast(dst->src[0]->data); float * dst_dd = static_cast(dst->data); const int64_t ncols = dst->src[0]->ne[0]; const int64_t nrows = ggml_nrows(dst->src[0]); sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream); } inline void ggml_sycl_op_mean(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); const float * src0_dd = static_cast(dst->src[0]->data); float * dst_dd = 
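// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): the index
// math behind the pool2d kernel launched by ggml_sycl_op_pool2d above. Each
// output element clamps its pooling window to the input and reduces with
// either max or average; note that, like the kernel, the average divides by
// the full kh*kw window even when the window is clamped at a border.
// pool2d_one_output is an invented name.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cfloat>

static float pool2d_one_output(const float * src, int ih, int iw,
                               int oh_idx, int ow_idx,
                               int kh, int kw, int sh, int sw, int ph, int pw,
                               bool is_max) {
    const int start_h = oh_idx * sh - ph;
    const int start_w = ow_idx * sw - pw;
    const int bh = std::max(0, start_h), eh = std::min(ih, start_h + kh);
    const int bw = std::max(0, start_w), ew = std::min(iw, start_w + kw);

    float res = is_max ? -FLT_MAX : 0.0f;
    for (int i = bh; i < eh; ++i) {
        for (int j = bw; j < ew; ++j) {
            const float cur = src[i * iw + j];
            res = is_max ? std::max(res, cur)
                         : res + cur / (kh * kw);
        }
    }
    return res;
}
// --- end of sketch ---------------------------------------------------------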
static_cast<float *>(dst->data);
    const int64_t ncols = dst->src[0]->ne[0];
    const int64_t nrows = ggml_nrows(dst->src[0]);

    sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);

    main_stream->parallel_for(
        sycl::range<1>(nrows),
        [=](sycl::id<1> row) {
            dst_dd[row] /= ncols;
        });
}

inline void ggml_sycl_op_argsort(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_I32);

    dpct::queue_ptr main_stream = ctx.stream();
    SYCL_CHECK(ggml_sycl_set_device(ctx.device));

    const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
    int32_t *     dst_dd  = static_cast<int32_t *>(dst->data);

    const int64_t ncols = dst->src[0]->ne[0];
    const int64_t nrows = ggml_nrows(dst->src[0]);

    enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];

    argsort_f32_i32_sycl(src0_dd, (int *) dst_dd, ncols, nrows, order, main_stream, ctx.device);
}

inline void ggml_sycl_op_argmax(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_I32);

    dpct::queue_ptr main_stream = ctx.stream();
    SYCL_CHECK(ggml_sycl_set_device(ctx.device));

    const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
    int32_t *     dst_dd  = static_cast<int32_t *>(dst->data);

    const int64_t ncols = dst->src[0]->ne[0];
    const int64_t nrows = ggml_nrows(dst->src[0]);

    argmax_f32_i32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);
}

inline void ggml_sycl_op_diag_mask_inf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    dpct::queue_ptr main_stream = ctx.stream();
    SYCL_CHECK(ggml_sycl_set_device(ctx.device));

    const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
    float *       dst_dd  = static_cast<float *>(dst->data);

    const int64_t ne00 = dst->src[0]->ne[0];
    const int64_t ne01 = dst->src[0]->ne[1];
    const int nrows0   = ggml_nrows(dst->src[0]);

    const int n_past = ((int32_t *) dst->op_params)[0];

    diag_mask_inf_f32_sycl(src0_dd, dst_dd, ne00, nrows0, ne01, n_past, main_stream);
}

inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    dpct::queue_ptr main_stream = ctx.stream();
    SYCL_CHECK(ggml_sycl_set_device(ctx.device));

    const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
    float *       dst_dd  = static_cast<float *>(dst->data);

    float scale;
    float bias;
    memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
    memcpy(&bias,  (float *) dst->op_params + 1, sizeof(float));

    scale_f32_sycl(src0_dd, dst_dd, scale, bias, ggml_nelements(dst->src[0]), main_stream);
    /*
    DPCT1010:87: SYCL uses exceptions to report errors and does not use the
    error codes. The call was replaced with 0. You need to rewrite this code.
    */
    SYCL_CHECK(0);
}

static void ggml_sycl_set_peer_access(const int n_tokens, int main_device) {
    static bool peer_access_enabled = false;

    const bool enable_peer_access = n_tokens <= GGML_SYCL_PEER_MAX_BATCH_SIZE;

    if (peer_access_enabled == enable_peer_access) {
        return;
    }

#ifdef NDEBUG
    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        SYCL_CHECK(ggml_sycl_set_device(i));
    }

    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        SYCL_CHECK(ggml_sycl_set_device(i));

        for (int id_other = 0; id_other < ggml_sycl_info().device_count; ++id_other) {
            if (i == id_other) {
                continue;
            }
            if (i != main_device && id_other != main_device) {
                continue;
            }

            // int can_access_peer;
            // SYCL_CHECK(syclDeviceCanAccessPeer(&can_access_peer, id, id_other));
            // if (can_access_peer) {
            //     if (enable_peer_access) {
            //         SYCL_CHECK(syclDeviceEnablePeerAccess(id_other, 0));
            //     } else {
            //         SYCL_CHECK(syclDeviceDisablePeerAccess(id_other));
            //     }
            // }
        }
    }
#endif // NDEBUG

    peer_access_enabled = enable_peer_access;
}

template
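// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the upstream source): the
// gating logic of ggml_sycl_set_peer_access above. The helper only reacts
// when the desired state actually changes; the SYCL equivalents of the CUDA
// peer-access calls are still commented out upstream, so today it only walks
// the devices and records the new state. peer_access_should_toggle is an
// invented name.
// ---------------------------------------------------------------------------
static bool peer_access_enabled_state = false;

static bool peer_access_should_toggle(int n_tokens, int max_batch /* e.g. GGML_SYCL_PEER_MAX_BATCH_SIZE */) {
    const bool enable = n_tokens <= max_batch;
    if (enable == peer_access_enabled_state) {
        return false;                         // nothing to do
    }
    peer_access_enabled_state = enable;       // remember the new state
    return true;                              // caller would (dis)enable peer access here
}
// --- end of sketch ---------------------------------------------------------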